
Annotation of sys/arch/m68k/m68k/pmap_motorola.c, Revision 1.1.1.1

1.1       nbrk        1: /*     $OpenBSD: pmap_motorola.c,v 1.52 2007/04/13 18:57:49 art Exp $ */
                      2:
                      3: /*
                      4:  * Copyright (c) 1999 The NetBSD Foundation, Inc.
                      5:  * All rights reserved.
                      6:  *
                      7:  * This code is derived from software contributed to The NetBSD Foundation
                      8:  * by Jason R. Thorpe.
                      9:  *
                     10:  * Redistribution and use in source and binary forms, with or without
                     11:  * modification, are permitted provided that the following conditions
                     12:  * are met:
                     13:  * 1. Redistributions of source code must retain the above copyright
                     14:  *    notice, this list of conditions and the following disclaimer.
                     15:  * 2. Redistributions in binary form must reproduce the above copyright
                     16:  *    notice, this list of conditions and the following disclaimer in the
                     17:  *    documentation and/or other materials provided with the distribution.
                     18:  * 3. All advertising materials mentioning features or use of this software
                     19:  *    must display the following acknowledgement:
                     20:  *     This product includes software developed by the NetBSD
                     21:  *     Foundation, Inc. and its contributors.
                     22:  * 4. Neither the name of The NetBSD Foundation nor the names of its
                     23:  *    contributors may be used to endorse or promote products derived
                     24:  *    from this software without specific prior written permission.
                     25:  *
                     26:  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
                     27:  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
                     28:  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
                     29:  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
                     30:  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
                     31:  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
                     32:  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
                     33:  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
                     34:  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
                     35:  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
                     36:  * POSSIBILITY OF SUCH DAMAGE.
                     37:  */
                     38:
                     39: /*
                     40:  * Copyright (c) 1995 Theo de Raadt
                     41:  *
                     42:  * Redistribution and use in source and binary forms, with or without
                     43:  * modification, are permitted provided that the following conditions
                     44:  * are met:
                     45:  * 1. Redistributions of source code must retain the above copyright
                     46:  *    notice, this list of conditions and the following disclaimer.
                     47:  * 2. Redistributions in binary form must reproduce the above copyright
                     48:  *    notice, this list of conditions and the following disclaimer in the
                     49:  *    documentation and/or other materials provided with the distribution.
                     50:  *
                     51:  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
                     52:  * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
                     53:  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
                     54:  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
                     55:  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
                     56:  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
                     57:  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
                     58:  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
                     59:  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
                     60:  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
                     61:  * SUCH DAMAGE.
                     62:  */
                     63:
                     64: /*
                     65:  * Copyright (c) 1991, 1993
                     66:  *     The Regents of the University of California.  All rights reserved.
                     67:  *
                     68:  * This code is derived from software contributed to Berkeley by
                     69:  * the Systems Programming Group of the University of Utah Computer
                     70:  * Science Department.
                     71:  *
                     72:  * Redistribution and use in source and binary forms, with or without
                     73:  * modification, are permitted provided that the following conditions
                     74:  * are met:
                     75:  * 1. Redistributions of source code must retain the above copyright
                     76:  *    notice, this list of conditions and the following disclaimer.
                     77:  * 2. Redistributions in binary form must reproduce the above copyright
                     78:  *    notice, this list of conditions and the following disclaimer in the
                     79:  *    documentation and/or other materials provided with the distribution.
                     80:  * 3. Neither the name of the University nor the names of its contributors
                     81:  *    may be used to endorse or promote products derived from this software
                     82:  *    without specific prior written permission.
                     83:  *
                     84:  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
                     85:  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
                     86:  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
                     87:  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
                     88:  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
                     89:  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
                     90:  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
                     91:  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
                     92:  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
                     93:  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
                     94:  * SUCH DAMAGE.
                     95:  *
                     96:  *     @(#)pmap.c      8.6 (Berkeley) 5/27/94
                     97:  */
                     98:
                     99: /*
                    100:  * m68k series physical map management code.
                    101:  *
                    102:  * Supports:
                    103:  *     68020 with HP MMU
                    104:  *     68020 with 68851 MMU
                    105:  *     68030 with on-chip MMU
                    106:  *     68040 with on-chip MMU
                    107:  *     68060 with on-chip MMU
                    108:  *
                    109:  * Notes:
                    110:  *     Don't even pay lip service to multiprocessor support.
                    111:  *
                    112:  *     We assume TLB entries don't have process tags (except for the
                    113:  *     supervisor/user distinction) so we only invalidate TLB entries
                    114:  *     when changing mappings for the current (or kernel) pmap.  This is
                    115:  *     technically not true for the 68851 but we flush the TLB on every
                    116:  *     context switch, so it effectively winds up that way.
                    117:  *
                    118:  *     Bitwise and/or operations are significantly faster than bitfield
                    119:  *     references so we use them when accessing STE/PTEs in the pmap_pte_*
                    120:  *     macros.  Note also that the two are not always equivalent; e.g.:
                    121:  *             (*pte & PG_PROT) [4] != pte->pg_prot [1]
                    122:  *     and a couple of routines that deal with protection and wiring take
                    123:  *     some shortcuts that assume the and/or definitions.
                    124:  *
                    125:  *     This implementation will only work for PAGE_SIZE == NBPG
                    126:  *     (i.e. 4096 bytes).
                    127:  */
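                          /*
                           * To illustrate the and/or note above (a sketch only: `pg_prot'
                           * stands for a hypothetical one-bit bitfield overlay of the PTE,
                           * and PG_PROT for the 0x4 mask the bracketed values above imply):
                           *
                           *     *pte = PG_V | PG_PROT;
                           *     (*pte & PG_PROT)        evaluates to 4 (bit left in place)
                           *     pte->pg_prot            would evaluate to 1 (bit shifted down)
                           *
                           * so the masked form may only be tested for zero/non-zero or
                           * compared against the mask itself, never against 1.
                           */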
                    128:
                    129: /*
                    130:  *     Manages physical address maps.
                    131:  *
                    132:  *     In addition to hardware address maps, this
                    133:  *     module is called upon to provide software-use-only
                    134:  *     maps which may or may not be stored in the same
                    135:  *     form as hardware maps.  These pseudo-maps are
                    136:  *     used to store intermediate results from copy
                    137:  *     operations to and from address spaces.
                    138:  *
                    139:  *     Since the information managed by this module is
                    140:  *     also stored by the logical address mapping module,
                    141:  *     this module may throw away valid virtual-to-physical
                    142:  *     mappings at almost any time.  However, invalidations
                    143:  *     of virtual-to-physical mappings must be done as
                    144:  *     requested.
                    145:  *
                    146:  *     In order to cope with hardware architectures which
                     147:  *     make virtual-to-physical map invalidations expensive,
                     148:  *     this module may delay invalidation or protection-reduction
                    149:  *     operations until such time as they are actually
                    150:  *     necessary.  This module is given full information as
                    151:  *     to which processors are currently using which maps,
                    152:  *     and to when physical maps must be made correct.
                    153:  */
                    154:
                    155: #include <sys/param.h>
                    156: #include <sys/systm.h>
                    157: #include <sys/proc.h>
                    158: #include <sys/malloc.h>
                    159: #include <sys/user.h>
                    160: #include <sys/pool.h>
                    161:
                    162: #include <machine/pte.h>
                    163:
                    164: /* #define UVM_PAGE_INLINE */
                    165: #include <uvm/uvm.h>
                    166:
                    167: #include <machine/cpu.h>
                    168:
                    169: #ifdef PMAP_DEBUG
                    170: #define PDB_FOLLOW     0x0001
                    171: #define PDB_INIT       0x0002
                    172: #define PDB_ENTER      0x0004
                    173: #define PDB_REMOVE     0x0008
                    174: #define PDB_CREATE     0x0010
                    175: #define PDB_PTPAGE     0x0020
                    176: #define PDB_CACHE      0x0040
                    177: #define PDB_BITS       0x0080
                    178: #define PDB_COLLECT    0x0100
                    179: #define PDB_PROTECT    0x0200
                    180: #define PDB_SEGTAB     0x0400
                    181: #define PDB_MULTIMAP   0x0800
                    182: #define PDB_PARANOIA   0x2000
                    183: #define PDB_WIRING     0x4000
                    184: #define PDB_PVDUMP     0x8000
                    185: #define PDB_ALL                0xFFFF
                    186:
                    187: int pmapdebug = PDB_PARANOIA;
                    188:
                    189: #define        PMAP_DPRINTF(l, x)      if (pmapdebug & (l)) printf x
                    190:
                    191: #if defined(M68040) || defined(M68060)
                    192: int dowriteback = 1;   /* 68040: enable writeback caching */
                    193: int dokwriteback = 1;  /* 68040: enable writeback caching of kernel AS */
                    194: #endif
                    195: #else
                    196: #define        PMAP_DPRINTF(l, x)      /* nothing */
                    197: #endif /* PMAP_DEBUG */
                    198:
                    199: /*
                    200:  * Get STEs and PTEs for user/kernel address space
                    201:  */
                    202: #if defined(M68040) || defined(M68060)
                    203: #define        pmap_ste1(m, v) \
                    204:        (&((m)->pm_stab[(vaddr_t)(v) >> SG4_SHIFT1]))
                    205: /* XXX assumes physically contiguous ST pages (if more than one) */
                    206: #define pmap_ste2(m, v) \
                    207:        (&((m)->pm_stab[(st_entry_t *)(*(u_int *)pmap_ste1(m, v) & SG4_ADDR1) \
                    208:                        - (m)->pm_stpa + (((v) & SG4_MASK2) >> SG4_SHIFT2)]))
                    209: #define        pmap_ste(m, v)  \
                    210:        (&((m)->pm_stab[(vaddr_t)(v) \
                    211:                        >> (mmutype <= MMU_68040 ? SG4_SHIFT1 : SG_ISHIFT)]))
                    212: #define pmap_ste_v(m, v) \
                    213:        (mmutype <= MMU_68040 \
                    214:         ? ((*pmap_ste1(m, v) & SG_V) && \
                    215:            (*pmap_ste2(m, v) & SG_V)) \
                    216:         : (*pmap_ste(m, v) & SG_V))
                    217: #else
                    218: #define        pmap_ste(m, v)   (&((m)->pm_stab[(vaddr_t)(v) >> SG_ISHIFT]))
                    219: #define pmap_ste_v(m, v) (*pmap_ste(m, v) & SG_V)
                    220: #endif
                    221:
                    222: #define pmap_pte(m, v) (&((m)->pm_ptab[(vaddr_t)(v) >> PG_SHIFT]))
                    223: #define pmap_pte_pa(pte)       (*(pte) & PG_FRAME)
                    224: #define pmap_pte_w(pte)                (*(pte) & PG_W)
                    225: #define pmap_pte_ci(pte)       (*(pte) & PG_CI)
                    226: #define pmap_pte_m(pte)                (*(pte) & PG_M)
                    227: #define pmap_pte_u(pte)                (*(pte) & PG_U)
                    228: #define pmap_pte_prot(pte)     (*(pte) & PG_PROT)
                    229: #define pmap_pte_v(pte)                (*(pte) & PG_V)
                    230:
                    231: #define pmap_pte_set_w(pte, v) \
                    232:        if (v) *(pte) |= PG_W; else *(pte) &= ~PG_W
                    233: #define pmap_pte_set_prot(pte, v) \
                    234:        if (v) *(pte) |= PG_PROT; else *(pte) &= ~PG_PROT
                    235: #define pmap_pte_w_chg(pte, nw)                ((nw) ^ pmap_pte_w(pte))
                    236: #define pmap_pte_prot_chg(pte, np)     ((np) ^ pmap_pte_prot(pte))
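                          /*
                           * A minimal lookup sketch using the macros above (the real code
                           * below, e.g. pmap_page_protect(), follows the same shape; PAGE_MASK
                           * is assumed to be the usual page-offset mask from the machine
                           * headers):
                           *
                           *     if (pmap_ste_v(pmap, va)) {
                           *             pt_entry_t *pte = pmap_pte(pmap, va);
                           *
                           *             if (pmap_pte_v(pte))
                           *                     pa = pmap_pte_pa(pte) | (va & PAGE_MASK);
                           *     }
                           *
                           * pmap_pte() only computes a pointer into pm_ptab; it must not be
                           * dereferenced unless pmap_ste_v() says the segment is mapped.
                           */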
                    237:
                    238: /*
                    239:  * Given a map and a machine independent protection code,
                    240:  * convert to an m68k protection code.
                    241:  */
                    242: #define pte_prot(p)    ((p) & VM_PROT_WRITE ? PG_RW : PG_RO)
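                          /*
                           * For instance, pte_prot(VM_PROT_READ) yields PG_RO while
                           * pte_prot(VM_PROT_READ|VM_PROT_WRITE) yields PG_RW; execute
                           * permission has no PTE representation and is simply ignored.
                           */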
                    243:
                    244: /*
                    245:  * Kernel page table page management.
                    246:  */
                    247: struct kpt_page {
                    248:        struct kpt_page *kpt_next;      /* link on either used or free list */
                    249:        vaddr_t         kpt_va;         /* always valid kernel VA */
                    250:        paddr_t         kpt_pa;         /* PA of this page (for speed) */
                    251: };
                    252: struct kpt_page *kpt_free_list, *kpt_used_list;
                    253: struct kpt_page *kpt_pages;
                    254:
                    255: /*
                    256:  * Kernel segment/page table and page table map.
                    257:  * The page table map gives us a level of indirection we need to dynamically
                    258:  * expand the page table.  It is essentially a copy of the segment table
                    259:  * with PTEs instead of STEs.  All are initialized in locore at boot time.
                    260:  * Sysmap will initially contain VM_KERNEL_PT_PAGES pages of PTEs.
                     261:  * Segtabzero is an empty segment table which all processes share until they
                    262:  * reference something.
                    263:  */
                    264: st_entry_t     *Sysseg;
                    265: pt_entry_t     *Sysmap, *Sysptmap;
                    266: st_entry_t     *Segtabzero, *Segtabzeropa;
                    267: vsize_t                Sysptsize = VM_KERNEL_PT_PAGES;
                    268:
                    269: extern caddr_t CADDR1, CADDR2;
                    270:
                    271: pt_entry_t     *caddr1_pte;    /* PTE for CADDR1 */
                    272: pt_entry_t     *caddr2_pte;    /* PTE for CADDR2 */
                    273:
                    274: struct pmap    kernel_pmap_store;
                    275: struct vm_map  *st_map, *pt_map;
                    276: struct vm_map  st_map_store, pt_map_store;
                    277:
                    278: paddr_t        avail_start;    /* PA of first available physical page */
                    279: paddr_t                avail_end;      /* PA of last available physical page */
                    280: vsize_t                mem_size;       /* memory size in bytes */
                    281: vaddr_t                virtual_avail;  /* VA of first avail page (after kernel bss)*/
                    282: vaddr_t                virtual_end;    /* VA of last avail page (end of kernel AS) */
                    283:
                    284: TAILQ_HEAD(pv_page_list, pv_page) pv_page_freelist;
                    285: int            pv_nfree;
                    286:
                    287: #if defined(M68K_MMU_HP)
                    288: extern int     pmap_aliasmask; /* separation at which VA aliasing is ok */
                    289: #endif
                    290: #if defined(M68040) || defined(M68060)
                    291: int            protostfree;    /* prototype (default) free ST map */
                    292: #endif
                    293:
                    294: struct pool    pmap_pmap_pool; /* memory pool for pmap structures */
                    295:
                    296: /*
                    297:  * Internal routines
                    298:  */
                    299: struct pv_entry        *pmap_alloc_pv(void);
                    300: void            pmap_free_pv(struct pv_entry *);
                    301: #ifdef COMPAT_HPUX
                    302: int             pmap_mapmulti(pmap_t, vaddr_t);
                    303: #endif
                    304: void            pmap_remove_flags(pmap_t, vaddr_t, vaddr_t, int);
                    305: void            pmap_remove_mapping(pmap_t, vaddr_t, pt_entry_t *, int);
                    306: boolean_t       pmap_testbit(struct vm_page *, int);
                    307: void            pmap_changebit(struct vm_page *, int, int);
                    308: int             pmap_enter_ptpage(pmap_t, vaddr_t);
                    309: void            pmap_ptpage_addref(vaddr_t);
                    310: int             pmap_ptpage_delref(vaddr_t);
                    311: void            pmap_collect1(paddr_t, paddr_t);
                    312:
                    313:
                    314: #ifdef PMAP_DEBUG
                    315: void pmap_pvdump(paddr_t);
                    316: void pmap_check_wiring(char *, vaddr_t);
                    317: #endif
                    318:
                    319: /* pmap_remove_mapping flags */
                    320: #define        PRM_TFLUSH      0x01
                    321: #define        PRM_CFLUSH      0x02
                    322: #define        PRM_KEEPPTPAGE  0x04
                    323: #define        PRM_SKIPWIRED   0x08
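                          /*
                           * Typical combinations used below: pmap_remove() passes PRM_TFLUSH
                           * only when the pmap is currently active, and pmap_page_protect()
                           * asks for PRM_TFLUSH|PRM_CFLUSH when tearing down every mapping of
                           * a page.  PRM_SKIPWIRED makes pmap_remove_flags() leave wired
                           * mappings in place.
                           */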
                    324:
                    325: static struct pv_entry *pa_to_pvh(paddr_t);
                    326: static struct pv_entry *pg_to_pvh(struct vm_page *);
                    327:
                    328: static __inline struct pv_entry *
                    329: pa_to_pvh(paddr_t pa)
                    330: {
                    331:        struct vm_page *pg;
                    332:
                    333:        pg = PHYS_TO_VM_PAGE(pa);
                    334:        return &pg->mdpage.pvent;
                    335: }
                    336:
                    337: static __inline struct pv_entry *
                    338: pg_to_pvh(struct vm_page *pg)
                    339: {
                    340:        return &pg->mdpage.pvent;
                    341: }
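                          /*
                           * Each managed page keeps its pv list head inside the vm_page
                           * itself (mdpage.pvent); an empty list is marked by a head entry
                           * whose pv_pmap is NULL.  A sketch of the usual traversal (compare
                           * pmap_page_protect() below):
                           *
                           *     struct pv_entry *pv;
                           *
                           *     for (pv = pg_to_pvh(pg); pv != NULL && pv->pv_pmap != NULL;
                           *         pv = pv->pv_next)
                           *             ... examine the <pv->pv_pmap, pv->pv_va> mapping ...
                           */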
                    342:
                    343: #ifdef PMAP_STEAL_MEMORY
                    344: vaddr_t
                    345: pmap_steal_memory(size, vstartp, vendp)
                    346:        vsize_t size;
                    347:        vaddr_t *vstartp, *vendp;
                    348: {
                    349:        vaddr_t va;
                    350:        u_int npg;
                    351:
                    352:        size = round_page(size);
                    353:        npg = atop(size);
                    354:
                    355:        /* m68k systems which define PMAP_STEAL_MEMORY only have one segment. */
                    356: #ifdef DIAGNOSTIC
                    357:        if (vm_physmem[0].avail_end - vm_physmem[0].avail_start < npg)
                    358:                panic("pmap_steal_memory(%x): out of memory", size);
                    359: #endif
                    360:
                    361:        va = ptoa(vm_physmem[0].avail_start);
                    362:        vm_physmem[0].avail_start += npg;
                    363:        vm_physmem[0].start += npg;
                    364:
                    365:        if (vstartp != NULL)
                    366:                *vstartp = virtual_avail;
                    367:        if (vendp != NULL)
                    368:                *vendp = virtual_end;
                    369:
                    370:        bzero((void *)va, size);
                    371:        return (va);
                    372: }
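                          /*
                           * (pmap_steal_memory() is called by uvm_pageboot_alloc() very early
                           * in bootstrap, before the vm_page array exists, which is why it can
                           * simply carve whole pages off the front of vm_physmem[0].)
                           */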
                    373: #else
                    374: /*
                    375:  * pmap_virtual_space:         [ INTERFACE ]
                    376:  *
                    377:  *     Report the range of available kernel virtual address
                    378:  *     space to the VM system during bootstrap.
                    379:  *
                    380:  *     This is only an interface function if we do not use
                    381:  *     pmap_steal_memory()!
                    382:  *
                    383:  *     Note: no locking is necessary in this function.
                    384:  */
                    385: void
                    386: pmap_virtual_space(vstartp, vendp)
                    387:        vaddr_t *vstartp, *vendp;
                    388: {
                    389:
                    390:        *vstartp = virtual_avail;
                    391:        *vendp = virtual_end;
                    392: }
                    393: #endif
                    394:
                    395: /*
                    396:  * pmap_init:                  [ INTERFACE ]
                    397:  *
                    398:  *     Initialize the pmap module.  Called by vm_init(), to initialize any
                    399:  *     structures that the pmap system needs to map virtual memory.
                    400:  *
                    401:  *     Note: no locking is necessary in this function.
                    402:  */
                    403: void
                    404: pmap_init()
                    405: {
                    406:        vaddr_t         addr, addr2;
                    407:        vsize_t         s;
                    408:        int             rv;
                    409:        int             npages;
                    410:
                    411:        PMAP_DPRINTF(PDB_FOLLOW, ("pmap_init()\n"));
                    412:
                    413:        /*
                    414:         * Before we do anything else, initialize the PTE pointers
                    415:         * used by pmap_zero_page() and pmap_copy_page().
                    416:         */
                    417:        caddr1_pte = pmap_pte(pmap_kernel(), CADDR1);
                    418:        caddr2_pte = pmap_pte(pmap_kernel(), CADDR2);
                    419:
                    420:        /*
                    421:         * Now that kernel map has been allocated, we can mark as
                    422:         * unavailable regions which we have mapped in pmap_bootstrap().
                    423:         */
                    424:        PMAP_INIT_MD();
                    425:        addr = (vaddr_t) Sysmap;
                    426:        if (uvm_map(kernel_map, &addr, MACHINE_MAX_PTSIZE,
                    427:                    NULL, UVM_UNKNOWN_OFFSET, 0,
                    428:                    UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE,
                    429:                                UVM_INH_NONE, UVM_ADV_RANDOM,
                    430:                                UVM_FLAG_FIXED))) {
                    431:                /*
                    432:                 * If this fails, it is probably because the static
                    433:                 * portion of the kernel page table isn't big enough
                    434:                 * and we overran the page table map.
                    435:                 */
                    436:                panic("pmap_init: bogons in the VM system!");
                    437:        }
                    438:
                    439:        PMAP_DPRINTF(PDB_INIT,
                    440:            ("pmap_init: Sysseg %p, Sysmap %p, Sysptmap %p\n",
                    441:            Sysseg, Sysmap, Sysptmap));
                    442:        PMAP_DPRINTF(PDB_INIT,
                    443:            ("  pstart %lx, pend %lx, vstart %lx, vend %lx\n",
                    444:            avail_start, avail_end, virtual_avail, virtual_end));
                    445:
                    446:        /*
                     447:         * Allocate memory for the initial segment table.
                    448:         */
                    449:        addr = uvm_km_zalloc(kernel_map, round_page(MACHINE_STSIZE));
                    450:        if (addr == 0)
                    451:                panic("pmap_init: can't allocate data structures");
                    452:
                    453:        Segtabzero = (st_entry_t *) addr;
                    454:        pmap_extract(pmap_kernel(), addr, (paddr_t *)&Segtabzeropa);
                    455: #ifdef M68060
                    456:        if (mmutype == MMU_68060) {
                    457:                for (addr2 = addr; addr2 < addr + MACHINE_STSIZE;
                    458:                    addr2 += PAGE_SIZE) {
                    459:                        pt_entry_t *pte;
                    460:
                    461:                        pte = pmap_pte(pmap_kernel(), addr2);
                    462:                        *pte = (*pte | PG_CI) & ~PG_CCB;
                    463:                        TBIS(addr2);
                    464:                }
                    465:                DCIS();
                    466:        }
                    467: #endif
                    468:        addr += MACHINE_STSIZE;
                    469:
                    470:        PMAP_DPRINTF(PDB_INIT, ("pmap_init: s0 %p(%p)\n",
                    471:            Segtabzero, Segtabzeropa));
                    472:
                    473:        /*
                    474:         * Allocate physical memory for kernel PT pages and their management.
                    475:         * We need 1 PT page per possible task plus some slop.
                    476:         */
                    477:        npages = min(atop(MACHINE_MAX_KPTSIZE), maxproc+16);
                    478:        s = ptoa(npages) + round_page(npages * sizeof(struct kpt_page));
                    479:
                    480:        /*
                     481:         * Verify that space will be allocated in the region for which
                    482:         * we already have kernel PT pages.
                    483:         */
                    484:        addr = 0;
                    485:        rv = uvm_map(kernel_map, &addr, s, NULL, UVM_UNKNOWN_OFFSET, 0,
                    486:                     UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
                    487:                                 UVM_ADV_RANDOM, UVM_FLAG_NOMERGE));
                    488:        if (rv || (addr + s) >= (vaddr_t)Sysmap)
                    489:                panic("pmap_init: kernel PT too small");
                    490:        uvm_unmap(kernel_map, addr, addr + s);
                    491:
                    492:        /*
                    493:         * Now allocate the space and link the pages together to
                    494:         * form the KPT free list.
                    495:         */
                    496:        addr = uvm_km_zalloc(kernel_map, s);
                    497:        if (addr == 0)
                    498:                panic("pmap_init: cannot allocate KPT free list");
                    499:        s = ptoa(npages);
                    500:        addr2 = addr + s;
                    501:        kpt_pages = &((struct kpt_page *)addr2)[npages];
                    502:        kpt_free_list = NULL;
                    503:        do {
                    504:                addr2 -= PAGE_SIZE;
                    505:                (--kpt_pages)->kpt_next = kpt_free_list;
                    506:                kpt_free_list = kpt_pages;
                    507:                kpt_pages->kpt_va = addr2;
                    508:                pmap_extract(pmap_kernel(), addr2, &kpt_pages->kpt_pa);
                    509: #ifdef M68060
                    510:                if (mmutype == MMU_68060) {
                    511:                        pt_entry_t *pte;
                    512:
                    513:                        pte = pmap_pte(pmap_kernel(), addr2);
                    514:                        *pte = (*pte | PG_CI) & ~PG_CCB;
                    515:                        TBIS(addr2);
                    516:                }
                    517: #endif
                    518:        } while (addr != addr2);
                    519: #ifdef M68060
                    520:        if (mmutype == MMU_68060)
                    521:                DCIS();
                    522: #endif
                    523:
                    524:        PMAP_DPRINTF(PDB_INIT, ("pmap_init: KPT: %ld pages from %lx to %lx\n",
                    525:            atop(s), addr, addr + s));
                    526:
                    527:        /*
                    528:         * Allocate the segment table map and the page table map.
                    529:         */
                    530:        s = maxproc * MACHINE_STSIZE;
                    531:        st_map = uvm_km_suballoc(kernel_map, &addr, &addr2, s, 0, FALSE,
                    532:            &st_map_store);
                    533:
                    534:        addr = MACHINE_PTBASE;
                    535:        if ((MACHINE_PTMAXSIZE / MACHINE_MAX_PTSIZE) < maxproc) {
                    536:                s = MACHINE_PTMAXSIZE;
                    537:                /*
                    538:                 * XXX We don't want to hang when we run out of
                    539:                 * page tables, so we lower maxproc so that fork()
                    540:                 * will fail instead.  Note that root could still raise
                    541:                 * this value via sysctl(3).
                    542:                 */
                    543:                maxproc = (MACHINE_PTMAXSIZE / MACHINE_MAX_PTSIZE);
                    544:        } else
                    545:                s = (maxproc * MACHINE_MAX_PTSIZE);
                    546:        pt_map = uvm_km_suballoc(kernel_map, &addr, &addr2, s, 0,
                    547:            TRUE, &pt_map_store);
                    548:
                    549: #if defined(M68040) || defined(M68060)
                    550:        if (mmutype <= MMU_68040) {
                    551:                protostfree = ~l2tobm(0);
                    552:                for (rv = MAXUL2SIZE; rv < sizeof(protostfree)*NBBY; rv++)
                    553:                        protostfree &= ~l2tobm(rv);
                    554:        }
                    555: #endif
                    556:
                    557:        /*
                    558:         * Initialize the pmap pools.
                    559:         */
                    560:        pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl",
                    561:            &pool_allocator_nointr);
                    562: }
                    563:
                    564: /*
                    565:  * pmap_alloc_pv:
                    566:  *
                    567:  *     Allocate a pv_entry.
                    568:  */
                    569: struct pv_entry *
                    570: pmap_alloc_pv()
                    571: {
                    572:        struct pv_page *pvp;
                    573:        struct pv_entry *pv;
                    574:        int i;
                    575:
                    576:        if (pv_nfree == 0) {
                    577:                pvp = (struct pv_page *)uvm_km_kmemalloc(kernel_map,
                    578:                    uvm.kernel_object, PAGE_SIZE, UVM_KMF_NOWAIT);
                    579:                if (pvp == NULL)
                    580:                        return NULL;
                    581:                pvp->pvp_pgi.pgi_freelist = pv = &pvp->pvp_pv[1];
                    582:                for (i = NPVPPG - 2; i; i--, pv++)
                    583:                        pv->pv_next = pv + 1;
                    584:                pv->pv_next = 0;
                    585:                pv_nfree += pvp->pvp_pgi.pgi_nfree = NPVPPG - 1;
                    586:                TAILQ_INSERT_HEAD(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
                    587:                pv = &pvp->pvp_pv[0];
                    588:        } else {
                    589:                --pv_nfree;
                    590:                pvp = TAILQ_FIRST(&pv_page_freelist);
                    591:                if (--pvp->pvp_pgi.pgi_nfree == 0) {
                    592:                        TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
                    593:                }
                    594:                pv = pvp->pvp_pgi.pgi_freelist;
                    595: #ifdef DIAGNOSTIC
                    596:                if (pv == 0)
                    597:                        panic("pmap_alloc_pv: pgi_nfree inconsistent");
                    598: #endif
                    599:                pvp->pvp_pgi.pgi_freelist = pv->pv_next;
                    600:        }
                    601:        return pv;
                    602: }
                    603:
                    604: /*
                    605:  * pmap_free_pv:
                    606:  *
                    607:  *     Free a pv_entry.
                    608:  */
                    609: void
                    610: pmap_free_pv(pv)
                    611:        struct pv_entry *pv;
                    612: {
                    613:        struct pv_page *pvp;
                    614:
                    615:        pvp = (struct pv_page *) trunc_page((vaddr_t)pv);
                    616:        switch (++pvp->pvp_pgi.pgi_nfree) {
                    617:        case 1:
                     618:                TAILQ_INSERT_TAIL(&pv_page_freelist, pvp, pvp_pgi.pgi_list);   /* FALLTHROUGH */
                    619:        default:
                    620:                pv->pv_next = pvp->pvp_pgi.pgi_freelist;
                    621:                pvp->pvp_pgi.pgi_freelist = pv;
                    622:                ++pv_nfree;
                    623:                break;
                    624:        case NPVPPG:
                    625:                pv_nfree -= NPVPPG - 1;
                    626:                TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
                    627:                uvm_km_free(kernel_map, (vaddr_t)pvp, PAGE_SIZE);
                    628:                break;
                    629:        }
                    630: }
                    631:
                    632: /*
                    633:  * pmap_create:                        [ INTERFACE ]
                    634:  *
                    635:  *     Create and return a physical map.
                    636:  *
                    637:  *     Note: no locking is necessary in this function.
                    638:  */
                    639: pmap_t
                    640: pmap_create()
                    641: {
                    642:        pmap_t pmap;
                    643:
                    644:        PMAP_DPRINTF(PDB_FOLLOW|PDB_CREATE,
                    645:            ("pmap_create\n"));
                    646:
                    647:        pmap = pool_get(&pmap_pmap_pool, PR_WAITOK);
                    648:        bzero(pmap, sizeof(*pmap));
                    649:
                    650:        /*
                    651:         * No need to allocate page table space yet but we do need a
                    652:         * valid segment table.  Initially, we point everyone at the
                    653:         * "null" segment table.  On the first pmap_enter, a real
                    654:         * segment table will be allocated.
                    655:         */
                    656:        pmap->pm_stab = Segtabzero;
                    657:        pmap->pm_stpa = Segtabzeropa;
                    658: #if defined(M68040) || defined(M68060)
                    659:        if (mmutype <= MMU_68040)
                    660:                pmap->pm_stfree = protostfree;
                    661: #endif
                    662:        pmap->pm_count = 1;
                    663:        simple_lock_init(&pmap->pm_lock);
                    664:
                    665:        return pmap;
                    666: }
                    667:
                    668: /*
                    669:  * pmap_destroy:               [ INTERFACE ]
                    670:  *
                    671:  *     Drop the reference count on the specified pmap, releasing
                    672:  *     all resources if the reference count drops to zero.
                    673:  */
                    674: void
                    675: pmap_destroy(pmap)
                    676:        pmap_t pmap;
                    677: {
                    678:        int count;
                    679:
                    680:        PMAP_DPRINTF(PDB_FOLLOW, ("pmap_destroy(%p)\n", pmap));
                    681:        simple_lock(&pmap->pm_lock);
                    682:        count = --pmap->pm_count;
                    683:        simple_unlock(&pmap->pm_lock);
                    684:        if (count == 0) {
                    685:                if (pmap->pm_ptab) {
                    686:                        pmap_remove(pmap_kernel(), (vaddr_t)pmap->pm_ptab,
                    687:                            (vaddr_t)pmap->pm_ptab + MACHINE_MAX_PTSIZE);
                    688:                        pmap_update(pmap_kernel());
                    689:                        uvm_km_pgremove(uvm.kernel_object,
                    690:                            (vaddr_t)pmap->pm_ptab,
                    691:                            (vaddr_t)pmap->pm_ptab + MACHINE_MAX_PTSIZE);
                    692:                        uvm_km_free_wakeup(pt_map, (vaddr_t)pmap->pm_ptab,
                    693:                                           MACHINE_MAX_PTSIZE);
                    694:                }
                    695:                KASSERT(pmap->pm_stab == Segtabzero);
                    696:                pool_put(&pmap_pmap_pool, pmap);
                    697:        }
                    698: }
                    699:
                    700: /*
                    701:  * pmap_reference:             [ INTERFACE ]
                    702:  *
                    703:  *     Add a reference to the specified pmap.
                    704:  */
                    705: void
                    706: pmap_reference(pmap)
                    707:        pmap_t  pmap;
                    708: {
                    709:
                    710:        PMAP_DPRINTF(PDB_FOLLOW, ("pmap_reference(%p)\n", pmap));
                    711:        simple_lock(&pmap->pm_lock);
                    712:        pmap->pm_count++;
                    713:        simple_unlock(&pmap->pm_lock);
                    714: }
                    715:
                    716: /*
                    717:  * pmap_activate:              [ INTERFACE ]
                    718:  *
                    719:  *     Activate the pmap used by the specified process.  This includes
                    720:  *     reloading the MMU context of the current process, and marking
                    721:  *     the pmap in use by the processor.
                    722:  *
                    723:  *     Note: we may only use spin locks here, since we are called
                    724:  *     by a critical section in cpu_switch()!
                    725:  */
                    726: void
                    727: pmap_activate(p)
                    728:        struct proc *p;
                    729: {
                    730:        pmap_t pmap = p->p_vmspace->vm_map.pmap;
                    731:
                    732:        PMAP_DPRINTF(PDB_FOLLOW|PDB_SEGTAB,
                    733:            ("pmap_activate(%p)\n", p));
                    734:
                    735:        PMAP_ACTIVATE(pmap, p == curproc);
                    736: }
                    737:
                    738: /*
                    739:  * pmap_deactivate:            [ INTERFACE ]
                    740:  *
                    741:  *     Mark that the pmap used by the specified process is no longer
                    742:  *     in use by the processor.
                    743:  *
                    744:  *     The comment above pmap_activate() wrt. locking applies here,
                    745:  *     as well.
                    746:  */
                    747: void
                    748: pmap_deactivate(p)
                    749:        struct proc *p;
                    750: {
                    751:
                    752:        /* No action necessary in this pmap implementation. */
                    753: }
                    754:
                    755: /*
                    756:  * pmap_remove:                        [ INTERFACE ]
                    757:  *
                    758:  *     Remove the given range of addresses from the specified map.
                    759:  *
                    760:  *     It is assumed that the start and end are properly
                    761:  *     rounded to the page size.
                    762:  */
                    763: void
                    764: pmap_remove(pmap, sva, eva)
                    765:        pmap_t pmap;
                    766:        vaddr_t sva, eva;
                    767: {
                    768:        int flags;
                    769:
                    770:        PMAP_DPRINTF(PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT,
                    771:            ("pmap_remove(%p, %lx, %lx)\n", pmap, sva, eva));
                    772:
                    773:        flags = active_pmap(pmap) ? PRM_TFLUSH : 0;
                    774:        pmap_remove_flags(pmap, sva, eva, flags);
                    775: }
                    776:
                    777: void
                    778: pmap_remove_flags(pmap, sva, eva, flags)
                    779:        pmap_t pmap;
                    780:        vaddr_t sva, eva;
                    781:        int flags;
                    782: {
                    783:        vaddr_t nssva;
                    784:        pt_entry_t *pte;
                    785: #ifdef M68K_MMU_HP
                    786:        boolean_t firstpage, needcflush;
                    787: #endif
                    788:
                    789: #ifdef M68K_MMU_HP
                    790:        firstpage = TRUE;
                    791:        needcflush = FALSE;
                    792: #endif
                    793:        while (sva < eva) {
                    794:                nssva = m68k_trunc_seg(sva) + NBSEG;
                    795:                if (nssva == 0 || nssva > eva)
                    796:                        nssva = eva;
                    797:
                    798:                /*
                    799:                 * Invalidate every valid mapping within this segment.
                    800:                 */
                    801:
                    802:                pte = pmap_pte(pmap, sva);
                    803:                while (sva < nssva) {
                    804:
                    805:                        /*
                    806:                         * If this segment is unallocated,
                    807:                         * skip to the next segment boundary.
                    808:                         */
                    809:
                    810:                        if (!pmap_ste_v(pmap, sva)) {
                    811:                                sva = nssva;
                    812:                                break;
                    813:                        }
                    814:                        if (pmap_pte_v(pte)) {
                    815:                                if ((flags & PRM_SKIPWIRED) &&
                    816:                                    pmap_pte_w(pte))
                    817:                                        goto skip;
                    818: #ifdef M68K_MMU_HP
                    819:                                if (pmap_aliasmask) {
                    820:                                        /*
                    821:                                         * Purge kernel side of VAC to ensure
                    822:                                         * we get the correct state of any
                    823:                                         * hardware maintained bits.
                    824:                                         */
                    825:                                        if (firstpage) {
                    826:                                                DCIS();
                    827:                                        }
                    828:                                        /*
                    829:                                         * Remember if we may need to
                    830:                                         * flush the VAC due to a non-CI
                    831:                                         * mapping.
                    832:                                         */
                    833:                                        if (!needcflush && !pmap_pte_ci(pte))
                    834:                                                needcflush = TRUE;
                    835:
                    836:                                        firstpage = FALSE;
                    837:                                }
                    838: #endif
                    839:                                pmap_remove_mapping(pmap, sva, pte, flags);
                     840: skip:                         ;
                    841:                        }
                    842:                        pte++;
                    843:                        sva += PAGE_SIZE;
                    844:                }
                    845:        }
                    846: #ifdef M68K_MMU_HP
                    847:        if (pmap_aliasmask) {
                    848:                /*
                    849:                 * Didn't do anything, no need for cache flushes
                    850:                 */
                    851:                if (firstpage)
                    852:                        return;
                    853:                /*
                    854:                 * In a couple of cases, we don't need to worry about flushing
                    855:                 * the VAC:
                    856:                 *      1. if this is a kernel mapping,
                    857:                 *         we have already done it
                    858:                 *      2. if it is a user mapping not for the current process,
                    859:                 *         it won't be there
                    860:                 */
                    861:                if (!active_user_pmap(pmap))
                    862:                        needcflush = FALSE;
                    863:                if (needcflush) {
                    864:                        if (pmap == pmap_kernel()) {
                    865:                                DCIS();
                    866:                        } else {
                    867:                                DCIU();
                    868:                        }
                    869:                }
                    870:        }
                    871: #endif
                    872: }
                    873:
                    874: /*
                    875:  * pmap_page_protect:          [ INTERFACE ]
                    876:  *
                    877:  *     Lower the permission for all mappings to a given page to
                    878:  *     the permissions specified.
                    879:  */
                    880: void
                    881: pmap_page_protect(pg, prot)
                    882:        struct vm_page *pg;
                    883:        vm_prot_t       prot;
                    884: {
                    885:        struct pv_entry *pv;
                    886:        int s;
                    887:
                    888: #ifdef PMAP_DEBUG
                    889:        if ((pmapdebug & (PDB_FOLLOW|PDB_PROTECT)) ||
                    890:            (prot == VM_PROT_NONE && (pmapdebug & PDB_REMOVE)))
                    891:                printf("pmap_page_protect(%lx, %x)\n", pg, prot);
                    892: #endif
                    893:
                    894:        if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
                    895:                pv = pg_to_pvh(pg);
                    896:                s = splvm();
                    897:                while (pv->pv_pmap != NULL) {
                    898:                        pt_entry_t *pte;
                    899:
                    900:                        pte = pmap_pte(pv->pv_pmap, pv->pv_va);
                    901: #ifdef PMAP_DEBUG
                    902:                        if (!pmap_ste_v(pv->pv_pmap, pv->pv_va) ||
                    903:                            pmap_pte_pa(pte) != VM_PAGE_TO_PHYS(pg))
                    904:                                panic("pmap_page_protect: bad mapping");
                    905: #endif
                    906:                        pmap_remove_mapping(pv->pv_pmap, pv->pv_va,
                    907:                            pte, PRM_TFLUSH|PRM_CFLUSH);
                    908:                }
                    909:                splx(s);
                    910:        } else if ((prot & VM_PROT_WRITE) == VM_PROT_NONE)
                    911:                pmap_changebit(pg, PG_RO, ~0);
                    912: }
                    913:
                    914: /*
                    915:  * pmap_protect:               [ INTERFACE ]
                    916:  *
                    917:  *     Set the physical protection on the specified range of this map
                    918:  *     as requested.
                    919:  */
                    920: void
                    921: pmap_protect(pmap, sva, eva, prot)
                    922:        pmap_t          pmap;
                    923:        vaddr_t         sva, eva;
                    924:        vm_prot_t       prot;
                    925: {
                    926:        vaddr_t nssva;
                    927:        pt_entry_t *pte;
                    928:        boolean_t needtflush;
                    929:        int isro;
                    930: #ifdef M68K_MMU_HP
                    931:        boolean_t firstpage;
                    932: #endif
                    933:
                    934:        PMAP_DPRINTF(PDB_FOLLOW|PDB_PROTECT,
                    935:            ("pmap_protect(%p, %lx, %lx, %x)\n",
                    936:            pmap, sva, eva, prot));
                    937:
                    938:        if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
                    939:                pmap_remove(pmap, sva, eva);
                    940:                return;
                    941:        }
                    942:
                    943:        isro = pte_prot(prot);
                    944:        needtflush = active_pmap(pmap);
                    945: #ifdef M68K_MMU_HP
                    946:        firstpage = TRUE;
                    947: #endif
                    948:        while (sva < eva) {
                    949:                nssva = m68k_trunc_seg(sva) + NBSEG;
                    950:                if (nssva == 0 || nssva > eva)
                    951:                        nssva = eva;
                    952:                /*
                    953:                 * If VA belongs to an unallocated segment,
                    954:                 * skip to the next segment boundary.
                    955:                 */
                    956:                if (!pmap_ste_v(pmap, sva)) {
                    957:                        sva = nssva;
                    958:                        continue;
                    959:                }
                    960:                /*
                    961:                 * Change protection on mapping if it is valid and doesn't
                    962:                 * already have the correct protection.
                    963:                 */
                    964:                pte = pmap_pte(pmap, sva);
                    965:                while (sva < nssva) {
                    966:                        if (pmap_pte_v(pte) && pmap_pte_prot_chg(pte, isro)) {
                    967: #ifdef M68K_MMU_HP
                    968:                                /*
                    969:                                 * Purge kernel side of VAC to ensure we
                    970:                                 * get the correct state of any hardware
                    971:                                 * maintained bits.
                    972:                                 *
                    973:                                 * XXX do we need to clear the VAC in
                    974:                                 * general to reflect the new protection?
                    975:                                 */
                    976:                                if (firstpage && pmap_aliasmask)
                    977:                                        DCIS();
                    978: #endif
                    979: #if defined(M68040) || defined(M68060)
                    980:                                /*
                    981:                                 * Clear caches if making RO (see section
                    982:                                 * "7.3 Cache Coherency" in the manual).
                    983:                                 */
                    984:                                if (isro && mmutype <= MMU_68040) {
                    985:                                        paddr_t pa = pmap_pte_pa(pte);
                    986:
                    987:                                        DCFP(pa);
                    988:                                        ICPP(pa);
                    989:                                }
                    990: #endif
                    991:                                pmap_pte_set_prot(pte, isro);
                    992:                                if (needtflush)
                    993:                                        TBIS(sva);
                    994: #ifdef M68K_MMU_HP
                    995:                                firstpage = FALSE;
                    996: #endif
                    997:                        }
                    998:                        pte++;
                    999:                        sva += PAGE_SIZE;
                   1000:                }
                   1001:        }
                   1002: }
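/*
 * Usage sketch (hypothetical caller, not part of this file): write-protect a
 * page-aligned range in the kernel map.  A protection without VM_PROT_READ
 * would instead take the pmap_remove() shortcut at the top of pmap_protect().
 */
#if 0
	pmap_protect(pmap_kernel(), va, va + PAGE_SIZE,
	    VM_PROT_READ | VM_PROT_EXECUTE);
	pmap_update(pmap_kernel());
#endif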
                   1003:
                   1004: /*
                   1005:  * pmap_enter:                 [ INTERFACE ]
                   1006:  *
                   1007:  *     Insert the given physical page (pa) at
                   1008:  *     the specified virtual address (va) in the
                   1009:  *     target physical map with the protection requested.
                   1010:  *
                   1011:  *     If specified, the page will be wired down, meaning
                   1012:  *     that the related pte cannot be reclaimed.
                   1013:  *
                   1014:  *     Note: This is the only routine which MAY NOT lazy-evaluate
                   1015:  *     or lose information.  That is, this routine must actually
                   1016:  *     insert this page into the given map NOW.
                   1017:  */
                   1018: int
                   1019: pmap_enter(pmap, va, pa, prot, flags)
                   1020:        pmap_t pmap;
                   1021:        vaddr_t va;
                   1022:        paddr_t pa;
                   1023:        vm_prot_t prot;
                   1024:        int flags;
                   1025: {
                   1026:        pt_entry_t pte;
                   1027:
                   1028:        pte = 0;
                   1029: #if defined(M68040) || defined(M68060)
                   1030:        if (mmutype <= MMU_68040 && (pte_prot(prot) & PG_PROT) == PG_RW)
                   1031: #ifdef PMAP_DEBUG
                   1032:                if (dowriteback && (dokwriteback || pmap != pmap_kernel()))
                   1033: #endif
                   1034:                pte |= PG_CCB;
                   1035: #endif
                   1036:        return (pmap_enter_cache(pmap, va, pa, prot, flags, pte));
                   1037: }
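/*
 * Usage sketch (hypothetical caller, not part of this file): enter a wired,
 * writable mapping and have resource shortage reported via PMAP_CANFAIL
 * instead of panicking.
 */
#if 0
	int error;

	error = pmap_enter(pmap, va, pa, VM_PROT_READ | VM_PROT_WRITE,
	    PMAP_WIRED | PMAP_CANFAIL);
	if (error != 0) {
		/* ENOMEM: back off and retry once resources are available */
	}
#endif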
                   1038:
                   1039: /*
                   1040:  * Similar to pmap_enter(), but allows the caller to control the
                    1041:  * cacheability of the mapping. However, if it is found that this mapping
                   1042:  * needs to be cache inhibited, the cache bits from the caller are ignored.
                   1043:  */
                   1044: int
                   1045: pmap_enter_cache(pmap, va, pa, prot, flags, template)
                   1046:        pmap_t pmap;
                   1047:        vaddr_t va;
                   1048:        paddr_t pa;
                   1049:        vm_prot_t prot;
                   1050:        int flags;
                   1051:        pt_entry_t template;
                   1052: {
                   1053:        struct vm_page *pg;
                   1054:        pt_entry_t *pte;
                   1055:        int npte, error;
                   1056:        paddr_t opa;
                   1057:        boolean_t cacheable = TRUE;
                   1058: #ifdef M68K_MMU_HP
                   1059:        boolean_t checkpv = TRUE;
                   1060: #endif
                   1061:        boolean_t wired = (flags & PMAP_WIRED) != 0;
                   1062:
                   1063:        PMAP_DPRINTF(PDB_FOLLOW|PDB_ENTER,
                   1064:            ("pmap_enter_cache(%p, %lx, %lx, %x, %x, %x)\n",
                   1065:            pmap, va, pa, prot, wired, template));
                   1066:
                   1067: #ifdef DIAGNOSTIC
                   1068:        /*
                   1069:         * pmap_enter() should never be used for CADDR1 and CADDR2.
                   1070:         */
                   1071:        if (pmap == pmap_kernel() &&
                   1072:            (va == (vaddr_t)CADDR1 || va == (vaddr_t)CADDR2))
                   1073:                panic("pmap_enter: used for CADDR1 or CADDR2");
                   1074: #endif
                   1075:
                   1076:        /*
                   1077:         * For user mapping, allocate kernel VM resources if necessary.
                   1078:         */
                   1079:        if (pmap->pm_ptab == NULL)
                   1080:                pmap->pm_ptab = (pt_entry_t *)
                   1081:                        uvm_km_valloc_wait(pt_map, MACHINE_MAX_PTSIZE);
                   1082:
                   1083:        /*
                   1084:         * Segment table entry not valid, we need a new PT page
                   1085:         */
                   1086:        if (!pmap_ste_v(pmap, va)) {
                   1087:                error = pmap_enter_ptpage(pmap, va);
                   1088:                if (error != 0) {
                    1089:                        if (flags & PMAP_CANFAIL)
                   1090:                                return (error);
                   1091:                        else
                   1092:                                panic("pmap_enter: out of address space");
                   1093:                }
                   1094:        }
                   1095:
                   1096:        pa = trunc_page(pa);
                   1097:        pte = pmap_pte(pmap, va);
                   1098:        opa = pmap_pte_pa(pte);
                   1099:
                   1100:        PMAP_DPRINTF(PDB_ENTER, ("enter: pte %p, *pte %x\n", pte, *pte));
                   1101:
                   1102:        /*
                   1103:         * Mapping has not changed, must be protection or wiring change.
                   1104:         */
                   1105:        if (opa == pa) {
                   1106:                /*
                   1107:                 * Wiring change, just update stats.
                   1108:                 * We don't worry about wiring PT pages as they remain
                   1109:                 * resident as long as there are valid mappings in them.
                   1110:                 * Hence, if a user page is wired, the PT page will be also.
                   1111:                 */
                   1112:                if (pmap_pte_w_chg(pte, wired ? PG_W : 0)) {
                   1113:                        PMAP_DPRINTF(PDB_ENTER,
                   1114:                            ("enter: wiring change -> %x\n", wired));
                   1115:                        if (wired)
                   1116:                                pmap->pm_stats.wired_count++;
                   1117:                        else
                   1118:                                pmap->pm_stats.wired_count--;
                   1119:                }
                   1120:                /*
                   1121:                 * Retain cache inhibition status
                   1122:                 */
                   1123: #ifdef M68K_MMU_HP
                   1124:                checkpv = FALSE;
                   1125: #endif
                   1126:                if (pmap_pte_ci(pte))
                   1127:                        cacheable = FALSE;
                   1128:                goto validate;
                   1129:        }
                   1130:
                   1131:        /*
                   1132:         * Mapping has changed, invalidate old range and fall through to
                   1133:         * handle validating new mapping.
                   1134:         */
                   1135:        if (opa) {
                   1136:                PMAP_DPRINTF(PDB_ENTER,
                   1137:                    ("enter: removing old mapping %lx\n", va));
                   1138:                pmap_remove_mapping(pmap, va, pte,
                   1139:                    PRM_TFLUSH|PRM_CFLUSH|PRM_KEEPPTPAGE);
                   1140:        }
                   1141:
                   1142:        /*
                   1143:         * If this is a new user mapping, increment the wiring count
                   1144:         * on this PT page.  PT pages are wired down as long as there
                   1145:         * is a valid mapping in the page.
                   1146:         */
                   1147:        if (pmap != pmap_kernel()) {
                   1148:                pmap_ptpage_addref(trunc_page((vaddr_t)pte));
                   1149:        }
                   1150:
                   1151:        /*
                   1152:         * Enter on the PV list if part of our managed memory
                   1153:         * Note that we raise IPL while manipulating the PV list
                   1154:         * since pmap_enter can be called at interrupt time.
                   1155:         */
                   1156:        pg = PHYS_TO_VM_PAGE(pa);
                   1157:        if (pg != NULL) {
                   1158:                struct pv_entry *pv, *npv;
                   1159:                int s;
                   1160:
                   1161:                pv = pg_to_pvh(pg);
                   1162:                s = splvm();
                   1163:                PMAP_DPRINTF(PDB_ENTER,
                   1164:                    ("enter: pv at %p: %lx/%p/%p\n",
                   1165:                    pv, pv->pv_va, pv->pv_pmap, pv->pv_next));
                   1166:                /*
                   1167:                 * No entries yet, use header as the first entry
                   1168:                 */
                   1169:                if (pv->pv_pmap == NULL) {
                   1170:                        pv->pv_va = va;
                   1171:                        pv->pv_pmap = pmap;
                   1172:                        pv->pv_next = NULL;
                   1173:                        pv->pv_ptste = NULL;
                   1174:                        pv->pv_ptpmap = NULL;
                   1175:                        pv->pv_flags = 0;
                   1176:                }
                   1177:                /*
                   1178:                 * There is at least one other VA mapping this page.
                   1179:                 * Place this entry after the header.
                   1180:                 */
                   1181:                else {
                   1182: #ifdef PMAP_DEBUG
                   1183:                        for (npv = pv; npv; npv = npv->pv_next)
                   1184:                                if (pmap == npv->pv_pmap && va == npv->pv_va)
                   1185:                                        panic("pmap_enter: already in pv_tab");
                   1186: #endif
                   1187:                        npv = pmap_alloc_pv();
                   1188:                        if (npv == NULL) {
                   1189:                                if (flags & PMAP_CANFAIL) {
                   1190:                                        splx(s);
                   1191:                                        return (ENOMEM);
                   1192:                                } else
                   1193:                                        panic("pmap_enter: pmap_alloc_pv() failed");
                   1194:                        }
                   1195:                        npv->pv_va = va;
                   1196:                        npv->pv_pmap = pmap;
                   1197:                        npv->pv_next = pv->pv_next;
                   1198:                        npv->pv_ptste = NULL;
                   1199:                        npv->pv_ptpmap = NULL;
                   1200:                        npv->pv_flags = 0;
                   1201:                        pv->pv_next = npv;
                   1202: #ifdef M68K_MMU_HP
                   1203:                        /*
                   1204:                         * Since there is another logical mapping for the
                   1205:                         * same page we may need to cache-inhibit the
                   1206:                         * descriptors on those CPUs with external VACs.
                   1207:                         * We don't need to CI if:
                   1208:                         *
                   1209:                         * - No two mappings belong to the same user pmaps.
                   1210:                         *   Since the cache is flushed on context switches
                   1211:                         *   there is no problem between user processes.
                   1212:                         *
                   1213:                         * - Mappings within a single pmap are a certain
                   1214:                         *   magic distance apart.  VAs at these appropriate
                   1215:                         *   boundaries map to the same cache entries or
                   1216:                         *   otherwise don't conflict.
                   1217:                         *
                   1218:                         * To keep it simple, we only check for these special
                   1219:                         * cases if there are only two mappings, otherwise we
                   1220:                         * punt and always CI.
                   1221:                         *
                   1222:                         * Note that there are no aliasing problems with the
                   1223:                         * on-chip data-cache when the WA bit is set.
                   1224:                         */
                   1225:                        if (pmap_aliasmask) {
                   1226:                                if (pv->pv_flags & PV_CI) {
                   1227:                                        PMAP_DPRINTF(PDB_CACHE,
                   1228:                                            ("enter: pa %lx already CI'ed\n",
                   1229:                                            pa));
                   1230:                                        checkpv = cacheable = FALSE;
                   1231:                                } else if (npv->pv_next ||
                   1232:                                           ((pmap == pv->pv_pmap ||
                   1233:                                             pmap == pmap_kernel() ||
                   1234:                                             pv->pv_pmap == pmap_kernel()) &&
                   1235:                                            ((pv->pv_va & pmap_aliasmask) !=
                   1236:                                             (va & pmap_aliasmask)))) {
                   1237:                                        PMAP_DPRINTF(PDB_CACHE,
                   1238:                                            ("enter: pa %lx CI'ing all\n",
                   1239:                                            pa));
                   1240:                                        cacheable = FALSE;
                   1241:                                        pv->pv_flags |= PV_CI;
                   1242:                                }
                   1243:                        }
                   1244: #endif
                   1245:                }
                   1246:
                   1247:                /*
                   1248:                 * Speed pmap_is_referenced() or pmap_is_modified() based
                   1249:                 * on the hint provided in access_type.
                   1250:                 */
                   1251: #ifdef DIAGNOSTIC
                   1252:                if ((flags & VM_PROT_ALL) & ~prot)
                   1253:                        panic("pmap_enter: access type exceeds prot");
                   1254: #endif
                   1255:                if (flags & VM_PROT_WRITE)
                   1256:                        pv->pv_flags |= (PG_U|PG_M);
                   1257:                else if (flags & VM_PROT_ALL)
                   1258:                        pv->pv_flags |= PG_U;
                   1259:
                   1260:                splx(s);
                   1261:        }
                   1262:        /*
                   1263:         * Assumption: if it is not part of our managed memory
                   1264:         * then it must be device memory which may be volatile.
                   1265:         */
                   1266:        else {
                   1267: #ifdef M68K_MMU_HP
                   1268:                checkpv =
                   1269: #endif
                   1270:                cacheable = FALSE;
                   1271:        }
                   1272:
                   1273:        /*
                   1274:         * Increment counters
                   1275:         */
                   1276:        pmap->pm_stats.resident_count++;
                   1277:        if (wired)
                   1278:                pmap->pm_stats.wired_count++;
                   1279:
                   1280: validate:
                   1281: #ifdef M68K_MMU_HP
                   1282:        /*
                   1283:         * Purge kernel side of VAC to ensure we get correct state
                   1284:         * of HW bits so we don't clobber them.
                   1285:         */
                   1286:        if (pmap_aliasmask)
                   1287:                DCIS();
                   1288: #endif
                   1289:        /*
                   1290:         * Build the new PTE.
                   1291:         */
                   1292:        npte = pa | pte_prot(prot) | (*pte & (PG_M|PG_U)) | PG_V;
                   1293:        if (wired)
                   1294:                npte |= PG_W;
                   1295:
                   1296: #if defined(M68040) || defined(M68060)
                    1297:        /* Don't cache writable, executable pages if the process can't take it (e.g. SunOS ones). */
                   1298:        if (mmutype <= MMU_68040 && pmap != pmap_kernel() &&
                   1299:            (curproc->p_md.md_flags & MDP_UNCACHE_WX) &&
                   1300:            (prot & VM_PROT_EXECUTE) && (prot & VM_PROT_WRITE))
                   1301: #ifdef M68K_MMU_HP
                   1302:                checkpv =
                   1303: #endif
                   1304:                cacheable = FALSE;
                   1305: #endif
                   1306:
                   1307: #ifdef M68K_MMU_HP
                   1308:        if (!checkpv && !cacheable)
                   1309: #else
                   1310:        if (!cacheable)
                   1311: #endif
                   1312:                npte |= PG_CI;
                   1313:        else
                   1314:                npte |= template;
                   1315:
                   1316:        PMAP_DPRINTF(PDB_ENTER, ("enter: new pte value %x\n", npte));
                   1317:
                   1318:        /*
                   1319:         * Remember if this was a wiring-only change.
                   1320:         * If so, we need not flush the TLB and caches.
                   1321:         */
                   1322:        wired = ((*pte ^ npte) == PG_W);
                   1323: #if defined(M68040) || defined(M68060)
                   1324:        if (mmutype <= MMU_68040 && !wired) {
                   1325:                DCFP(pa);
                   1326:                ICPP(pa);
                   1327:        }
                   1328: #endif
                   1329:        *pte = npte;
                   1330:        if (!wired && active_pmap(pmap))
                   1331:                TBIS(va);
                   1332: #ifdef M68K_MMU_HP
                   1333:        /*
                   1334:         * The following is executed if we are entering a second
                   1335:         * (or greater) mapping for a physical page and the mappings
                   1336:         * may create an aliasing problem.  In this case we must
                   1337:         * cache inhibit the descriptors involved and flush any
                   1338:         * external VAC.
                   1339:         */
                   1340:        if (checkpv && !cacheable) {
                   1341:                pmap_changebit(pg, PG_CI, ~0);
                   1342:                DCIA();
                   1343: #ifdef PMAP_DEBUG
                   1344:                if ((pmapdebug & (PDB_CACHE|PDB_PVDUMP)) ==
                   1345:                    (PDB_CACHE|PDB_PVDUMP))
                   1346:                        pmap_pvdump(pa);
                   1347: #endif
                   1348:        }
                   1349: #endif
                   1350: #ifdef PMAP_DEBUG
                   1351:        if ((pmapdebug & PDB_WIRING) && pmap != pmap_kernel())
                   1352:                pmap_check_wiring("enter", trunc_page((vaddr_t)pte));
                   1353: #endif
                   1354:
                   1355:        return (0);
                   1356: }
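/*
 * Usage sketch (hypothetical caller, not part of this file; dev_pa is an
 * assumed device page address): a driver would pass PG_CI as the template for
 * device memory, while a normal managed page would pass PG_CCB or 0 and let
 * the aliasing checks above force cache inhibition when needed.
 */
#if 0
	error = pmap_enter_cache(pmap_kernel(), va, dev_pa,
	    VM_PROT_READ | VM_PROT_WRITE, PMAP_WIRED, PG_CI);
#endif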
                   1357:
                   1358: void
                   1359: pmap_kenter_pa(va, pa, prot)
                   1360:        vaddr_t va;
                   1361:        paddr_t pa;
                   1362:        vm_prot_t prot;
                   1363: {
                   1364:        pt_entry_t pte;
                   1365:
                   1366:        pte = pte_prot(prot);
                   1367: #if defined(M68040) || defined(M68060)
                   1368:        if (mmutype <= MMU_68040 && (pte & (PG_PROT)) == PG_RW)
                   1369:                pte |= PG_CCB;
                   1370: #endif
                   1371:        pmap_kenter_cache(va, pa, pte);
                   1372: }
                   1373:
                   1374: /*
                   1375:  * Similar to pmap_kenter_pa(), but allows the caller to control the
                   1376:  * cacheability of the mapping.
                   1377:  */
                   1378: void
                   1379: pmap_kenter_cache(va, pa, template)
                   1380:        vaddr_t va;
                   1381:        paddr_t pa;
                   1382:        pt_entry_t template;
                   1383: {
                   1384:        struct pmap *pmap = pmap_kernel();
                   1385:        pt_entry_t *pte;
                   1386:        int s, npte, error;
                   1387:
                   1388:        PMAP_DPRINTF(PDB_FOLLOW|PDB_ENTER,
                    1389:            ("pmap_kenter_cache(%lx, %lx, %x)\n", va, pa, template));
                   1390:
                   1391:        /*
                   1392:         * Segment table entry not valid, we need a new PT page
                   1393:         */
                   1394:
                   1395:        if (!pmap_ste_v(pmap, va)) {
                   1396:                s = splvm();
                   1397:                error = pmap_enter_ptpage(pmap, va);
                   1398:                if (error != 0)
                   1399:                        panic("pmap_kenter_cache: out of address space");
                   1400:                splx(s);
                   1401:        }
                   1402:
                   1403:        pa = trunc_page(pa);
                   1404:        pte = pmap_pte(pmap, va);
                   1405:
                   1406:        PMAP_DPRINTF(PDB_ENTER, ("kenter: pte %p, *pte %x\n", pte, *pte));
                   1407:        KASSERT(!pmap_pte_v(pte));
                   1408:
                   1409:        /*
                   1410:         * Increment counters
                   1411:         */
                   1412:
                   1413:        pmap->pm_stats.resident_count++;
                   1414:        pmap->pm_stats.wired_count++;
                   1415:
                   1416:        /*
                   1417:         * Build the new PTE.
                   1418:         */
                   1419:
                   1420:        npte = pa | template | PG_V | PG_W;
                   1421:
                   1422:        PMAP_DPRINTF(PDB_ENTER, ("kenter: new pte value %x\n", npte));
                   1423: #if defined(M68040) || defined(M68060)
                   1424:        if (mmutype <= MMU_68040) {
                   1425:                DCFP(pa);
                   1426:                ICPP(pa);
                   1427:        }
                   1428: #endif
                   1429:        *pte = npte;
                   1430: }
                   1431:
                   1432: void
                   1433: pmap_kremove(va, len)
                   1434:        vaddr_t va;
                   1435:        vsize_t len;
                   1436: {
                   1437:        struct pmap *pmap = pmap_kernel();
                   1438:        vaddr_t sva, eva, nssva;
                   1439:        pt_entry_t *pte;
                   1440: #ifdef M68K_MMU_HP
                   1441:        boolean_t firstpage, needcflush;
                   1442: #endif
                   1443:
                   1444:        PMAP_DPRINTF(PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT,
                   1445:            ("pmap_kremove(%lx, %lx)\n", va, len));
                   1446:
                   1447:        sva = va;
                   1448:        eva = va + len;
                   1449: #ifdef M68K_MMU_HP
                   1450:        firstpage = TRUE;
                   1451:        needcflush = FALSE;
                   1452: #endif
                   1453:        while (sva < eva) {
                   1454:                nssva = m68k_trunc_seg(sva) + NBSEG;
                   1455:                if (nssva == 0 || nssva > eva)
                   1456:                        nssva = eva;
                   1457:
                   1458:                /*
                   1459:                 * If VA belongs to an unallocated segment,
                   1460:                 * skip to the next segment boundary.
                   1461:                 */
                   1462:
                   1463:                if (!pmap_ste_v(pmap, sva)) {
                   1464:                        sva = nssva;
                   1465:                        continue;
                   1466:                }
                   1467:
                   1468:                /*
                   1469:                 * Invalidate every valid mapping within this segment.
                   1470:                 */
                   1471:
                   1472:                pte = pmap_pte(pmap, sva);
                   1473:                while (sva < nssva) {
                   1474:                        if (pmap_pte_v(pte)) {
                   1475: #ifdef PMAP_DEBUG
                   1476:                                struct pv_entry *pv;
                   1477:                                int s;
                   1478:
                   1479:                                pv = pa_to_pvh(pmap_pte_pa(pte));
                   1480:                                s = splvm();
                   1481:                                while (pv->pv_pmap != NULL) {
                   1482:                                        KASSERT(pv->pv_pmap != pmap_kernel() ||
                   1483:                                            pv->pv_va != sva);
                   1484:                                        pv = pv->pv_next;
                   1485:                                        if (pv == NULL) {
                   1486:                                                break;
                   1487:                                        }
                   1488:                                }
                   1489:                                splx(s);
                   1490: #endif
                   1491: #ifdef M68K_MMU_HP
                   1492:                                if (pmap_aliasmask) {
                   1493:
                   1494:                                        /*
                   1495:                                         * Purge kernel side of VAC to ensure
                   1496:                                         * we get the correct state of any
                   1497:                                         * hardware maintained bits.
                   1498:                                         */
                   1499:
                   1500:                                        if (firstpage) {
                   1501:                                                DCIS();
                   1502:                                        }
                   1503:
                   1504:                                        /*
                   1505:                                         * Remember if we may need to
                   1506:                                         * flush the VAC.
                   1507:                                         */
                   1508:
                   1509:                                        needcflush = TRUE;
                   1510:                                        firstpage = FALSE;
                   1511:                                }
                   1512: #endif
                   1513:                                /*
                   1514:                                 * Update statistics
                   1515:                                 */
                   1516:
                   1517:                                pmap->pm_stats.wired_count--;
                   1518:                                pmap->pm_stats.resident_count--;
                   1519:
                   1520:                                /*
                   1521:                                 * Invalidate the PTE.
                   1522:                                 */
                   1523:
                   1524:                                *pte = PG_NV;
                   1525:                                TBIS(sva);
                   1526:                        }
                   1527:                        pte++;
                   1528:                        sva += PAGE_SIZE;
                   1529:                }
                   1530:        }
                   1531:
                   1532: #ifdef M68K_MMU_HP
                   1533:        if (pmap_aliasmask) {
                   1534:                /*
                   1535:                 * Didn't do anything, no need for cache flushes
                   1536:                 */
                   1537:
                   1538:                if (firstpage)
                   1539:                        return;
                   1540:
                   1541:                /*
                   1542:                 * In a couple of cases, we don't need to worry about flushing
                   1543:                 * the VAC:
                   1544:                 *      1. if this is a kernel mapping,
                   1545:                 *         we have already done it
                   1546:                 *      2. if it is a user mapping not for the current process,
                   1547:                 *         it won't be there
                   1548:                 */
                   1549:
                   1550:                if (!active_user_pmap(pmap))
                   1551:                        needcflush = FALSE;
                   1552:                if (needcflush) {
                   1553:                        if (pmap == pmap_kernel()) {
                   1554:                                DCIS();
                   1555:                        } else {
                   1556:                                DCIU();
                   1557:                        }
                   1558:                }
                   1559:        }
                   1560: #endif
                   1561: }
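/*
 * Usage sketch (hypothetical caller, not part of this file): unmanaged, wired
 * kernel mappings made with pmap_kenter_pa() (or pmap_kenter_cache()) are torn
 * down again with pmap_kremove(); no PV entries are created or removed on
 * either side.
 */
#if 0
	vsize_t off;

	for (off = 0; off < size; off += PAGE_SIZE)
		pmap_kenter_pa(kva + off, pa + off,
		    VM_PROT_READ | VM_PROT_WRITE);
	/* ... use the mapping ... */
	pmap_kremove(kva, size);
	pmap_update(pmap_kernel());
#endif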
                   1562:
                   1563: /*
                    1564:  * pmap_unwire:                        [ INTERFACE ]
                   1565:  *
                   1566:  *     Clear the wired attribute for a map/virtual-address pair.
                   1567:  *
                   1568:  *     The mapping must already exist in the pmap.
                   1569:  */
                   1570: void
                   1571: pmap_unwire(pmap, va)
                   1572:        pmap_t          pmap;
                   1573:        vaddr_t         va;
                   1574: {
                   1575:        pt_entry_t *pte;
                   1576:
                   1577:        PMAP_DPRINTF(PDB_FOLLOW, ("pmap_unwire(%p, %lx)\n", pmap, va));
                   1578:
                   1579:        pte = pmap_pte(pmap, va);
                   1580: #ifdef PMAP_DEBUG
                   1581:        /*
                   1582:         * Page table page is not allocated.
                   1583:         * Should this ever happen?  Ignore it for now,
                   1584:         * we don't want to force allocation of unnecessary PTE pages.
                   1585:         */
                   1586:        if (!pmap_ste_v(pmap, va)) {
                   1587:                if (pmapdebug & PDB_PARANOIA)
                   1588:                        printf("pmap_unwire: invalid STE for %lx\n", va);
                   1589:                return;
                   1590:        }
                   1591:        /*
                   1592:         * Page not valid.  Should this ever happen?
                   1593:         * Just continue and change wiring anyway.
                   1594:         */
                   1595:        if (!pmap_pte_v(pte)) {
                   1596:                if (pmapdebug & PDB_PARANOIA)
                   1597:                        printf("pmap_unwire: invalid PTE for %lx\n", va);
                   1598:        }
                   1599: #endif
                   1600:        /*
                    1601:         * If the wiring actually changed (it always should here), clear the
                    1602:         * wire bit and update the wire count.  Note that wiring is not a hardware
                   1603:         * characteristic so there is no need to invalidate the TLB.
                   1604:         */
                   1605:        if (pmap_pte_w_chg(pte, 0)) {
                   1606:                pmap_pte_set_w(pte, 0);
                   1607:                pmap->pm_stats.wired_count--;
                   1608:        }
                   1609: }
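/*
 * Usage sketch (hypothetical caller, not part of this file): a mapping wired
 * via pmap_enter() with PMAP_WIRED is made pageable again with pmap_unwire().
 */
#if 0
	pmap_enter(pmap, va, pa, prot, PMAP_WIRED);
	/* ... */
	pmap_unwire(pmap, va);
#endif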
                   1610:
                   1611: /*
                   1612:  * pmap_extract:               [ INTERFACE ]
                   1613:  *
                   1614:  *     Extract the physical address associated with the given
                   1615:  *     pmap/virtual address pair.
                   1616:  */
                   1617: boolean_t
                   1618: pmap_extract(pmap, va, pap)
                   1619:        pmap_t  pmap;
                   1620:        vaddr_t va;
                   1621:        paddr_t *pap;
                   1622: {
                   1623:        boolean_t rv = FALSE;
                   1624:        paddr_t pa;
                   1625:        pt_entry_t *pte;
                   1626:
                   1627:        PMAP_DPRINTF(PDB_FOLLOW,
                   1628:            ("pmap_extract(%p, %lx) -> ", pmap, va));
                   1629:
                   1630: #ifdef __HAVE_PMAP_DIRECT
                   1631:        if (pmap == pmap_kernel() && trunc_page(va) > VM_MAX_KERNEL_ADDRESS) {
                   1632:                if (pap != NULL)
                   1633:                        *pap = va;
                   1634:                return (TRUE);
                   1635:        }
                   1636: #endif
                   1637:
                   1638:        if (pmap_ste_v(pmap, va)) {
                   1639:                pte = pmap_pte(pmap, va);
                   1640:                if (pmap_pte_v(pte)) {
                   1641:                        pa = pmap_pte_pa(pte) | (va & ~PG_FRAME);
                   1642:                        if (pap != NULL)
                   1643:                                *pap = pa;
                   1644:                        rv = TRUE;
                   1645:                }
                   1646:        }
                   1647: #ifdef PMAP_DEBUG
                   1648:        if (pmapdebug & PDB_FOLLOW) {
                   1649:                if (rv)
                   1650:                        printf("%lx\n", pa);
                   1651:                else
                   1652:                        printf("failed\n");
                   1653:        }
                   1654: #endif
                   1655:        return (rv);
                   1656: }
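/*
 * Usage sketch (hypothetical caller, not part of this file): translate a
 * kernel virtual address to its physical address, e.g. before programming a
 * DMA engine.
 */
#if 0
	paddr_t pa;

	if (pmap_extract(pmap_kernel(), kva, &pa) == FALSE)
		panic("kva %lx is not mapped", kva);
#endif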
                   1657:
                   1658: /*
                   1659:  * pmap_collect:               [ INTERFACE ]
                   1660:  *
                   1661:  *     Garbage collects the physical map system for pages which are no
                   1662:  *     longer used.  Success need not be guaranteed -- that is, there
                   1663:  *     may well be pages which are not referenced, but others may be
                   1664:  *     collected.
                   1665:  *
                   1666:  *     Called by the pageout daemon when pages are scarce.
                   1667:  */
                   1668: void
                   1669: pmap_collect(pmap)
                   1670:        pmap_t          pmap;
                   1671: {
                   1672:        int flags;
                   1673:
                   1674:        PMAP_DPRINTF(PDB_FOLLOW, ("pmap_collect(%p)\n", pmap));
                   1675:
                   1676:        if (pmap == pmap_kernel()) {
                   1677:                int bank, s;
                   1678:
                   1679:                /*
                   1680:                 * XXX This is very bogus.  We should handle kernel PT
                   1681:                 * XXX pages much differently.
                   1682:                 */
                   1683:
                   1684:                s = splvm();
                   1685:                for (bank = 0; bank < vm_nphysseg; bank++)
                   1686:                        pmap_collect1(ptoa(vm_physmem[bank].start),
                   1687:                            ptoa(vm_physmem[bank].end));
                   1688:                splx(s);
                   1689:        } else {
                   1690:                /*
                   1691:                 * This process is about to be swapped out; free all of
                   1692:                 * the PT pages by removing the physical mappings for its
                   1693:                 * entire address space.  Note: pmap_remove() performs
                   1694:                 * all necessary locking.
                   1695:                 */
                   1696:                flags = active_pmap(pmap) ? PRM_TFLUSH : 0;
                   1697:                pmap_remove_flags(pmap, VM_MIN_ADDRESS, VM_MAX_ADDRESS,
                   1698:                    flags | PRM_SKIPWIRED);
                   1699:                pmap_update(pmap);
                   1700:        }
                   1701: }
                   1702:
                   1703: /*
                   1704:  * pmap_collect1:
                   1705:  *
                   1706:  *     Garbage-collect KPT pages.  Helper for the above (bogus)
                   1707:  *     pmap_collect().
                   1708:  *
                   1709:  *     Note: THIS SHOULD GO AWAY, AND BE REPLACED WITH A BETTER
                   1710:  *     WAY OF HANDLING PT PAGES!
                   1711:  */
                   1712: void
                   1713: pmap_collect1(startpa, endpa)
                   1714:        paddr_t         startpa, endpa;
                   1715: {
                   1716:        paddr_t pa;
                   1717:        struct pv_entry *pv;
                   1718:        pt_entry_t *pte;
                   1719:        paddr_t kpa;
                   1720: #ifdef PMAP_DEBUG
                   1721:        st_entry_t *ste;
                   1722:        int opmapdebug = 0 /* XXX initialize to quiet gcc -Wall */;
                   1723: #endif
                   1724:
                   1725:        for (pa = startpa; pa < endpa; pa += PAGE_SIZE) {
                   1726:                struct kpt_page *kpt, **pkpt;
                   1727:
                   1728:                /*
                   1729:                 * Locate physical pages which are being used as kernel
                   1730:                 * page table pages.
                   1731:                 */
                   1732:                pv = pa_to_pvh(pa);
                   1733:                if (pv->pv_pmap != pmap_kernel() || !(pv->pv_flags & PV_PTPAGE))
                   1734:                        continue;
                   1735:                do {
                   1736:                        if (pv->pv_ptste && pv->pv_ptpmap == pmap_kernel())
                   1737:                                break;
                   1738:                } while ((pv = pv->pv_next));
                   1739:                if (pv == NULL)
                   1740:                        continue;
                   1741: #ifdef PMAP_DEBUG
                   1742:                if (pv->pv_va < (vaddr_t)Sysmap ||
                   1743:                    pv->pv_va >= (vaddr_t)Sysmap + MACHINE_MAX_PTSIZE)
                   1744:                        printf("collect: kernel PT VA out of range\n");
                   1745:                else
                   1746:                        goto ok;
                   1747:                pmap_pvdump(pa);
                   1748:                continue;
                   1749: ok:
                   1750: #endif
                   1751:                pte = (pt_entry_t *)(pv->pv_va + PAGE_SIZE);
                   1752:                while (--pte >= (pt_entry_t *)pv->pv_va && *pte == PG_NV)
                   1753:                        ;
                   1754:                if (pte >= (pt_entry_t *)pv->pv_va)
                   1755:                        continue;
                   1756:
                   1757: #ifdef PMAP_DEBUG
                   1758:                if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT)) {
                   1759:                        printf("collect: freeing KPT page at %lx (ste %x@%p)\n",
                   1760:                               pv->pv_va, *pv->pv_ptste, pv->pv_ptste);
                   1761:                        opmapdebug = pmapdebug;
                   1762:                        pmapdebug |= PDB_PTPAGE;
                   1763:                }
                   1764:
                   1765:                ste = pv->pv_ptste;
                   1766: #endif
                   1767:                /*
                   1768:                 * If all entries were invalid we can remove the page.
                    1769:                 * We call pmap_remove_mapping() to take care of invalidating
                   1770:                 * ST and Sysptmap entries.
                   1771:                 */
                   1772:                pmap_extract(pmap_kernel(), pv->pv_va, &kpa);
                   1773:                pmap_remove_mapping(pmap_kernel(), pv->pv_va, PT_ENTRY_NULL,
                   1774:                                    PRM_TFLUSH|PRM_CFLUSH);
                   1775:                /*
                   1776:                 * Use the physical address to locate the original
                   1777:                 * (kmem_alloc assigned) address for the page and put
                   1778:                 * that page back on the free list.
                   1779:                 */
                   1780:                for (pkpt = &kpt_used_list, kpt = *pkpt;
                   1781:                     kpt != NULL;
                   1782:                     pkpt = &kpt->kpt_next, kpt = *pkpt)
                   1783:                        if (kpt->kpt_pa == kpa)
                   1784:                                break;
                   1785: #ifdef PMAP_DEBUG
                   1786:                if (kpt == NULL)
                   1787:                        panic("pmap_collect: lost a KPT page");
                   1788:                if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT))
                   1789:                        printf("collect: %lx (%lx) to free list\n",
                   1790:                               kpt->kpt_va, kpa);
                   1791: #endif
                   1792:                *pkpt = kpt->kpt_next;
                   1793:                kpt->kpt_next = kpt_free_list;
                   1794:                kpt_free_list = kpt;
                   1795: #ifdef PMAP_DEBUG
                   1796:                if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT))
                   1797:                        pmapdebug = opmapdebug;
                   1798:
                   1799:                if (*ste & SG_V)
                   1800:                        printf("collect: kernel STE at %p still valid (%x)\n",
                   1801:                               ste, *ste);
                   1802:                ste = &Sysptmap[ste - pmap_ste(pmap_kernel(), 0)];
                   1803:                if (*ste & SG_V)
                   1804:                        printf("collect: kernel PTmap at %p still valid (%x)\n",
                   1805:                               ste, *ste);
                   1806: #endif
                   1807:        }
                   1808: }
                   1809:
                   1810: /*
                   1811:  * pmap_zero_page:             [ INTERFACE ]
                   1812:  *
                   1813:  *     Zero the specified (machine independent) page by mapping the page
                    1814:  *     into virtual memory and using zeropage() to clear its contents, one
                   1815:  *     machine dependent page at a time.
                   1816:  *
                   1817:  *     Note: WE DO NOT CURRENTLY LOCK THE TEMPORARY ADDRESSES!
                   1818:  */
                   1819: void
                   1820: pmap_zero_page(struct vm_page *pg)
                   1821: {
                   1822:        paddr_t phys = VM_PAGE_TO_PHYS(pg);
                   1823:        int npte;
                   1824:
                   1825:        PMAP_DPRINTF(PDB_FOLLOW, ("pmap_zero_page(%lx)\n", phys));
                   1826:
                   1827:        npte = phys | PG_V;
                   1828: #ifdef M68K_MMU_HP
                   1829:        if (pmap_aliasmask) {
                   1830:                /*
                   1831:                 * Cache-inhibit the mapping on VAC machines, as we would
                   1832:                 * be wasting the cache load.
                   1833:                 */
                   1834:                npte |= PG_CI;
                   1835:        }
                   1836: #endif
                   1837:
                   1838: #if defined(M68040) || defined(M68060)
                   1839:        if (mmutype <= MMU_68040) {
                   1840:                /*
                   1841:                 * Set copyback caching on the page; this is required
                   1842:                 * for cache consistency (since regular mappings are
                   1843:                 * copyback as well).
                   1844:                 */
                   1845:                npte |= PG_CCB;
                   1846:        }
                   1847: #endif
                   1848:
                   1849:        *caddr1_pte = npte;
                   1850:        TBIS((vaddr_t)CADDR1);
                   1851:
                   1852:        zeropage(CADDR1);
                   1853:
                   1854: #ifdef PMAP_DEBUG
                   1855:        *caddr1_pte = PG_NV;
                   1856:        TBIS((vaddr_t)CADDR1);
                   1857: #endif
                   1858: }
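/*
 * Usage sketch (hypothetical caller, not part of this file): the VM system
 * zeroes a freshly allocated page through the temporary CADDR1 window set up
 * above.
 */
#if 0
	struct vm_page *pg = uvm_pagealloc(NULL, 0, NULL, 0);

	if (pg != NULL)
		pmap_zero_page(pg);
#endif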
                   1859:
                   1860: /*
                   1861:  * pmap_copy_page:             [ INTERFACE ]
                   1862:  *
                   1863:  *     Copy the specified (machine independent) page by mapping the page
                    1864:  *     into virtual memory and using copypage() to copy the page, one machine
                   1865:  *     dependent page at a time.
                   1866:  *
                   1867:  *     Note: WE DO NOT CURRENTLY LOCK THE TEMPORARY ADDRESSES!
                   1868:  */
                   1869: void
                   1870: pmap_copy_page(struct vm_page *srcpg, struct vm_page *dstpg)
                   1871: {
                   1872:        paddr_t src = VM_PAGE_TO_PHYS(srcpg);
                   1873:        paddr_t dst = VM_PAGE_TO_PHYS(dstpg);
                   1874:
                   1875:        int npte1, npte2;
                   1876:
                   1877:        PMAP_DPRINTF(PDB_FOLLOW, ("pmap_copy_page(%lx, %lx)\n", src, dst));
                   1878:
                   1879:        npte1 = src | PG_RO | PG_V;
                   1880:        npte2 = dst | PG_V;
                   1881: #ifdef M68K_MMU_HP
                   1882:        if (pmap_aliasmask) {
                   1883:                /*
                   1884:                 * Cache-inhibit the mapping on VAC machines, as we would
                   1885:                 * be wasting the cache load.
                   1886:                 */
                   1887:                npte1 |= PG_CI;
                   1888:                npte2 |= PG_CI;
                   1889:        }
                   1890: #endif
                   1891:
                   1892: #if defined(M68040) || defined(M68060)
                   1893:        if (mmutype <= MMU_68040) {
                   1894:                /*
                   1895:                 * Set copyback caching on the pages; this is required
                   1896:                 * for cache consistency (since regular mappings are
                   1897:                 * copyback as well).
                   1898:                 */
                   1899:                npte1 |= PG_CCB;
                   1900:                npte2 |= PG_CCB;
                   1901:        }
                   1902: #endif
                   1903:
                   1904:        *caddr1_pte = npte1;
                   1905:        TBIS((vaddr_t)CADDR1);
                   1906:
                   1907:        *caddr2_pte = npte2;
                   1908:        TBIS((vaddr_t)CADDR2);
                   1909:
                   1910:        copypage(CADDR1, CADDR2);
                   1911:
                   1912: #ifdef PMAP_DEBUG
                   1913:        *caddr1_pte = PG_NV;
                   1914:        TBIS((vaddr_t)CADDR1);
                   1915:
                   1916:        *caddr2_pte = PG_NV;
                   1917:        TBIS((vaddr_t)CADDR2);
                   1918: #endif
                   1919: }
                   1920:
                   1921: /*
                   1922:  * pmap_clear_modify:          [ INTERFACE ]
                   1923:  *
                   1924:  *     Clear the modify bits on the specified physical page.
                   1925:  */
                   1926: boolean_t
                   1927: pmap_clear_modify(pg)
                   1928:        struct vm_page *pg;
                   1929: {
                   1930:        boolean_t rv;
                   1931:
                   1932:        PMAP_DPRINTF(PDB_FOLLOW, ("pmap_clear_modify(%lx)\n", pg));
                   1933:
                   1934:        rv = pmap_testbit(pg, PG_M);
                   1935:        pmap_changebit(pg, 0, ~PG_M);
                   1936:        return rv;
                   1937: }
                   1938:
                   1939: /*
                   1940:  * pmap_clear_reference:       [ INTERFACE ]
                   1941:  *
                   1942:  *     Clear the reference bit on the specified physical page.
                   1943:  */
                   1944: boolean_t
                   1945: pmap_clear_reference(pg)
                   1946:        struct vm_page *pg;
                   1947: {
                   1948:        boolean_t rv;
                   1949:
                   1950:        PMAP_DPRINTF(PDB_FOLLOW, ("pmap_clear_reference(%lx)\n", pg));
                   1951:
                   1952:        rv = pmap_testbit(pg, PG_U);
                   1953:        pmap_changebit(pg, 0, ~PG_U);
                   1954:        return rv;
                   1955: }
                   1956:
                   1957: /*
                   1958:  * pmap_is_referenced:         [ INTERFACE ]
                   1959:  *
                   1960:  *     Return whether or not the specified physical page is referenced
                   1961:  *     by any physical maps.
                   1962:  */
                   1963: boolean_t
                   1964: pmap_is_referenced(pg)
                   1965:        struct vm_page *pg;
                   1966: {
                   1967: #ifdef PMAP_DEBUG
                   1968:        if (pmapdebug & PDB_FOLLOW) {
                   1969:                boolean_t rv = pmap_testbit(pg, PG_U);
                   1970:                printf("pmap_is_referenced(%lx) -> %c\n", pg, "FT"[rv]);
                   1971:                return(rv);
                   1972:        }
                   1973: #endif
                   1974:        return(pmap_testbit(pg, PG_U));
                   1975: }
                   1976:
                   1977: /*
                   1978:  * pmap_is_modified:           [ INTERFACE ]
                   1979:  *
                   1980:  *     Return whether or not the specified physical page is modified
                   1981:  *     by any physical maps.
                   1982:  */
                   1983: boolean_t
                   1984: pmap_is_modified(pg)
                   1985:        struct vm_page *pg;
                   1986: {
                   1987: #ifdef PMAP_DEBUG
                   1988:        if (pmapdebug & PDB_FOLLOW) {
                   1989:                boolean_t rv = pmap_testbit(pg, PG_M);
                   1990:                printf("pmap_is_modified(%lx) -> %c\n", pg, "FT"[rv]);
                   1991:                return(rv);
                   1992:        }
                   1993: #endif
                   1994:        return(pmap_testbit(pg, PG_M));
                   1995: }
                   1996:
                   1997: #ifdef M68K_MMU_HP
                   1998: /*
                   1999:  * pmap_prefer:                        [ INTERFACE ]
                   2000:  *
                   2001:  *     Find the first virtual address >= *vap that does not
                   2002:  *     cause a virtually-tagged cache alias problem.
                   2003:  */
                   2004: void
                   2005: pmap_prefer(foff, vap)
                   2006:        vaddr_t foff, *vap;
                   2007: {
                   2008:        vaddr_t va;
                   2009:        vsize_t d;
                   2010:
                   2011: #ifdef M68K_MMU_MOTOROLA
                   2012:        if (pmap_aliasmask)
                   2013: #endif
                   2014:        {
                   2015:                va = *vap;
                   2016:                d = foff - va;
                   2017:                d &= pmap_aliasmask;
                   2018:                *vap = va + d;
                   2019:        }
                   2020: }
                   2021: #endif /* M68K_MMU_HP */
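
The alias-avoidance arithmetic in pmap_prefer() is easier to see with
concrete numbers.  A minimal standalone sketch follows; the alias mask
value (0xffff, i.e. a 64KB virtually-addressed cache) and the addresses
are assumptions chosen only for illustration, not values from this file.

    #include <stdio.h>

    /*
     * Illustration of the pmap_prefer() computation above: advance the
     * candidate VA so that it lands on the same cache "color" as the
     * file offset, i.e. (va ^ foff) & aliasmask == 0.
     */
    int
    main(void)
    {
            unsigned long aliasmask = 0xffff;       /* assumed VAC alias mask */
            unsigned long foff = 0x12345678;        /* offset the page will map */
            unsigned long va = 0x20000000;          /* candidate virtual address */
            unsigned long d;

            d = (foff - va) & aliasmask;            /* distance to a matching color */
            va += d;                                /* now 0x20005678 */
            printf("preferred va = 0x%lx\n", va);
            return 0;
    }
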
                   2022:
                   2023: #ifdef COMPAT_HPUX
                   2024: /*
                   2025:  * pmap_mapmulti:
                   2026:  *
                   2027:  *     'PUX hack for dealing with the so called multi-mapped address space.
                   2028:  *     The first 256mb is mapped in at every 256mb region from 0x10000000
                   2029:  *     up to 0xF0000000.  This allows for 15 bits of tag information.
                   2030:  *
                   2031:  *     We implement this at the segment table level, the machine independent
                   2032:  *     VM knows nothing about it.
                   2033:  */
                   2034: int
                   2035: pmap_mapmulti(pmap, va)
                   2036:        pmap_t pmap;
                   2037:        vaddr_t va;
                   2038: {
                   2039:        st_entry_t *ste, *bste;
                   2040:
                   2041: #ifdef PMAP_DEBUG
                   2042:        if (pmapdebug & PDB_MULTIMAP) {
                   2043:                ste = pmap_ste(pmap, HPMMBASEADDR(va));
                   2044:                printf("pmap_mapmulti(%p, %lx): bste %p(%x)",
                   2045:                       pmap, va, ste, *ste);
                   2046:                ste = pmap_ste(pmap, va);
                   2047:                printf(" ste %p(%x)\n", ste, *ste);
                   2048:        }
                   2049: #endif
                   2050:        bste = pmap_ste(pmap, HPMMBASEADDR(va));
                   2051:        ste = pmap_ste(pmap, va);
                   2052:        if (!(*ste & SG_V) && (*bste & SG_V)) {
                   2053:                *ste = *bste;
                   2054:                TBIAU();
                   2055:                return (0);
                   2056:        }
                   2057:        return (EFAULT);
                   2058: }
                   2059: #endif /* COMPAT_HPUX */
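
The 256MB window folding that pmap_mapmulti() relies on can be sketched
in isolation.  HPMMBASEADDR() itself is defined elsewhere; the helper
below is an assumed equivalent, shown only to make the address math
concrete: every 256MB window from 0x10000000 upward folds back onto the
first 256MB.

    /*
     * Hypothetical stand-in for HPMMBASEADDR() (assumed, for
     * illustration): keep only the low 28 bits of a multi-mapped VA,
     * e.g. 0x23456789 -> 0x03456789.
     */
    #define HPMM_WINDOW     0x10000000UL            /* 256MB window size (assumed) */

    unsigned long
    multimap_base(unsigned long va)
    {
            return (va & (HPMM_WINDOW - 1));
    }
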
                   2060:
                   2061: /*
                   2062:  * Miscellaneous support routines follow
                   2063:  */
                   2064:
                   2065: /*
                   2066:  * pmap_remove_mapping:
                   2067:  *
                   2068:  *     Invalidate a single page denoted by pmap/va.
                   2069:  *
                   2070:  *     If (pte != NULL), it is the already computed PTE for the page.
                   2071:  *
                   2072:  *     If (flags & PRM_TFLUSH), we must invalidate any TLB information.
                   2073:  *
                   2074:  *     If (flags & PRM_CFLUSH), we must flush/invalidate any cache
                   2075:  *     information.
                   2076:  *
                   2077:  *     If (flags & PRM_KEEPPTPAGE), we don't free the page table page
                   2078:  *     if the reference drops to zero.
                   2079:  */
                   2080: void
                   2081: pmap_remove_mapping(pmap, va, pte, flags)
                   2082:        pmap_t pmap;
                   2083:        vaddr_t va;
                   2084:        pt_entry_t *pte;
                   2085:        int flags;
                   2086: {
                   2087:        struct vm_page *pg;
                   2088:        paddr_t pa;
                   2089:        struct pv_entry *pv, *prev, *cur;
                   2090:        pmap_t ptpmap;
                   2091:        st_entry_t *ste;
                   2092:        int s, bits;
                   2093: #ifdef PMAP_DEBUG
                   2094:        pt_entry_t opte;
                   2095: #endif
                   2096:
                   2097:        PMAP_DPRINTF(PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT,
                   2098:            ("pmap_remove_mapping(%p, %lx, %p, %x)\n",
                   2099:            pmap, va, pte, flags));
                   2100:
                   2101:        /*
                   2102:         * PTE not provided, compute it from pmap and va.
                   2103:         */
                   2104:
                   2105:        if (pte == PT_ENTRY_NULL) {
                   2106:                pte = pmap_pte(pmap, va);
                   2107:                if (*pte == PG_NV)
                   2108:                        return;
                   2109:        }
                   2110: #ifdef M68K_MMU_HP
                   2111:        if (pmap_aliasmask && (flags & PRM_CFLUSH)) {
                   2112:
                   2113:                /*
                   2114:                 * Purge kernel side of VAC to ensure we get the correct
                   2115:                 * state of any hardware maintained bits.
                   2116:                 */
                   2117:
                   2118:                DCIS();
                   2119:
                   2120:                /*
                   2121:                 * If this is a non-CI user mapping for the current process,
                   2122:                 * flush the VAC.  Note that the kernel side was flushed
                   2123:                 * above so we don't worry about non-CI kernel mappings.
                   2124:                 */
                   2125:
                   2126:                if (active_user_pmap(pmap) && !pmap_pte_ci(pte)) {
                   2127:                        DCIU();
                   2128:                }
                   2129:        }
                   2130: #endif
                   2131:        pa = pmap_pte_pa(pte);
                   2132: #ifdef PMAP_DEBUG
                   2133:        opte = *pte;
                   2134: #endif
                   2135:
                   2136: #if defined(M68040) || defined(M68060)
                   2137:        if ((mmutype <= MMU_68040) && (flags & PRM_CFLUSH)) {
                   2138:                DCFP(pa);
                   2139:                ICPP(pa);
                   2140:        }
                   2141: #endif
                   2142:
                   2143:        /*
                   2144:         * Update statistics
                   2145:         */
                   2146:
                   2147:        if (pmap_pte_w(pte))
                   2148:                pmap->pm_stats.wired_count--;
                   2149:        pmap->pm_stats.resident_count--;
                   2150:
                   2151:        /*
                   2152:         * Invalidate the PTE after saving the reference modify info.
                   2153:         */
                   2154:
                   2155:        PMAP_DPRINTF(PDB_REMOVE, ("remove: invalidating pte at %p\n", pte));
                   2156:        bits = *pte & (PG_U|PG_M);
                   2157:        *pte = PG_NV;
                   2158:        if ((flags & PRM_TFLUSH) && active_pmap(pmap))
                   2159:                TBIS(va);
                   2160:
                   2161:        /*
                   2162:         * For user mappings decrement the wiring count on
                   2163:         * the PT page.
                   2164:         */
                   2165:
                   2166:        if (pmap != pmap_kernel()) {
                   2167:                vaddr_t ptpva = trunc_page((vaddr_t)pte);
                   2168:                int refs = pmap_ptpage_delref(ptpva);
                   2169: #ifdef PMAP_DEBUG
                   2170:                if (pmapdebug & PDB_WIRING)
                   2171:                        pmap_check_wiring("remove", ptpva);
                   2172: #endif
                   2173:
                   2174:                /*
                   2175:                 * If reference count drops to zero, and we're not instructed
                   2176:                 * to keep it around, free the PT page.
                   2177:                 */
                   2178:
                   2179:                if (refs == 0 && (flags & PRM_KEEPPTPAGE) == 0) {
                   2180: #ifdef DIAGNOSTIC
                   2181:                        struct pv_entry *pv;
                   2182: #endif
                   2183:                        paddr_t pa;
                   2184:
                   2185:                        pa = pmap_pte_pa(pmap_pte(pmap_kernel(), ptpva));
                   2186:                        pg = PHYS_TO_VM_PAGE(pa);
                   2187: #ifdef DIAGNOSTIC
                   2188:                        if (pg == NULL)
                   2189:                                panic("pmap_remove_mapping: unmanaged PT page");
                   2190:                        pv = pg_to_pvh(pg);
                   2191:                        if (pv->pv_ptste == NULL)
                   2192:                                panic("pmap_remove_mapping: ptste == NULL");
                   2193:                        if (pv->pv_pmap != pmap_kernel() ||
                   2194:                            pv->pv_va != ptpva ||
                   2195:                            pv->pv_next != NULL)
                   2196:                                panic("pmap_remove_mapping: "
                   2197:                                    "bad PT page pmap %p, va 0x%lx, next %p",
                   2198:                                    pv->pv_pmap, pv->pv_va, pv->pv_next);
                   2199: #endif
                   2200:                        pmap_remove_mapping(pmap_kernel(), ptpva,
                   2201:                            PT_ENTRY_NULL, PRM_TFLUSH|PRM_CFLUSH);
                   2202:                        uvm_pagefree(pg);
                   2203:                        PMAP_DPRINTF(PDB_REMOVE|PDB_PTPAGE,
                   2204:                            ("remove: PT page 0x%lx (0x%lx) freed\n",
                   2205:                            ptpva, pa));
                   2206:                }
                   2207:        }
                   2208:
                   2209:        /*
                   2210:         * If this isn't a managed page, we are all done.
                   2211:         */
                   2212:
                   2213:        pg = PHYS_TO_VM_PAGE(pa);
                   2214:        if (pg == NULL)
                   2215:                return;
                   2216:
                   2217:        /*
                   2218:         * Otherwise remove it from the PV table
                   2219:         * (raise IPL since we may be called at interrupt time).
                   2220:         */
                   2221:
                   2222:        pv = pg_to_pvh(pg);
                   2223:        s = splvm();
                   2224:
                   2225:        /*
                   2226:         * If it is the first entry on the list, it is actually
                   2227:         * in the header and we must copy the following entry up
                   2228:         * to the header.  Otherwise we must search the list for
                   2229:         * the entry.  In either case we free the now unused entry.
                   2230:         */
                   2231:        if (pmap == pv->pv_pmap && va == pv->pv_va) {
                   2232:                ste = pv->pv_ptste;
                   2233:                ptpmap = pv->pv_ptpmap;
                   2234:                cur = pv->pv_next;
                   2235:                if (cur != NULL) {
                   2236:                        cur->pv_flags = pv->pv_flags;
                   2237:                        *pv = *cur;
                   2238:                        pmap_free_pv(cur);
                   2239:                } else
                   2240:                        pv->pv_pmap = NULL;
                   2241:        } else {
                   2242:                prev = pv;
                   2243:                for (cur = pv->pv_next; cur != NULL; cur = cur->pv_next) {
                   2244:                        if (pmap == cur->pv_pmap && va == cur->pv_va)
                   2245:                                break;
                   2246:                        prev = cur;
                   2247:                }
                   2248: #ifdef PMAP_DEBUG
                   2249:                if (cur == NULL)
                   2250:                        panic("pmap_remove: PA not in pv_tab");
                   2251: #endif
                   2252:                ste = cur->pv_ptste;
                   2253:                ptpmap = cur->pv_ptpmap;
                   2254:                prev->pv_next = cur->pv_next;
                   2255:                pmap_free_pv(cur);
                   2256:        }
                   2257: #ifdef M68K_MMU_HP
                   2258:
                   2259:        /*
                   2260:         * If only one mapping left we no longer need to cache inhibit
                   2261:         */
                   2262:
                   2263:        if (pmap_aliasmask &&
                   2264:            pv->pv_pmap && pv->pv_next == NULL && (pv->pv_flags & PV_CI)) {
                   2265:                PMAP_DPRINTF(PDB_CACHE,
                   2266:                    ("remove: clearing CI for pa %lx\n", pa));
                   2267:                pv->pv_flags &= ~PV_CI;
                   2268:                pmap_changebit(pg, 0, ~PG_CI);
                   2269: #ifdef PMAP_DEBUG
                   2270:                if ((pmapdebug & (PDB_CACHE|PDB_PVDUMP)) ==
                   2271:                    (PDB_CACHE|PDB_PVDUMP))
                   2272:                        pmap_pvdump(pa);
                   2273: #endif
                   2274:        }
                   2275: #endif
                   2276:
                   2277:        /*
                   2278:         * If this was a PT page we must also remove the
                   2279:         * mapping from the associated segment table.
                   2280:         */
                   2281:
                   2282:        if (ste) {
                   2283:                PMAP_DPRINTF(PDB_REMOVE|PDB_PTPAGE,
                   2284:                    ("remove: ste was %x@%p pte was %x@%p\n",
                   2285:                    *ste, ste, opte, pmap_pte(pmap, va)));
                   2286: #if defined(M68040) || defined(M68060)
                   2287:                if (mmutype <= MMU_68040) {
                   2288:                        st_entry_t *este = &ste[NPTEPG/SG4_LEV3SIZE];
                   2289:
                   2290:                        while (ste < este)
                   2291:                                *ste++ = SG_NV;
                   2292: #ifdef PMAP_DEBUG
                   2293:                        ste -= NPTEPG/SG4_LEV3SIZE;
                   2294: #endif
                   2295:                } else
                   2296: #endif
                   2297:                *ste = SG_NV;
                   2298:
                   2299:                /*
                   2300:                 * If it was a user PT page, we decrement the
                   2301:                 * reference count on the segment table as well,
                   2302:                 * freeing it if it is now empty.
                   2303:                 */
                   2304:
                   2305:                if (ptpmap != pmap_kernel()) {
                   2306:                        PMAP_DPRINTF(PDB_REMOVE|PDB_SEGTAB,
                   2307:                            ("remove: stab %p, refcnt %d\n",
                   2308:                            ptpmap->pm_stab, ptpmap->pm_sref - 1));
                   2309: #ifdef PMAP_DEBUG
                   2310:                        if ((pmapdebug & PDB_PARANOIA) &&
                   2311:                            ptpmap->pm_stab != (st_entry_t *)trunc_page((vaddr_t)ste))
                   2312:                                panic("remove: bogus ste");
                   2313: #endif
                   2314:                        if (--(ptpmap->pm_sref) == 0) {
                   2315:                                PMAP_DPRINTF(PDB_REMOVE|PDB_SEGTAB,
                   2316:                                    ("remove: free stab %p\n",
                   2317:                                    ptpmap->pm_stab));
                   2318:                                pmap_remove(pmap_kernel(),
                   2319:                                    (vaddr_t)ptpmap->pm_stab,
                   2320:                                    (vaddr_t)ptpmap->pm_stab + MACHINE_STSIZE);
                   2321:                                pmap_update(pmap_kernel());
                   2322:                                uvm_pagefree(PHYS_TO_VM_PAGE((paddr_t)
                   2323:                                    ptpmap->pm_stpa));
                   2324:                                uvm_km_free_wakeup(st_map,
                   2325:                                                (vaddr_t)ptpmap->pm_stab,
                   2326:                                                MACHINE_STSIZE);
                   2327:                                ptpmap->pm_stab = Segtabzero;
                   2328:                                ptpmap->pm_stpa = Segtabzeropa;
                   2329: #if defined(M68040) || defined(M68060)
                   2330:                                if (mmutype <= MMU_68040)
                   2331:                                        ptpmap->pm_stfree = protostfree;
                   2332: #endif
                   2333:
                   2334:                                /*
                   2335:                                 * XXX may have changed segment table
                   2336:                                 * pointer for current process so
                   2337:                                 * update now to reload hardware.
                   2338:                                 */
                   2339:
                   2340:                                if (active_user_pmap(ptpmap))
                   2341:                                        PMAP_ACTIVATE(ptpmap, 1);
                   2342:                        }
                   2343: #ifdef PMAP_DEBUG
                   2344:                        else if (ptpmap->pm_sref < 0)
                   2345:                                panic("remove: sref < 0");
                   2346: #endif
                   2347:                }
                   2348: #if 0
                   2349:                /*
                   2350:                 * XXX this should be unnecessary as we have been
                   2351:                 * flushing individual mappings as we go.
                   2352:                 */
                   2353:                if (ptpmap == pmap_kernel())
                   2354:                        TBIAS();
                   2355:                else
                   2356:                        TBIAU();
                   2357: #endif
                   2358:                pv->pv_flags &= ~PV_PTPAGE;
                   2359:                ptpmap->pm_ptpages--;
                   2360:        }
                   2361:
                   2362:        /*
                   2363:         * Update saved attributes for managed page
                   2364:         */
                   2365:
                   2366:        pv->pv_flags |= bits;
                   2367:        splx(s);
                   2368: }
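
As a usage illustration of the PRM_* flags documented above (a sketch,
not code from this file): a caller that wants to tear down a single
user mapping, flushing TLB and cache state but leaving the PT page in
place even if it becomes empty, would pass all three flags.

    /*
     * Hypothetical caller sketch: remove the mapping at `va', flush TLB
     * and cache entries, but keep the PT page.
     */
    static void
    example_drop_mapping(pmap_t pmap, vaddr_t va)
    {
            pmap_remove_mapping(pmap, va, PT_ENTRY_NULL,
                PRM_TFLUSH | PRM_CFLUSH | PRM_KEEPPTPAGE);
    }
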
                   2369:
                   2370: /*
                   2371:  * pmap_testbit:
                   2372:  *
                   2373:  *     Test the modified/referenced bits of a physical page.
                   2374:  */
                   2375: boolean_t
                   2376: pmap_testbit(pg, bit)
                   2377:        struct vm_page *pg;
                   2378:        int bit;
                   2379: {
                   2380:        struct pv_entry *pv, *pvl;
                   2381:        pt_entry_t *pte;
                   2382:        int s;
                   2383:
                   2384:        s = splvm();
                   2385:        pv = pg_to_pvh(pg);
                   2386:
                   2387:        /*
                   2388:         * Check saved info first
                   2389:         */
                   2390:
                   2391:        if (pv->pv_flags & bit) {
                   2392:                splx(s);
                   2393:                return(TRUE);
                   2394:        }
                   2395: #ifdef M68K_MMU_HP
                   2396:        /*
                   2397:         * Flush VAC to get correct state of any hardware maintained bits.
                   2398:         */
                   2399:        if (pmap_aliasmask && (bit & (PG_U|PG_M)))
                   2400:                DCIS();
                   2401: #endif
                   2402:        /*
                   2403:         * Not found.  Check current mappings, returning immediately if
                   2404:         * found.  Cache a hit to speed future lookups.
                   2405:         */
                   2406:        if (pv->pv_pmap != NULL) {
                   2407:                for (pvl = pv; pvl != NULL; pvl = pvl->pv_next) {
                   2408:                        pte = pmap_pte(pvl->pv_pmap, pvl->pv_va);
                   2409:                        if (*pte & bit) {
                   2410:                                pv->pv_flags |= bit;
                   2411:                                splx(s);
                   2412:                                return(TRUE);
                   2413:                        }
                   2414:                }
                   2415:        }
                   2416:        splx(s);
                   2417:        return(FALSE);
                   2418: }
                   2419:
                   2420: /*
                   2421:  * pmap_changebit:
                   2422:  *
                   2423:  *     Change the modified/referenced bits, or other PTE bits,
                   2424:  *     for a physical page.
                   2425:  */
                   2426: void
                   2427: pmap_changebit(pg, set, mask)
                   2428:        struct vm_page *pg;
                   2429:        int set, mask;
                   2430: {
                   2431:        struct pv_entry *pv;
                   2432:        pt_entry_t *pte, npte;
                   2433:        vaddr_t va;
                   2434:        int s;
                   2435: #if defined(M68040) || defined(M68060)
                   2436:        paddr_t pa;
                   2437: #endif
                   2438: #if defined(M68K_MMU_HP) || defined(M68040) || defined(M68060)
                   2439:        boolean_t firstpage = TRUE;
                   2440: #endif
                   2441:
                   2442:        PMAP_DPRINTF(PDB_BITS,
                   2443:            ("pmap_changebit(%lx, %x, %x)\n", pg, set, mask));
                   2444:
                   2445:        s = splvm();
                   2446:        pv = pg_to_pvh(pg);
                   2447:
                   2448:        /*
                   2449:         * Clear saved attributes (modify, reference)
                   2450:         */
                   2451:
                   2452:        pv->pv_flags &= mask;
                   2453:
                   2454:        /*
                    2455:         * Loop over all current mappings, setting/clearing as appropriate.
                   2456:         * If setting RO do we need to clear the VAC?
                   2457:         */
                   2458:
                   2459:        if (pv->pv_pmap != NULL) {
                   2460: #ifdef PMAP_DEBUG
                   2461:                int toflush = 0;
                   2462: #endif
                   2463:                for (; pv; pv = pv->pv_next) {
                   2464: #ifdef PMAP_DEBUG
                   2465:                        toflush |= (pv->pv_pmap == pmap_kernel()) ? 2 : 1;
                   2466: #endif
                   2467:                        va = pv->pv_va;
                   2468:                        pte = pmap_pte(pv->pv_pmap, va);
                   2469: #ifdef M68K_MMU_HP
                   2470:                        /*
                   2471:                         * Flush VAC to ensure we get correct state of HW bits
                   2472:                         * so we don't clobber them.
                   2473:                         */
                   2474:                        if (firstpage && pmap_aliasmask) {
                   2475:                                firstpage = FALSE;
                   2476:                                DCIS();
                   2477:                        }
                   2478: #endif
                   2479:                        npte = (*pte | set) & mask;
                   2480:                        if (*pte != npte) {
                   2481: #if defined(M68040) || defined(M68060)
                   2482:                                /*
                   2483:                                 * If we are changing caching status or
                   2484:                                 * protection make sure the caches are
                   2485:                                 * flushed (but only once).
                   2486:                                 */
                   2487:                                if (firstpage && (mmutype <= MMU_68040) &&
                   2488:                                    ((set == PG_RO) ||
                   2489:                                     (set & PG_CMASK) ||
                   2490:                                     (mask & PG_CMASK) == 0)) {
                   2491:                                        firstpage = FALSE;
                   2492:                                        pa = VM_PAGE_TO_PHYS(pg);
                   2493:                                        DCFP(pa);
                   2494:                                        ICPP(pa);
                   2495:                                }
                   2496: #endif
                   2497:                                *pte = npte;
                   2498:                                if (active_pmap(pv->pv_pmap))
                   2499:                                        TBIS(va);
                   2500:                        }
                   2501:                }
                   2502:        }
                   2503:        splx(s);
                   2504: }
                   2505:
                   2506: /*
                   2507:  * pmap_enter_ptpage:
                   2508:  *
                   2509:  *     Allocate and map a PT page for the specified pmap/va pair.
                   2510:  */
                   2511: int
                   2512: pmap_enter_ptpage(pmap, va)
                   2513:        pmap_t pmap;
                   2514:        vaddr_t va;
                   2515: {
                   2516:        paddr_t ptpa;
                   2517:        struct vm_page *pg;
                   2518:        struct pv_entry *pv;
                   2519:        st_entry_t *ste;
                   2520:        int s;
                   2521: #if defined(M68040) || defined(M68060)
                   2522:        paddr_t stpa;
                   2523: #endif
                   2524:
                   2525:        PMAP_DPRINTF(PDB_FOLLOW|PDB_ENTER|PDB_PTPAGE,
                   2526:            ("pmap_enter_ptpage: pmap %p, va %lx\n", pmap, va));
                   2527:
                   2528:        /*
                   2529:         * Allocate a segment table if necessary.  Note that it is allocated
                   2530:         * from a private map and not pt_map.  This keeps user page tables
                   2531:         * aligned on segment boundaries in the kernel address space.
                   2532:         * The segment table is wired down.  It will be freed whenever the
                   2533:         * reference count drops to zero.
                   2534:         */
                   2535:        if (pmap->pm_stab == Segtabzero) {
                   2536:                pmap->pm_stab = (st_entry_t *)
                   2537:                        uvm_km_zalloc(st_map, MACHINE_STSIZE);
                   2538:                pmap_extract(pmap_kernel(), (vaddr_t)pmap->pm_stab,
                   2539:                        (paddr_t *)&pmap->pm_stpa);
                   2540: #if defined(M68040) || defined(M68060)
                   2541:                if (mmutype <= MMU_68040) {
                   2542: #ifdef PMAP_DEBUG
                   2543:                        if (dowriteback && dokwriteback) {
                   2544: #endif
                   2545:                        stpa = (paddr_t)pmap->pm_stpa;
                   2546: #if defined(M68060)
                   2547:                        if (mmutype == MMU_68060) {
                   2548:                                while (stpa < (paddr_t)pmap->pm_stpa +
                   2549:                                    MACHINE_STSIZE) {
                   2550:                                        pg = PHYS_TO_VM_PAGE(stpa);
                   2551:                                        pmap_changebit(pg, PG_CI, ~PG_CCB);
                   2552:                                        stpa += PAGE_SIZE;
                   2553:                                }
                   2554:                                DCIS(); /* XXX */
                   2555:                        } else
                   2556: #endif
                   2557:                        {
                   2558:                                pg = PHYS_TO_VM_PAGE(stpa);
                   2559:                                pmap_changebit(pg, 0, ~PG_CCB);
                   2560:                        }
                   2561: #ifdef PMAP_DEBUG
                   2562:                        }
                   2563: #endif
                   2564:                        pmap->pm_stfree = protostfree;
                   2565:                }
                   2566: #endif
                   2567:                /*
                   2568:                 * XXX may have changed segment table pointer for current
                   2569:                 * process so update now to reload hardware.
                   2570:                 */
                   2571:                if (active_user_pmap(pmap))
                   2572:                        PMAP_ACTIVATE(pmap, 1);
                   2573:
                   2574:                PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB,
                   2575:                    ("enter: pmap %p stab %p(%p)\n",
                   2576:                    pmap, pmap->pm_stab, pmap->pm_stpa));
                   2577:        }
                   2578:
                   2579:        ste = pmap_ste(pmap, va);
                   2580: #if defined(M68040) || defined(M68060)
                   2581:        /*
                   2582:         * Allocate level 2 descriptor block if necessary
                   2583:         */
                   2584:        if (mmutype <= MMU_68040) {
                   2585:                if (*ste == SG_NV) {
                   2586:                        int ix;
                   2587:                        caddr_t addr;
                   2588:
                   2589:                        ix = bmtol2(pmap->pm_stfree);
                   2590:                        if (ix == -1) {
                   2591:                                return (ENOMEM);
                   2592:                        }
                   2593:                        pmap->pm_stfree &= ~l2tobm(ix);
                   2594:                        addr = (caddr_t)&pmap->pm_stab[ix*SG4_LEV2SIZE];
                   2595:                        bzero(addr, SG4_LEV2SIZE*sizeof(st_entry_t));
                   2596:                        addr = (caddr_t)&pmap->pm_stpa[ix*SG4_LEV2SIZE];
                   2597:                        *ste = (u_int)addr | SG_RW | SG_U | SG_V;
                   2598:
                   2599:                        PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB,
                   2600:                            ("enter: alloc ste2 %d(%p)\n", ix, addr));
                   2601:                }
                   2602:                ste = pmap_ste2(pmap, va);
                   2603:                /*
                   2604:                 * Since a level 2 descriptor maps a block of SG4_LEV3SIZE
                   2605:                 * level 3 descriptors, we need a chunk of NPTEPG/SG4_LEV3SIZE
                   2606:                 * (16) such descriptors (PAGE_SIZE/SG4_LEV3SIZE bytes) to map a
                   2607:                 * PT page--the unit of allocation.  We set `ste' to point
                   2608:                 * to the first entry of that chunk which is validated in its
                   2609:                 * entirety below.
                   2610:                 */
                   2611:                ste = (st_entry_t *)((int)ste & ~(PAGE_SIZE/SG4_LEV3SIZE-1));
                   2612:
                   2613:                PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB,
                   2614:                    ("enter: ste2 %p (%p)\n", pmap_ste2(pmap, va), ste));
                   2615:        }
                   2616: #endif
                   2617:        va = trunc_page((vaddr_t)pmap_pte(pmap, va));
                   2618:
                   2619:        /*
                   2620:         * In the kernel we allocate a page from the kernel PT page
                   2621:         * free list and map it into the kernel page table map (via
                   2622:         * pmap_enter).
                   2623:         */
                   2624:        if (pmap == pmap_kernel()) {
                   2625:                struct kpt_page *kpt;
                   2626:
                   2627:                s = splvm();
                   2628:                if ((kpt = kpt_free_list) == NULL) {
                   2629:                        /*
                   2630:                         * No PT pages available.
                   2631:                         * Try once to free up unused ones.
                   2632:                         */
                   2633:                        PMAP_DPRINTF(PDB_COLLECT,
                   2634:                            ("enter: no KPT pages, collecting...\n"));
                   2635:                        pmap_collect(pmap_kernel());
                   2636:                        if ((kpt = kpt_free_list) == NULL) {
                   2637:                                splx(s);
                   2638:                                return (ENOMEM);
                   2639:                        }
                   2640:                }
                   2641:                kpt_free_list = kpt->kpt_next;
                   2642:                kpt->kpt_next = kpt_used_list;
                   2643:                kpt_used_list = kpt;
                   2644:                ptpa = kpt->kpt_pa;
                   2645:                pg = PHYS_TO_VM_PAGE(ptpa);
                   2646:                bzero((caddr_t)kpt->kpt_va, PAGE_SIZE);
                   2647:                pmap_enter(pmap, va, ptpa, VM_PROT_READ | VM_PROT_WRITE,
                   2648:                    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
                   2649: #if defined(M68060)
                   2650:                if (mmutype == MMU_68060)
                   2651:                        pmap_changebit(pg, PG_CI, ~PG_CCB);
                   2652: #endif
                   2653:                pmap_update(pmap);
                   2654: #ifdef PMAP_DEBUG
                   2655:                if (pmapdebug & (PDB_ENTER|PDB_PTPAGE)) {
                   2656:                        int ix = pmap_ste(pmap, va) - pmap_ste(pmap, 0);
                   2657:
                   2658:                        printf("enter: add &Sysptmap[%d]: %x (KPT page %lx)\n",
                   2659:                               ix, Sysptmap[ix], kpt->kpt_va);
                   2660:                }
                   2661: #endif
                   2662:                splx(s);
                   2663:        } else {
                   2664:
                   2665:                /*
                   2666:                 * For user processes we just allocate a page from the
                   2667:                 * VM system.  Note that we set the page "wired" count to 1,
                   2668:                 * which is what we use to check if the page can be freed.
                   2669:                 * See pmap_remove_mapping().
                   2670:                 *
                   2671:                 * Count the segment table reference first so that we won't
                   2672:                 * lose the segment table when low on memory.
                   2673:                 */
                   2674:
                   2675:                pmap->pm_sref++;
                   2676:                PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE,
                   2677:                    ("enter: about to alloc UPT pg at %lx\n", va));
                   2678:                while ((pg = uvm_pagealloc(uvm.kernel_object, va, NULL,
                   2679:                    UVM_PGA_ZERO)) == NULL) {
                   2680:                        uvm_wait("ptpage");
                   2681:                }
                   2682:                atomic_clearbits_int(&pg->pg_flags, PG_BUSY|PG_FAKE);
                   2683:                UVM_PAGE_OWN(pg, NULL);
                   2684:                ptpa = VM_PAGE_TO_PHYS(pg);
                   2685:                pmap_enter(pmap_kernel(), va, ptpa,
                   2686:                    VM_PROT_READ | VM_PROT_WRITE,
                   2687:                    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
                   2688:                pmap_update(pmap_kernel());
                   2689:        }
                   2690: #if defined(M68040) || defined(M68060)
                   2691:        /*
                   2692:         * Turn off copyback caching of page table pages,
                   2693:         * could get ugly otherwise.
                   2694:         */
                   2695: #ifdef PMAP_DEBUG
                   2696:        if (dowriteback && dokwriteback)
                   2697: #endif
                   2698:        if (mmutype <= MMU_68040) {
                   2699: #ifdef PMAP_DEBUG
                   2700:                pt_entry_t *pte = pmap_pte(pmap_kernel(), va);
                   2701:                if ((pmapdebug & PDB_PARANOIA) && (*pte & PG_CCB) == 0)
                   2702:                        printf("%s PT no CCB: kva=%lx ptpa=%lx pte@%p=%x\n",
                   2703:                               pmap == pmap_kernel() ? "Kernel" : "User",
                   2704:                               va, ptpa, pte, *pte);
                   2705: #endif
                   2706: #ifdef M68060
                   2707:                if (mmutype == MMU_68060) {
                   2708:                        pmap_changebit(pg, PG_CI, ~PG_CCB);
                   2709:                        DCIS();
                   2710:                } else
                   2711: #endif
                   2712:                        pmap_changebit(pg, 0, ~PG_CCB);
                   2713:        }
                   2714: #endif
                   2715:        /*
                   2716:         * Locate the PV entry in the kernel for this PT page and
                   2717:         * record the STE address.  This is so that we can invalidate
                   2718:         * the STE when we remove the mapping for the page.
                   2719:         */
                   2720:        pv = pg_to_pvh(pg);
                   2721:        s = splvm();
                   2722:        if (pv) {
                   2723:                pv->pv_flags |= PV_PTPAGE;
                   2724:                do {
                   2725:                        if (pv->pv_pmap == pmap_kernel() && pv->pv_va == va)
                   2726:                                break;
                   2727:                } while ((pv = pv->pv_next));
                   2728:        }
                   2729: #ifdef PMAP_DEBUG
                   2730:        if (pv == NULL)
                   2731:                panic("pmap_enter_ptpage: PT page not entered");
                   2732: #endif
                   2733:        pv->pv_ptste = ste;
                   2734:        pv->pv_ptpmap = pmap;
                   2735:
                   2736:        PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE,
                   2737:            ("enter: new PT page at PA %lx, ste at %p\n", ptpa, ste));
                   2738:
                   2739:        /*
                   2740:         * Map the new PT page into the segment table.
                   2741:         * Also increment the reference count on the segment table if this
                   2742:         * was a user page table page.  Note that we don't use vm_map_pageable
                    2743:         * to keep the count like we do for PT pages; this is mostly because
                   2744:         * it would be difficult to identify ST pages in pmap_pageable to
                   2745:         * release them.  We also avoid the overhead of vm_map_pageable.
                   2746:         */
                   2747: #if defined(M68040) || defined(M68060)
                   2748:        if (mmutype <= MMU_68040) {
                   2749:                st_entry_t *este;
                   2750:
                   2751:                for (este = &ste[NPTEPG/SG4_LEV3SIZE]; ste < este; ste++) {
                   2752:                        *ste = ptpa | SG_U | SG_RW | SG_V;
                   2753:                        ptpa += SG4_LEV3SIZE * sizeof(st_entry_t);
                   2754:                }
                   2755:        } else
                   2756: #endif
                   2757:        *ste = (ptpa & SG_FRAME) | SG_RW | SG_V;
                   2758:        if (pmap != pmap_kernel()) {
                   2759:                PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB,
                   2760:                    ("enter: stab %p refcnt %d\n",
                   2761:                    pmap->pm_stab, pmap->pm_sref));
                   2762:        }
                   2763:
                   2764: #if defined(M68060)
                   2765:        if (mmutype == MMU_68060) {
                   2766:                /*
                   2767:                 * Flush stale TLB info.
                   2768:                 */
                   2769:                if (pmap == pmap_kernel())
                   2770:                        TBIAS();
                   2771:                else
                   2772:                        TBIAU();
                   2773:        }
                   2774: #endif
                   2775:        pmap->pm_ptpages++;
                   2776:        splx(s);
                   2777:        return (0);
                   2778: }
                   2779:
                   2780: /*
                   2781:  * pmap_ptpage_addref:
                   2782:  *
                   2783:  *     Add a reference to the specified PT page.
                   2784:  */
                   2785: void
                   2786: pmap_ptpage_addref(ptpva)
                   2787:        vaddr_t ptpva;
                   2788: {
                   2789:        struct vm_page *pg;
                   2790:
                   2791:        simple_lock(&uvm.kernel_object->vmobjlock);
                   2792:        pg = uvm_pagelookup(uvm.kernel_object, ptpva - vm_map_min(kernel_map));
                   2793:        pg->wire_count++;
                   2794:        PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB,
                   2795:            ("ptpage addref: pg %p now %d\n", pg, pg->wire_count));
                   2796:        simple_unlock(&uvm.kernel_object->vmobjlock);
                   2797: }
                   2798:
                   2799: /*
                   2800:  * pmap_ptpage_delref:
                   2801:  *
                   2802:  *     Delete a reference to the specified PT page.
                   2803:  */
                   2804: int
                   2805: pmap_ptpage_delref(ptpva)
                   2806:        vaddr_t ptpva;
                   2807: {
                   2808:        struct vm_page *pg;
                   2809:        int rv;
                   2810:
                   2811:        simple_lock(&uvm.kernel_object->vmobjlock);
                   2812:        pg = uvm_pagelookup(uvm.kernel_object, ptpva - vm_map_min(kernel_map));
                   2813:        rv = --pg->wire_count;
                   2814:        PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB,
                   2815:            ("ptpage delref: pg %p now %d\n", pg, pg->wire_count));
                   2816:        simple_unlock(&uvm.kernel_object->vmobjlock);
                   2817:        return (rv);
                   2818: }
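
For context, a sketch (assumed, not from this file) of how these wiring
counts bracket a user PTE's lifetime: each PTE installed in a user PT
page is matched by an addref, and each PTE torn down is matched by a
delref; once pmap_ptpage_delref() returns zero the PT page holds no
live mappings and may be freed, as pmap_remove_mapping() above does.

    /*
     * Hypothetical sketch: install a PTE in a user PT page and account
     * for it; the matching delref happens on removal.
     */
    static void
    example_install_pte(pt_entry_t *pte, pt_entry_t npte)
    {
            pmap_ptpage_addref(trunc_page((vaddr_t)pte));   /* one more live PTE */
            *pte = npte;
    }
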
                   2819:
                   2820: void
                   2821: pmap_proc_iflush(p, va, len)
                   2822:        struct proc     *p;
                   2823:        vaddr_t         va;
                   2824:        vsize_t         len;
                   2825: {
                   2826:        (void)cachectl(p, CC_EXTPURGE | CC_IPURGE, va, len);
                   2827: }
                   2828:
                   2829: #ifdef PMAP_DEBUG
                   2830: /*
                   2831:  * pmap_pvdump:
                   2832:  *
                   2833:  *     Dump the contents of the PV list for the specified physical page.
                   2834:  */
                   2835: void
                   2836: pmap_pvdump(pa)
                   2837:        paddr_t pa;
                   2838: {
                   2839:        struct pv_entry *pv;
                   2840:
                   2841:        printf("pa %lx", pa);
                   2842:        for (pv = pa_to_pvh(pa); pv; pv = pv->pv_next)
                   2843:                printf(" -> pmap %p, va %lx, ptste %p, ptpmap %p, flags %x",
                   2844:                       pv->pv_pmap, pv->pv_va, pv->pv_ptste, pv->pv_ptpmap,
                   2845:                       pv->pv_flags);
                   2846:        printf("\n");
                   2847: }
                   2848:
                   2849: /*
                   2850:  * pmap_check_wiring:
                   2851:  *
                   2852:  *     Count the number of valid mappings in the specified PT page,
                   2853:  *     and ensure that it is consistent with the number of wirings
                   2854:  *     to that page that the VM system has.
                   2855:  */
                   2856: void
                   2857: pmap_check_wiring(str, va)
                   2858:        char *str;
                   2859:        vaddr_t va;
                   2860: {
                   2861:        pt_entry_t *pte;
                   2862:        paddr_t pa;
                   2863:        struct vm_page *pg;
                   2864:        int count;
                   2865:
                   2866:        if (!pmap_ste_v(pmap_kernel(), va) ||
                   2867:            !pmap_pte_v(pmap_pte(pmap_kernel(), va)))
                   2868:                return;
                   2869:
                   2870:        pa = pmap_pte_pa(pmap_pte(pmap_kernel(), va));
                   2871:        pg = PHYS_TO_VM_PAGE(pa);
                   2872:        if (pg->wire_count >= PAGE_SIZE / sizeof(pt_entry_t)) {
                   2873:                printf("*%s*: 0x%lx: wire count %d\n", str, va, pg->wire_count);
                   2874:                return;
                   2875:        }
                   2876:
                   2877:        count = 0;
                   2878:        for (pte = (pt_entry_t *)va; pte < (pt_entry_t *)(va + PAGE_SIZE);
                   2879:            pte++)
                   2880:                if (*pte)
                   2881:                        count++;
                   2882:        if (pg->wire_count != count)
                   2883:                printf("*%s*: 0x%lx: w%d/a%d\n",
                   2884:                       str, va, pg->wire_count, count);
                   2885: }
                   2886: #endif /* PMAP_DEBUG */
