/*	$OpenBSD: pmap.c,v 1.101 2007/05/27 15:46:02 drahn Exp $ */

/*
 * Copyright (c) 2001, 2002, 2007 Dale Rahn.
 * All rights reserved.
 *
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 */

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/user.h>
#include <sys/systm.h>
#include <sys/pool.h>

#include <uvm/uvm.h>

#include <machine/pcb.h>
#include <machine/powerpc.h>
#include <machine/pmap.h>

#include <machine/db_machdep.h>
#include <ddb/db_extern.h>
#include <ddb/db_output.h>

struct pmap kernel_pmap_;
static struct mem_region *pmap_mem, *pmap_avail;
struct mem_region pmap_allocated[10];
int pmap_cnt_avail;
int pmap_cnt_allocated;

struct pte_64	*pmap_ptable64;
struct pte_32	*pmap_ptable32;
int	pmap_ptab_cnt;
u_int	pmap_ptab_mask;

#define HTABSIZE_32	(pmap_ptab_cnt * 64)
#define HTABMEMSZ_64	(pmap_ptab_cnt * 8 * sizeof(struct pte_64))
#define HTABSIZE_64	(ffs(pmap_ptab_cnt) - 12)
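
/*
 * Sizing sketch (illustrative, not from the original file): struct pte_32
 * is 8 bytes, so a 32-bit PTEG of 8 PTEs is 64 bytes, which gives the
 * HTABSIZE_32 byte count above.  For HTABSIZE_64, assuming pmap_ptab_cnt
 * is a power of two (2^n), ffs(pmap_ptab_cnt) - 12 evaluates to n - 11,
 * i.e. log2(PTEG count) - 11, presumably the encoded hash table size
 * field as loaded into SDR1 on 64-bit processors.
 */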

static u_int usedsr[NPMAPS / sizeof(u_int) / 8];
paddr_t zero_page;
paddr_t copy_src_page;
paddr_t copy_dst_page;

struct pte_desc {
	/* Linked list of phys -> virt entries */
	LIST_ENTRY(pte_desc) pted_pv_list;
	union {
		struct pte_32 pted_pte32;
		struct pte_64 pted_pte64;
	} p;
	pmap_t pted_pmap;
	vaddr_t pted_va;
};
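
/*
 * Note (added for exposition): pted_va stores the page-aligned VA in its
 * upper bits; because PAGE_SIZE alignment leaves the low bits free, the
 * PTED_VA_* flag bits defined below are packed into the same word.  One
 * pte_desc thus serves the VP table, the PV list and the PTE image at once.
 */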

void print_pteg(pmap_t pm, vaddr_t va);

static inline void tlbsync(void);
static inline void tlbie(vaddr_t ea);
void tlbia(void);

void pmap_attr_save(paddr_t pa, u_int32_t bits);
void pmap_page_ro64(pmap_t pm, vaddr_t va, vm_prot_t prot);
void pmap_page_ro32(pmap_t pm, vaddr_t va);

/*
 * LOCKING structures.
 * This may not be correct, and doesn't do anything yet.
 */
#define pmap_simplelock_pm(pm)
#define pmap_simpleunlock_pm(pm)
#define pmap_simplelock_pv(pm)
#define pmap_simpleunlock_pv(pm)


/* VP routines */
void pmap_vp_enter(pmap_t pm, vaddr_t va, struct pte_desc *pted);
struct pte_desc *pmap_vp_remove(pmap_t pm, vaddr_t va);
void pmap_vp_destroy(pmap_t pm);
struct pte_desc *pmap_vp_lookup(pmap_t pm, vaddr_t va);

/* PV routines */
void pmap_enter_pv(struct pte_desc *pted, struct vm_page *);
void pmap_remove_pv(struct pte_desc *pted);


/* pte hash table routines */
void pte_insert32(struct pte_desc *pted);
void pte_insert64(struct pte_desc *pted);
void pmap_hash_remove(struct pte_desc *pted);
void pmap_fill_pte64(pmap_t pm, vaddr_t va, paddr_t pa,
    struct pte_desc *pted, vm_prot_t prot, int flags, int cache);
void pmap_fill_pte32(pmap_t pm, vaddr_t va, paddr_t pa,
    struct pte_desc *pted, vm_prot_t prot, int flags, int cache);

void pmap_syncicache_user_virt(pmap_t pm, vaddr_t va);

void _pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, int flags,
    int cache);
void pmap_remove_pg(pmap_t pm, vaddr_t va);
void pmap_kremove_pg(vaddr_t va);

/* setup/initialization functions */
void pmap_avail_setup(void);
void pmap_avail_fixup(void);
void pmap_remove_avail(paddr_t base, paddr_t end);
void *pmap_steal_avail(size_t size, int align);

/* asm interface */
int pte_spill_r(u_int32_t va, u_int32_t msr, u_int32_t access_type,
    int exec_fault);

u_int32_t pmap_setusr(pmap_t pm, vaddr_t va);
void pmap_popusr(u_int32_t oldsr);

/* pte invalidation */
void pte_zap(void *ptp, struct pte_desc *pted);

/* debugging */
void pmap_print_pted(struct pte_desc *pted, int(*print)(const char *, ...));

/* XXX - panic on pool get failures? */
struct pool pmap_pmap_pool;
struct pool pmap_vp_pool;
struct pool pmap_pted_pool;

int pmap_initialized = 0;
int physmem;
int physmaxaddr;

/* virtual to physical helpers */
static inline int
VP_SR(vaddr_t va)
{
	return (va >> VP_SR_POS) & VP_SR_MASK;
}

static inline int
VP_IDX1(vaddr_t va)
{
	return (va >> VP_IDX1_POS) & VP_IDX1_MASK;
}

static inline int
VP_IDX2(vaddr_t va)
{
	return (va >> VP_IDX2_POS) & VP_IDX2_MASK;
}

#if VP_IDX1_SIZE != VP_IDX2_SIZE
#error pmap allocation code expects IDX1 and IDX2 size to be same
#endif
struct pmapvp {
	void *vp[VP_IDX1_SIZE];
};

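/*
 * Lookup sketch (follows from the VP_* helpers above): a VA is split
 * into segment, first- and second-level indexes, and resolved through
 * a three-level trie rooted at the pmap:
 *
 *	pm->pm_vp[VP_SR(va)]		-> first level pmapvp
 *	    ->vp[VP_IDX1(va)]		-> second level pmapvp
 *		->vp[VP_IDX2(va)]	-> struct pte_desc
 *
 * A NULL pointer at any level means no mapping exists in that range.
 */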

/*
 * VP routines, virtual to physical translation information.
 * These data structures are attached to the pmap, per process.
 */

/*
 * pmap_kernel() mappings are not to be removed from the vp table;
 * they were statically initialized during the initial pmap setup, so
 * that no memory allocation is ever necessary for pmap_kernel()
 * mappings.  Otherwise bad race conditions can appear.
 */
struct pte_desc *
pmap_vp_lookup(pmap_t pm, vaddr_t va)
{
	struct pmapvp *vp1;
	struct pmapvp *vp2;
	struct pte_desc *pted;

	vp1 = pm->pm_vp[VP_SR(va)];
	if (vp1 == NULL) {
		return NULL;
	}

	vp2 = vp1->vp[VP_IDX1(va)];
	if (vp2 == NULL) {
		return NULL;
	}

	pted = vp2->vp[VP_IDX2(va)];

	return pted;
}

/*
 * Remove, and return, pted at specified address, NULL if not present
 */
struct pte_desc *
pmap_vp_remove(pmap_t pm, vaddr_t va)
{
	struct pmapvp *vp1;
	struct pmapvp *vp2;
	struct pte_desc *pted;

	vp1 = pm->pm_vp[VP_SR(va)];
	if (vp1 == NULL) {
		return NULL;
	}

	vp2 = vp1->vp[VP_IDX1(va)];
	if (vp2 == NULL) {
		return NULL;
	}

	pted = vp2->vp[VP_IDX2(va)];
	vp2->vp[VP_IDX2(va)] = NULL;

	return pted;
}

/*
 * Create a V -> P mapping for the given pmap and virtual address
 * with reference to the pte descriptor that is used to map the page.
 * This code should track vp table allocations so they can be freed
 * efficiently.
 *
 * Should this be called under splvm?
 */
void
pmap_vp_enter(pmap_t pm, vaddr_t va, struct pte_desc *pted)
{
	struct pmapvp *vp1;
	struct pmapvp *vp2;
	int s;

	pmap_simplelock_pm(pm);

	vp1 = pm->pm_vp[VP_SR(va)];
	if (vp1 == NULL) {
		s = splvm();
		vp1 = pool_get(&pmap_vp_pool, PR_NOWAIT);
		splx(s);
		bzero(vp1, sizeof (struct pmapvp));
		pm->pm_vp[VP_SR(va)] = vp1;
	}

	vp2 = vp1->vp[VP_IDX1(va)];
	if (vp2 == NULL) {
		s = splvm();
		vp2 = pool_get(&pmap_vp_pool, PR_NOWAIT);
		splx(s);
		bzero(vp2, sizeof (struct pmapvp));
		vp1->vp[VP_IDX1(va)] = vp2;
	}

	vp2->vp[VP_IDX2(va)] = pted;

	pmap_simpleunlock_pm(pm);
}

/* PTE manipulation/calculations */
static inline void
tlbie(vaddr_t va)
{
	__asm volatile ("tlbie %0" :: "r"(va));
}

static inline void
tlbsync(void)
{
	__asm volatile ("sync; tlbsync; sync");
}

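/*
 * Flush the entire TLB.  The loop issues tlbie on one page in each of
 * 64 consecutive 4K pages (0x00040000 / 0x00001000); presumably this
 * covers every TLB congruence class on the processors supported here.
 */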
void
tlbia(void)
{
	vaddr_t va;

	__asm volatile ("sync");
	for (va = 0; va < 0x00040000; va += 0x00001000)
		tlbie(va);
	tlbsync();
}

static inline int
ptesr(sr_t *sr, vaddr_t va)
{
	return sr[(u_int)va >> ADDR_SR_SHIFT];
}

static inline int
pteidx(sr_t sr, vaddr_t va)
{
	int hash;

	hash = (sr & SR_VSID) ^ (((u_int)va & ADDR_PIDX) >> ADDR_PIDX_SHIFT);
	return hash & pmap_ptab_mask;
}
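
/*
 * Worked example (illustrative values): with pmap_ptab_mask == 0x3ff,
 * a VSID hash of 0x1234 and a page-index hash of 0x0567 give
 * idx = (0x1234 ^ 0x0567) & 0x3ff = 0x1753 & 0x3ff = 0x353.
 * The secondary hash used elsewhere is simply idx ^ pmap_ptab_mask.
 */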

#define PTED_VA_PTEGIDX_M	0x07
#define PTED_VA_HID_M		0x08
#define PTED_VA_MANAGED_M	0x10
#define PTED_VA_WIRED_M		0x20
#define PTED_VA_EXEC_M		0x40

static inline u_int32_t
PTED_HID(struct pte_desc *pted)
{
	return (pted->pted_va & PTED_VA_HID_M);
}

static inline u_int32_t
PTED_PTEGIDX(struct pte_desc *pted)
{
	return (pted->pted_va & PTED_VA_PTEGIDX_M);
}

static inline u_int32_t
PTED_MANAGED(struct pte_desc *pted)
{
	return (pted->pted_va & PTED_VA_MANAGED_M);
}

static inline u_int32_t
PTED_WIRED(struct pte_desc *pted)
{
	return (pted->pted_va & PTED_VA_WIRED_M);
}

static inline u_int32_t
PTED_VALID(struct pte_desc *pted)
{
	if (ppc_proc_is_64b)
		return (pted->p.pted_pte64.pte_hi & PTE_VALID_64);
	else
		return (pted->p.pted_pte32.pte_hi & PTE_VALID_32);
}

/*
 * PV entries -
 * manipulate the physical to virtual translations for the entire system.
 *
 * QUESTION: should all mapped memory be stored in PV tables? Or
 * is it alright to only store "ram" memory. Currently device mappings
 * are not stored.
 * It makes sense to pre-allocate mappings for all of "ram" memory, since
 * it is likely that it will be mapped at some point, but would it also
 * make sense to use a tree/table like the one used for the pmap to store
 * device mappings?
 * Further notes: It seems that the PV table is only used for pmap_protect
 * and other paging related operations. Given this, it is not necessary
 * to store any pmap_kernel() entries in PV tables, and it does not make
 * sense to store device mappings in PV either.
 *
 * Note: unlike other powerpc pmap designs, the array is only an array
 * of pointers. Since the same structure is used for holding information
 * in the VP table, the PV table, and for kernel mappings (the wired
 * entries), one data structure is allocated to hold all of the info,
 * instead of replicating it multiple times.
 *
 * One issue with making this a single data structure is that two pointers
 * are wasted for every page which does not map ram (device mappings); this
 * should be a low percentage of mapped pages in the system, so it should
 * not cause too noticeable an unnecessary ram consumption.
 */

void
pmap_enter_pv(struct pte_desc *pted, struct vm_page *pg)
{
	if (__predict_false(!pmap_initialized)) {
		return;
	}

	LIST_INSERT_HEAD(&(pg->mdpage.pv_list), pted, pted_pv_list);
	pted->pted_va |= PTED_VA_MANAGED_M;
}

void
pmap_remove_pv(struct pte_desc *pted)
{
	LIST_REMOVE(pted, pted_pv_list);
}


/* PTE_CHG_32 == PTE_CHG_64 */
/* PTE_REF_32 == PTE_REF_64 */
static __inline u_int
pmap_pte2flags(u_int32_t pte)
{
	return (((pte & PTE_REF_32) ? PG_PMAP_REF : 0) |
	    ((pte & PTE_CHG_32) ? PG_PMAP_MOD : 0));
}

static __inline u_int
pmap_flags2pte(u_int32_t flags)
{
	return (((flags & PG_PMAP_REF) ? PTE_REF_32 : 0) |
	    ((flags & PG_PMAP_MOD) ? PTE_CHG_32 : 0));
}

void
pmap_attr_save(paddr_t pa, u_int32_t bits)
{
	struct vm_page *pg;

	pg = PHYS_TO_VM_PAGE(pa);
	if (pg == NULL)
		return;

	atomic_setbits_int(&pg->pg_flags, pmap_pte2flags(bits));
}

int
pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
{
	struct pte_desc *pted;
	struct vm_page *pg;
	int s;
	int need_sync = 0;
	int cache;

	/* MP - Acquire lock for this pmap */

	s = splvm();
	pted = pmap_vp_lookup(pm, va);
	if (pted && PTED_VALID(pted)) {
		pmap_remove_pg(pm, va);
		/* we lost our pted if it was user */
		if (pm != pmap_kernel())
			pted = pmap_vp_lookup(pm, va);
	}

	pm->pm_stats.resident_count++;

	/* Do not have pted for this, get one and put it in VP */
	if (pted == NULL) {
		pted = pool_get(&pmap_pted_pool, PR_NOWAIT);
		bzero(pted, sizeof (*pted));
		pmap_vp_enter(pm, va, pted);
	}

	/* Calculate PTE */
	pg = PHYS_TO_VM_PAGE(pa);
	if (pg != NULL)
		cache = PMAP_CACHE_WB; /* managed memory is cacheable */
	else
		cache = PMAP_CACHE_CI;

	if (ppc_proc_is_64b)
		pmap_fill_pte64(pm, va, pa, pted, prot, flags, cache);
	else
		pmap_fill_pte32(pm, va, pa, pted, prot, flags, cache);

	if (pg != NULL) {
		pmap_enter_pv(pted, pg); /* only managed mem */
	}

	/*
	 * Insert into HTAB
	 * We were told to map the page, probably called from vm_fault,
	 * so map the page!
	 */
	if (ppc_proc_is_64b)
		pte_insert64(pted);
	else
		pte_insert32(pted);

	if (prot & VM_PROT_EXECUTE) {
		u_int sn = VP_SR(va);

		pm->pm_exec[sn]++;
		if (pm->pm_sr[sn] & SR_NOEXEC)
			pm->pm_sr[sn] &= ~SR_NOEXEC;

		if (pg != NULL) {
			need_sync = ((pg->pg_flags & PG_PMAP_EXE) == 0);
			atomic_setbits_int(&pg->pg_flags, PG_PMAP_EXE);
		} else
			need_sync = 1;
	} else {
		/*
		 * Should we be paranoid about writeable non-exec
		 * mappings ? if so, clear the exec tag
		 */
		if ((prot & VM_PROT_WRITE) && (pg != NULL))
			atomic_clearbits_int(&pg->pg_flags, PG_PMAP_EXE);
	}

	splx(s);

	/* only instruction sync executable pages */
	if (need_sync)
		pmap_syncicache_user_virt(pm, va);

	/* MP - free pmap lock */
	return 0;
}

/*
 * Remove the given range of mapping entries.
 */
void
pmap_remove(pmap_t pm, vaddr_t va, vaddr_t endva)
{
	int i_sr, s_sr, e_sr;
	int i_vp1, s_vp1, e_vp1;
	int i_vp2, s_vp2, e_vp2;
	struct pmapvp *vp1;
	struct pmapvp *vp2;

	/*
	 * I suspect that if this loop were unrolled better
	 * it would have better performance, testing i_sr and i_vp1
	 * in the middle loop seems excessive
	 */

	s_sr = VP_SR(va);
	e_sr = VP_SR(endva);
	for (i_sr = s_sr; i_sr <= e_sr; i_sr++) {
		vp1 = pm->pm_vp[i_sr];
		if (vp1 == NULL)
			continue;

		if (i_sr == s_sr)
			s_vp1 = VP_IDX1(va);
		else
			s_vp1 = 0;

		if (i_sr == e_sr)
			e_vp1 = VP_IDX1(endva);
		else
			e_vp1 = VP_IDX1_SIZE-1;

		for (i_vp1 = s_vp1; i_vp1 <= e_vp1; i_vp1++) {
			vp2 = vp1->vp[i_vp1];
			if (vp2 == NULL)
				continue;

			if ((i_sr == s_sr) && (i_vp1 == s_vp1))
				s_vp2 = VP_IDX2(va);
			else
				s_vp2 = 0;

			if ((i_sr == e_sr) && (i_vp1 == e_vp1))
				e_vp2 = VP_IDX2(endva);
			else
				e_vp2 = VP_IDX2_SIZE;

			for (i_vp2 = s_vp2; i_vp2 < e_vp2; i_vp2++) {
				if (vp2->vp[i_vp2] != NULL) {
					pmap_remove_pg(pm,
					    (i_sr << VP_SR_POS) |
					    (i_vp1 << VP_IDX1_POS) |
					    (i_vp2 << VP_IDX2_POS));
				}
			}
		}
	}
}

/*
 * remove a single mapping, notice that this code is O(1)
 */
void
pmap_remove_pg(pmap_t pm, vaddr_t va)
{
	struct pte_desc *pted;
	int s;

	/*
	 * HASH needs to be locked here as well as pmap, and pv list,
	 * so that we know the mapping information is either valid,
	 * or that the mapping is not present in the hash table.
	 */
	s = splvm();
	if (pm == pmap_kernel()) {
		pted = pmap_vp_lookup(pm, va);
		if (pted == NULL || !PTED_VALID(pted)) {
			splx(s);
			return;
		}
	} else {
		pted = pmap_vp_remove(pm, va);
		if (pted == NULL || !PTED_VALID(pted)) {
			splx(s);
			return;
		}
	}
	pm->pm_stats.resident_count--;

	pmap_hash_remove(pted);

	if (pted->pted_va & PTED_VA_EXEC_M) {
		u_int sn = VP_SR(va);

		pted->pted_va &= ~PTED_VA_EXEC_M;
		pm->pm_exec[sn]--;
		if (pm->pm_exec[sn] == 0)
			pm->pm_sr[sn] |= SR_NOEXEC;
	}

	if (ppc_proc_is_64b)
		pted->p.pted_pte64.pte_hi &= ~PTE_VALID_64;
	else
		pted->p.pted_pte32.pte_hi &= ~PTE_VALID_32;

	if (PTED_MANAGED(pted))
		pmap_remove_pv(pted);

	if (pm != pmap_kernel())
		pool_put(&pmap_pted_pool, pted);

	splx(s);
}

/*
 * Enter a kernel mapping for the given page.
 * kernel mappings have a larger set of prerequisites than normal mappings.
 *
 * 1. no memory should be allocated to create a kernel mapping.
 * 2. a vp mapping should already exist, even if invalid. (see 1)
 * 3. all vp tree mappings should already exist (see 1)
 */
void
_pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, int flags, int cache)
{
	struct pte_desc *pted;
	int s;
	pmap_t pm;

	pm = pmap_kernel();

	/* MP - lock pmap. */
	s = splvm();

	pted = pmap_vp_lookup(pm, va);
	if (pted && PTED_VALID(pted))
		pmap_kremove_pg(va); /* pted is reused */

	pm->pm_stats.resident_count++;

	/* Do not have pted for this, get one and put it in VP */
	if (pted == NULL) {
		/* XXX - future panic? */
		printf("pted not preallocated in pmap_kernel() va %lx pa %lx\n",
		    va, pa);
		pted = pool_get(&pmap_pted_pool, PR_NOWAIT);
		bzero(pted, sizeof (*pted));
		pmap_vp_enter(pm, va, pted);
	}

	if (cache == PMAP_CACHE_DEFAULT) {
		if (PHYS_TO_VM_PAGE(pa) != NULL)
			cache = PMAP_CACHE_WB; /* managed memory is cacheable */
		else
			cache = PMAP_CACHE_CI;
	}

	/* Calculate PTE */
	if (ppc_proc_is_64b)
		pmap_fill_pte64(pm, va, pa, pted, prot, flags, cache);
	else
		pmap_fill_pte32(pm, va, pa, pted, prot, flags, cache);

	/*
	 * Insert into HTAB
	 * We were told to map the page, probably called from vm_fault,
	 * so map the page!
	 */
	if (ppc_proc_is_64b)
		pte_insert64(pted);
	else
		pte_insert32(pted);

	pted->pted_va |= PTED_VA_WIRED_M;

	if (prot & VM_PROT_EXECUTE) {
		u_int sn = VP_SR(va);

		pm->pm_exec[sn]++;
		if (pm->pm_sr[sn] & SR_NOEXEC)
			pm->pm_sr[sn] &= ~SR_NOEXEC;
	}

	splx(s);
}

void
pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
{
	_pmap_kenter_pa(va, pa, prot, 0, PMAP_CACHE_DEFAULT);
}

void
pmap_kenter_cache(vaddr_t va, paddr_t pa, vm_prot_t prot, int cacheable)
{
	_pmap_kenter_pa(va, pa, prot, 0, cacheable);
}


/*
 * remove kernel (pmap_kernel()) mapping, one page
 */
void
pmap_kremove_pg(vaddr_t va)
{
	struct pte_desc *pted;
	pmap_t pm;
	int s;

	pm = pmap_kernel();
	pted = pmap_vp_lookup(pm, va);
	if (pted == NULL)
		return;

	if (!PTED_VALID(pted))
		return; /* not mapped */

	s = splvm();

	pm->pm_stats.resident_count--;

	/*
	 * HASH needs to be locked here as well as pmap, and pv list,
	 * so that we know the mapping information is either valid,
	 * or that the mapping is not present in the hash table.
	 */
	pmap_hash_remove(pted);

	if (pted->pted_va & PTED_VA_EXEC_M) {
		u_int sn = VP_SR(va);

		pted->pted_va &= ~PTED_VA_EXEC_M;
		pm->pm_exec[sn]--;
		if (pm->pm_exec[sn] == 0)
			pm->pm_sr[sn] |= SR_NOEXEC;
	}

	if (PTED_MANAGED(pted))
		pmap_remove_pv(pted);

	/* invalidate pted; */
	if (ppc_proc_is_64b)
		pted->p.pted_pte64.pte_hi &= ~PTE_VALID_64;
	else
		pted->p.pted_pte32.pte_hi &= ~PTE_VALID_32;

	splx(s);
}

/*
 * remove kernel (pmap_kernel()) mappings
 */
void
pmap_kremove(vaddr_t va, vsize_t len)
{
	for (len >>= PAGE_SHIFT; len > 0; len--, va += PAGE_SIZE)
		pmap_kremove_pg(va);
}

void
pte_zap(void *ptp, struct pte_desc *pted)
{
	struct pte_64 *ptp64 = (void*) ptp;
	struct pte_32 *ptp32 = (void*) ptp;

	if (ppc_proc_is_64b)
		ptp64->pte_hi &= ~PTE_VALID_64;
	else
		ptp32->pte_hi &= ~PTE_VALID_32;

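	/*
	 * Invalidation sequence (as understood from the PowerPC
	 * architecture): sync orders the pte_hi update above before the
	 * tlbie, tlbie invalidates any cached translation for the VA,
	 * and tlbsync waits for the invalidate broadcast to complete on
	 * other processors.
	 */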
	__asm volatile ("sync");
	tlbie(pted->pted_va);
	__asm volatile ("sync");
	tlbsync();
	__asm volatile ("sync");
	if (ppc_proc_is_64b) {
		if (PTED_MANAGED(pted))
			pmap_attr_save(pted->p.pted_pte64.pte_lo & PTE_RPGN_64,
			    ptp64->pte_lo & (PTE_REF_64|PTE_CHG_64));
	} else {
		if (PTED_MANAGED(pted))
			pmap_attr_save(pted->p.pted_pte32.pte_lo & PTE_RPGN_32,
			    ptp32->pte_lo & (PTE_REF_32|PTE_CHG_32));
	}
}

/*
 * remove specified entry from hash table.
 * all information is present in pted to look up entry
 * LOCKS... should the caller lock?
 */
void
pmap_hash_remove(struct pte_desc *pted)
{
	vaddr_t va = pted->pted_va;
	pmap_t pm = pted->pted_pmap;
	struct pte_64 *ptp64;
	struct pte_32 *ptp32;
	int sr, idx;

	sr = ptesr(pm->pm_sr, va);
	idx = pteidx(sr, va);

	idx = (idx ^ (PTED_HID(pted) ? pmap_ptab_mask : 0));
	/* determine which pteg mapping is present in */

	if (ppc_proc_is_64b) {
		ptp64 = pmap_ptable64 + (idx * 8);
		ptp64 += PTED_PTEGIDX(pted); /* increment by index into pteg */
		/*
		 * We now have the pointer to where it will be, if it is
		 * currently mapped. If the mapping was thrown away in
		 * exchange for another page mapping, then this page is not
		 * currently in the HASH.
		 */
		if ((pted->p.pted_pte64.pte_hi |
		    (PTED_HID(pted) ? PTE_HID_64 : 0)) == ptp64->pte_hi) {
			pte_zap((void*)ptp64, pted);
		}
	} else {
		ptp32 = pmap_ptable32 + (idx * 8);
		ptp32 += PTED_PTEGIDX(pted); /* increment by index into pteg */
		/*
		 * We now have the pointer to where it will be, if it is
		 * currently mapped. If the mapping was thrown away in
		 * exchange for another page mapping, then this page is not
		 * currently in the HASH.
		 */
		if ((pted->p.pted_pte32.pte_hi |
		    (PTED_HID(pted) ? PTE_HID_32 : 0)) == ptp32->pte_hi) {
			pte_zap((void*)ptp32, pted);
		}
	}
}

/*
 * What about execution control? Even at only a segment granularity.
 */
void
pmap_fill_pte64(pmap_t pm, vaddr_t va, paddr_t pa, struct pte_desc *pted,
	vm_prot_t prot, int flags, int cache)
{
	sr_t sr;
	struct pte_64 *pte64;

	sr = ptesr(pm->pm_sr, va);
	pte64 = &pted->p.pted_pte64;

	pte64->pte_hi = (((u_int64_t)sr & SR_VSID) << PTE_VSID_SHIFT_64) |
	    ((va >> ADDR_API_SHIFT_64) & PTE_API_64) | PTE_VALID_64;
	pte64->pte_lo = (pa & PTE_RPGN_64);

	if (cache == PMAP_CACHE_WB)
		pte64->pte_lo |= PTE_M_64;
	else if (cache == PMAP_CACHE_WT)
		pte64->pte_lo |= (PTE_W_64 | PTE_M_64);
	else
		pte64->pte_lo |= (PTE_M_64 | PTE_I_64 | PTE_G_64);

	if (prot & VM_PROT_WRITE)
		pte64->pte_lo |= PTE_RW_64;
	else
		pte64->pte_lo |= PTE_RO_64;

	pted->pted_va = va & ~PAGE_MASK;

	if (prot & VM_PROT_EXECUTE)
		pted->pted_va |= PTED_VA_EXEC_M;
	else
		pte64->pte_lo |= PTE_N_64;

	pted->pted_pmap = pm;
}

/*
 * What about execution control? Even at only a segment granularity.
 */
void
pmap_fill_pte32(pmap_t pm, vaddr_t va, paddr_t pa, struct pte_desc *pted,
	vm_prot_t prot, int flags, int cache)
{
	sr_t sr;
	struct pte_32 *pte32;

	sr = ptesr(pm->pm_sr, va);
	pte32 = &pted->p.pted_pte32;

	pte32->pte_hi = ((sr & SR_VSID) << PTE_VSID_SHIFT_32) |
	    ((va >> ADDR_API_SHIFT_32) & PTE_API_32) | PTE_VALID_32;
	pte32->pte_lo = (pa & PTE_RPGN_32);

	if (cache == PMAP_CACHE_WB)
		pte32->pte_lo |= PTE_M_32;
	else if (cache == PMAP_CACHE_WT)
		pte32->pte_lo |= (PTE_W_32 | PTE_M_32);
	else
		pte32->pte_lo |= (PTE_M_32 | PTE_I_32 | PTE_G_32);

	if (prot & VM_PROT_WRITE)
		pte32->pte_lo |= PTE_RW_32;
	else
		pte32->pte_lo |= PTE_RO_32;

	pted->pted_va = va & ~PAGE_MASK;

	/* XXX Per-page execution control. */
	if (prot & VM_PROT_EXECUTE)
		pted->pted_va |= PTED_VA_EXEC_M;

	pted->pted_pmap = pm;
}

/*
 * read/clear bits from pte/attr cache, for reference/change
 * ack, copied code in the pte flush code....
 */
int
pteclrbits(struct vm_page *pg, u_int flagbit, u_int clear)
{
	u_int bits;
	int s;
	struct pte_desc *pted;
	u_int ptebit = pmap_flags2pte(flagbit);

	/* PTE_CHG_32 == PTE_CHG_64 */
	/* PTE_REF_32 == PTE_REF_64 */

	/*
	 * First try the attribute cache
	 */
	bits = pg->pg_flags & flagbit;
	if ((bits == flagbit) && (clear == 0))
		return bits;

	/*
	 * The cache did not contain all necessary bits; walk through
	 * the pv table to collect all mappings for this page, copying
	 * bits to the attribute cache, then reread the attribute cache.
	 */
	/* need lock for this pv */
	s = splvm();

	LIST_FOREACH(pted, &(pg->mdpage.pv_list), pted_pv_list) {
		/* mask off the PTED_VA_* flag bits kept in the low bits */
		vaddr_t va = pted->pted_va & ~PAGE_MASK;
		pmap_t pm = pted->pted_pmap;
		struct pte_64 *ptp64;
		struct pte_32 *ptp32;
		int sr, idx;

		sr = ptesr(pm->pm_sr, va);
		idx = pteidx(sr, va);

		/* determine which pteg the mapping is present in */
		if (ppc_proc_is_64b) {
			ptp64 = pmap_ptable64 +
			    (idx ^ (PTED_HID(pted) ? pmap_ptab_mask : 0)) * 8;
			ptp64 += PTED_PTEGIDX(pted); /* increment by index into pteg */

			/*
			 * We now have the pointer to where it will be, if it is
			 * currently mapped. If the mapping was thrown away in
			 * exchange for another page mapping, then this page is
			 * not currently in the HASH.
			 *
			 * if we are not clearing bits, and have found all of the
			 * bits we want, we can stop
			 */
			if ((pted->p.pted_pte64.pte_hi |
			    (PTED_HID(pted) ? PTE_HID_64 : 0)) == ptp64->pte_hi) {
				bits |= pmap_pte2flags(ptp64->pte_lo & ptebit);
				if (clear) {
					ptp64->pte_hi &= ~PTE_VALID_64;
					__asm__ volatile ("sync");
					tlbie(va);
					tlbsync();
					ptp64->pte_lo &= ~ptebit;
					__asm__ volatile ("sync");
					ptp64->pte_hi |= PTE_VALID_64;
				} else if (bits == flagbit)
					break;
			}
		} else {
			ptp32 = pmap_ptable32 +
			    (idx ^ (PTED_HID(pted) ? pmap_ptab_mask : 0)) * 8;
			ptp32 += PTED_PTEGIDX(pted); /* increment by index into pteg */

			/*
			 * We now have the pointer to where it will be, if it is
			 * currently mapped. If the mapping was thrown away in
			 * exchange for another page mapping, then this page is
			 * not currently in the HASH.
			 *
			 * if we are not clearing bits, and have found all of the
			 * bits we want, we can stop
			 */
			if ((pted->p.pted_pte32.pte_hi |
			    (PTED_HID(pted) ? PTE_HID_32 : 0)) == ptp32->pte_hi) {
				bits |= pmap_pte2flags(ptp32->pte_lo & ptebit);
				if (clear) {
					ptp32->pte_hi &= ~PTE_VALID_32;
					__asm__ volatile ("sync");
					tlbie(va);
					tlbsync();
					ptp32->pte_lo &= ~ptebit;
					__asm__ volatile ("sync");
					ptp32->pte_hi |= PTE_VALID_32;
				} else if (bits == flagbit)
					break;
			}
		}
	}

	if (clear) {
		/*
		 * this is done a second time, because while walking the list
		 * a bit could have been promoted via pmap_attr_save()
		 */
		bits |= pg->pg_flags & flagbit;
		atomic_clearbits_int(&pg->pg_flags, flagbit);
	} else
		atomic_setbits_int(&pg->pg_flags, bits);

	splx(s);
	return bits;
}
                   1050:
                    1051: /*
                    1052:  * Garbage collects the physical map system for pages which are
                    1053:  * no longer used. Success need not be guaranteed -- that is, there
                    1054:  * may well be pages which are not referenced, and others may be
                    1055:  * collected. Called by the pageout daemon when pages are scarce.
                    1056:  */
                   1057: void
                   1058: pmap_collect(pmap_t pm)
                   1059: {
                    1060:        /*
                    1061:         * This could return unused v->p table layers which are
                    1062:         * empty. Could malicious programs allocate memory and eat
                    1063:         * these wired pages? These are allocated via pool.
                    1064:         * Are there pool functions which could be called
                    1065:         * to lower the pool usage here?
                    1066:         */
                   1067: }
                   1068:
                   1069: /*
                   1070:  * Fill the given physical page with zeros.
                   1071:  */
                   1072: void
                   1073: pmap_zero_page(struct vm_page *pg)
                   1074: {
                   1075:        paddr_t pa = VM_PAGE_TO_PHYS(pg);
                   1076: #ifdef USE_DCBZ
                   1077:        int i;
                   1078:        paddr_t addr = zero_page;
                   1079: #endif
                   1080:
                   1081:        /* simple_lock(&pmap_zero_page_lock); */
                   1082:        pmap_kenter_pa(zero_page, pa, VM_PROT_READ|VM_PROT_WRITE);
                   1083: #ifdef USE_DCBZ
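                                  /*
                                   * dcbz establishes a zeroed data cache line without first
                                   * fetching it from memory, so the page is cleared one cache
                                   * line per iteration with no memory reads.
                                   */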
                    1084:        for (i = PAGE_SIZE / CACHELINESIZE; i > 0; i--) {
                   1085:                __asm volatile ("dcbz 0,%0" :: "r"(addr));
                   1086:                addr += CACHELINESIZE;
                   1087:        }
                   1088: #else
                   1089:        bzero((void *)zero_page, PAGE_SIZE);
                   1090: #endif
                   1091:        pmap_kremove_pg(zero_page);
                   1092:
                   1093:        /* simple_unlock(&pmap_zero_page_lock); */
                   1094: }
                   1095:
                    1096: /*
                    1097:  * Copy the contents of the given physical page to the destination page.
                    1098:  */
                   1099: void
                   1100: pmap_copy_page(struct vm_page *srcpg, struct vm_page *dstpg)
                   1101: {
                   1102:        paddr_t srcpa = VM_PAGE_TO_PHYS(srcpg);
                   1103:        paddr_t dstpa = VM_PAGE_TO_PHYS(dstpg);
                   1104:        /* simple_lock(&pmap_copy_page_lock); */
                   1105:
                   1106:        pmap_kenter_pa(copy_src_page, srcpa, VM_PROT_READ);
                   1107:        pmap_kenter_pa(copy_dst_page, dstpa, VM_PROT_READ|VM_PROT_WRITE);
                   1108:
                   1109:        bcopy((void *)copy_src_page, (void *)copy_dst_page, PAGE_SIZE);
                   1110:
                   1111:        pmap_kremove_pg(copy_src_page);
                   1112:        pmap_kremove_pg(copy_dst_page);
                   1113:        /* simple_unlock(&pmap_copy_page_lock); */
                   1114: }
                   1115:
                   1116: int pmap_id_avail = 0;
                   1117:
                   1118: void
                   1119: pmap_pinit(pmap_t pm)
                   1120: {
                   1121:        int i, k, try, tblidx, tbloff;
                   1122:        int s, seg;
                   1123:
                   1124:        bzero(pm, sizeof (struct pmap));
                   1125:
                   1126:        pmap_reference(pm);
                   1127:
                   1128:        /*
                   1129:         * Allocate segment registers for this pmap.
                   1130:         * Try not to reuse pmap ids, to spread the hash table usage.
                   1131:         */
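                                  /*
                                   * Each pmap id claims a block of 16 consecutive VSIDs, one
                                   * per 256MB segment; e.g. (illustrative) id 3 gives seg =
                                   * 3 << 4, so pm_sr[0..15] become 0x30..0x3f, each tagged
                                   * with SR_NOEXEC.
                                   */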
                   1132: again:
                   1133:        for (i = 0; i < NPMAPS; i++) {
                   1134:                try = pmap_id_avail + i;
                    1135:                try = try % NPMAPS; /* wrap back into bounds */
                   1136:                tblidx = try / (8 * sizeof usedsr[0]);
                   1137:                tbloff = try % (8 * sizeof usedsr[0]);
                   1138:                if ((usedsr[tblidx] & (1 << tbloff)) == 0) {
                   1139:                        /* pmap create lock? */
                   1140:                        s = splvm();
                    1141:                        if ((usedsr[tblidx] & (1 << tbloff)) != 0) {
                   1142:                                /* entry was stolen out from under us, retry */
                   1143:                                splx(s); /* pmap create unlock */
                   1144:                                goto again;
                   1145:                        }
                   1146:                        usedsr[tblidx] |= (1 << tbloff);
                   1147:                        pmap_id_avail = try + 1;
                   1148:                        splx(s); /* pmap create unlock */
                   1149:
                   1150:                        seg = try << 4;
                   1151:                        for (k = 0; k < 16; k++)
                   1152:                                pm->pm_sr[k] = (seg + k) | SR_NOEXEC;
                   1153:                        return;
                   1154:                }
                   1155:        }
                   1156:        panic("out of pmap slots");
                   1157: }
                   1158:
                   1159: /*
                   1160:  * Create and return a physical map.
                   1161:  */
                   1162: pmap_t
                   1163: pmap_create()
                   1164: {
                   1165:        pmap_t pmap;
                   1166:        int s;
                   1167:
                   1168:        s = splvm();
                   1169:        pmap = pool_get(&pmap_pmap_pool, PR_WAITOK);
                   1170:        splx(s);
                   1171:        pmap_pinit(pmap);
                   1172:        return (pmap);
                   1173: }
                   1174:
                   1175: /*
                   1176:  * Add a reference to a given pmap.
                   1177:  */
                   1178: void
                   1179: pmap_reference(pmap_t pm)
                   1180: {
                   1181:        /* simple_lock(&pmap->pm_obj.vmobjlock); */
                   1182:        pm->pm_refs++;
                   1183:        /* simple_unlock(&pmap->pm_obj.vmobjlock); */
                   1184: }
                   1185:
                   1186: /*
                   1187:  * Retire the given pmap from service.
                   1188:  * Should only be called if the map contains no valid mappings.
                   1189:  */
                   1190: void
                   1191: pmap_destroy(pmap_t pm)
                   1192: {
                   1193:        int refs;
                   1194:        int s;
                   1195:
                   1196:        /* simple_lock(&pmap->pm_obj.vmobjlock); */
                   1197:        refs = --pm->pm_refs;
                   1198:        /* simple_unlock(&pmap->pm_obj.vmobjlock); */
                   1199:        if (refs > 0)
                   1200:                return;
                   1201:
                   1202:        /*
                   1203:         * reference count is zero, free pmap resources and free pmap.
                   1204:         */
                   1205:        pmap_release(pm);
                   1206:        s = splvm();
                   1207:        pool_put(&pmap_pmap_pool, pm);
                   1208:        splx(s);
                   1209: }
                   1210:
                   1211: /*
                   1212:  * Release any resources held by the given physical map.
                   1213:  * Called when a pmap initialized by pmap_pinit is being released.
                   1214:  */
                   1215: void
                   1216: pmap_release(pmap_t pm)
                   1217: {
                   1218:        int i, tblidx, tbloff;
                   1219:        int s;
                   1220:
                   1221:        pmap_vp_destroy(pm);
                   1222:        i = (pm->pm_sr[0] & SR_VSID) >> 4;
                    1223:        tblidx = i / (8 * sizeof usedsr[0]);
                    1224:        tbloff = i % (8 * sizeof usedsr[0]);
                   1225:
                   1226:        /* LOCK? */
                   1227:        s = splvm();
                   1228:        usedsr[tblidx] &= ~(1 << tbloff);
                   1229:        splx(s);
                   1230: }
                   1231:
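                          /*
                           * Tear down the two-level v->p lookup table: pm_vp[] holds one
                           * first-level pmapvp per segment (VP_SR_SIZE entries), each of
                           * which points at second-level pmapvp tables of pte_desc slots.
                           */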
                   1232: void
                   1233: pmap_vp_destroy(pmap_t pm)
                   1234: {
                   1235:        int i, j;
                   1236:        int s;
                   1237:        struct pmapvp *vp1;
                   1238:        struct pmapvp *vp2;
                   1239:
                   1240:        for (i = 0; i < VP_SR_SIZE; i++) {
                   1241:                vp1 = pm->pm_vp[i];
                   1242:                if (vp1 == NULL)
                   1243:                        continue;
                   1244:
                   1245:                for (j = 0; j < VP_IDX1_SIZE; j++) {
                   1246:                        vp2 = vp1->vp[j];
                   1247:                        if (vp2 == NULL)
                   1248:                                continue;
                   1249:
                   1250:                        s = splvm();
                   1251:                        pool_put(&pmap_vp_pool, vp2);
                   1252:                        splx(s);
                   1253:                }
                   1254:                pm->pm_vp[i] = NULL;
                   1255:                s = splvm();
                   1256:                pool_put(&pmap_vp_pool, vp1);
                   1257:                splx(s);
                   1258:        }
                   1259: }
                   1260:
                   1261: void
                   1262: pmap_avail_setup(void)
                   1263: {
                   1264:        struct mem_region *mp;
                   1265:
                    1266:        (fw->mem_regions)(&pmap_mem, &pmap_avail);
                   1267:        pmap_cnt_avail = 0;
                   1268:        physmem = 0;
                   1269:
                   1270:        ndumpmem = 0;
                    1271:        for (mp = pmap_mem; mp->size != 0; mp++, ndumpmem++) {
                   1272:                physmem += btoc(mp->size);
                   1273:                dumpmem[ndumpmem].start = atop(mp->start);
                   1274:                dumpmem[ndumpmem].end = atop(mp->start + mp->size);
                   1275:        }
                   1276:
                    1277:        for (mp = pmap_avail; mp->size != 0; mp++) {
                    1278:                if (physmaxaddr < mp->start + mp->size)
                    1279:                        physmaxaddr = mp->start + mp->size;
                    1280:        }
                    1281:
                    1282:        for (mp = pmap_avail; mp->size != 0; mp++)
                    1283:                pmap_cnt_avail += 1;
                   1284: }
                   1285:
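                          /*
                           * Page-align every avail region; e.g. (illustrative) a region
                           * [0x1234, 0x5678) is trimmed to [0x2000, 0x5000), with the
                           * sub-page head and tail handed to pmap_remove_avail().
                           */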
                   1286: void
                   1287: pmap_avail_fixup(void)
                   1288: {
                   1289:        struct mem_region *mp;
                   1290:        u_int32_t align;
                   1291:        u_int32_t end;
                   1292:
                   1293:        mp = pmap_avail;
                    1294:        while (mp->size != 0) {
                   1295:                align = round_page(mp->start);
                   1296:                if (mp->start != align) {
                   1297:                        pmap_remove_avail(mp->start, align);
                   1298:                        mp = pmap_avail;
                   1299:                        continue;
                   1300:                }
                    1301:                end = mp->start + mp->size;
                   1302:                align = trunc_page(end);
                   1303:                if (end != align) {
                   1304:                        pmap_remove_avail(align, end);
                   1305:                        mp = pmap_avail;
                   1306:                        continue;
                   1307:                }
                   1308:                mp++;
                   1309:        }
                   1310: }
                   1311:
                   1312: /* remove a given region from avail memory */
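                          /*
                           * Four overlap cases are handled below (sketch):
                           *   [base,end) covers the avail region  -> delete the entry
                           *   [base,end) overlaps the front       -> advance mp->start
                           *   [base,end) overlaps the back        -> shrink mp->size
                           *   [base,end) lies strictly inside     -> split in two
                           * The removed range is then merged into pmap_allocated.
                           */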
                   1313: void
                   1314: pmap_remove_avail(paddr_t base, paddr_t end)
                   1315: {
                   1316:        struct mem_region *mp;
                   1317:        int i;
                    1318:        paddr_t mpend;
                   1319:
                   1320:        /* remove given region from available */
                   1321:        for (mp = pmap_avail; mp->size; mp++) {
                    1322:                /*
                    1323:                 * Check how this region overlaps the region being removed
                    1324:                 */
                   1325:                mpend = mp->start + mp->size;
                   1326:                if (base > mpend) {
                   1327:                        continue;
                   1328:                }
                   1329:                if (base <= mp->start) {
                   1330:                        if (end <= mp->start)
                   1331:                                break; /* region not present -??? */
                   1332:
                   1333:                        if (end >= mpend) {
                   1334:                                /* covers whole region */
                   1335:                                /* shorten */
                   1336:                                for (i = mp - pmap_avail;
                   1337:                                    i < pmap_cnt_avail;
                   1338:                                    i++) {
                   1339:                                        pmap_avail[i] = pmap_avail[i+1];
                   1340:                                }
                   1341:                                pmap_cnt_avail--;
                   1342:                                pmap_avail[pmap_cnt_avail].size = 0;
                   1343:                        } else {
                   1344:                                mp->start = end;
                   1345:                                mp->size = mpend - end;
                   1346:                        }
                   1347:                } else {
                   1348:                        /* start after the beginning */
                   1349:                        if (end >= mpend) {
                   1350:                                /* just truncate */
                   1351:                                mp->size = base - mp->start;
                   1352:                        } else {
                   1353:                                /* split */
                   1354:                                for (i = pmap_cnt_avail;
                   1355:                                    i > (mp - pmap_avail);
                   1356:                                    i--) {
                   1357:                                        pmap_avail[i] = pmap_avail[i - 1];
                   1358:                                }
                   1359:                                pmap_cnt_avail++;
                   1360:                                mp->size = base - mp->start;
                   1361:                                mp++;
                   1362:                                mp->start = end;
                   1363:                                mp->size = mpend - end;
                   1364:                        }
                   1365:                }
                   1366:        }
                   1367:        for (mp = pmap_allocated; mp->size != 0; mp++) {
                   1368:                if (base < mp->start) {
                   1369:                        if (end == mp->start) {
                   1370:                                mp->start = base;
                   1371:                                mp->size += end - base;
                   1372:                                break;
                   1373:                        }
                   1374:                        /* lengthen */
                   1375:                        for (i = pmap_cnt_allocated; i > (mp - pmap_allocated);
                   1376:                            i--) {
                   1377:                                pmap_allocated[i] = pmap_allocated[i - 1];
                   1378:                        }
                   1379:                        pmap_cnt_allocated++;
                   1380:                        mp->start = base;
                   1381:                        mp->size = end - base;
                   1382:                        return;
                   1383:                }
                   1384:                if (base == (mp->start + mp->size)) {
                   1385:                        mp->size += end - base;
                   1386:                        return;
                   1387:                }
                   1388:        }
                   1389:        if (mp->size == 0) {
                   1390:                mp->start = base;
                   1391:                mp->size  = end - base;
                   1392:                pmap_cnt_allocated++;
                   1393:        }
                   1394: }
                   1395:
                   1396: void *
                   1397: pmap_steal_avail(size_t size, int align)
                   1398: {
                   1399:        struct mem_region *mp;
                   1400:        int start;
                   1401:        int remsize;
                   1402:
                   1403:        for (mp = pmap_avail; mp->size; mp++) {
                   1404:                if (mp->size > size) {
                    1405:                        start = (mp->start + (align - 1)) & ~(align - 1);
                    1406:                        remsize = mp->size - (start - mp->start);
                    1407:                        if (remsize >= 0 && remsize >= size) {
                   1408:                                pmap_remove_avail(start, start+size);
                   1409:                                return (void *)start;
                   1410:                        }
                   1411:                }
                   1412:        }
                    1413:        panic("unable to allocate region with size %x align %x",
                   1414:            size, align);
                   1415: }
                   1416:
                   1417: /*
                   1418:  * Similar to pmap_steal_avail, but operating on vm_physmem since
                   1419:  * uvm_page_physload() has been called.
                   1420:  */
                   1421: vaddr_t
                   1422: pmap_steal_memory(vsize_t size, vaddr_t *start, vaddr_t *end)
                   1423: {
                   1424:        int segno;
                   1425:        u_int npg;
                   1426:        vaddr_t va;
                   1427:        paddr_t pa;
                   1428:        struct vm_physseg *seg;
                   1429:
                   1430:        size = round_page(size);
                   1431:        npg = atop(size);
                   1432:
                   1433:        for (segno = 0, seg = vm_physmem; segno < vm_nphysseg; segno++, seg++) {
                   1434:                if (seg->avail_end - seg->avail_start < npg)
                   1435:                        continue;
                   1436:                /*
                   1437:                 * We can only steal at an ``unused'' segment boundary,
                   1438:                 * i.e. either at the start or at the end.
                   1439:                 */
                   1440:                if (seg->avail_start == seg->start ||
                   1441:                    seg->avail_end == seg->end)
                   1442:                        break;
                   1443:        }
                   1444:        if (segno == vm_nphysseg)
                   1445:                va = 0;
                   1446:        else {
                   1447:                if (seg->avail_start == seg->start) {
                   1448:                        pa = ptoa(seg->avail_start);
                   1449:                        seg->avail_start += npg;
                   1450:                        seg->start += npg;
                   1451:                } else {
                   1452:                        pa = ptoa(seg->avail_end) - size;
                   1453:                        seg->avail_end -= npg;
                   1454:                        seg->end -= npg;
                   1455:                }
                   1456:                /*
                    1457:                 * If the whole segment has now been consumed, remove it.
                   1458:                 * Note that the crash dump code still knows about it
                   1459:                 * and will dump it correctly.
                   1460:                 */
                   1461:                if (seg->start == seg->end) {
                   1462:                        if (vm_nphysseg-- == 1)
                   1463:                                panic("pmap_steal_memory: out of memory");
                   1464:                        while (segno < vm_nphysseg) {
                   1465:                                seg[0] = seg[1]; /* struct copy */
                   1466:                                seg++;
                   1467:                                segno++;
                   1468:                        }
                   1469:                }
                   1470:
                   1471:                va = (vaddr_t)pa;       /* 1:1 mapping */
                   1472:                bzero((void *)va, size);
                   1473:        }
                   1474:
                   1475:        if (start != NULL)
                   1476:                *start = VM_MIN_KERNEL_ADDRESS;
                   1477:        if (end != NULL)
                   1478:                *end = VM_MAX_KERNEL_ADDRESS;
                   1479:
                   1480:        return (va);
                   1481: }
                   1482:
                   1483: void *msgbuf_addr;
                   1484:
                   1485: /*
                   1486:  * Initialize pmap setup.
                    1487:  * ALL of the code which deals with avail needs to be rewritten as an
                    1488:  * actual memory allocation.
                   1489:  */
                   1490: void
                   1491: pmap_bootstrap(u_int kernelstart, u_int kernelend)
                   1492: {
                   1493:        struct mem_region *mp;
                   1494:        int i, k;
                   1495:        struct pmapvp *vp1;
                   1496:        struct pmapvp *vp2;
                   1497:
                   1498:        ppc_check_procid();
                   1499:
                   1500:        /*
                   1501:         * Get memory.
                   1502:         */
                   1503:        pmap_avail_setup();
                   1504:
                   1505:        /*
                   1506:         * Page align all regions.
                   1507:         * Non-page memory isn't very interesting to us.
                   1508:         * Also, sort the entries for ascending addresses.
                   1509:         */
                   1510:        kernelstart = trunc_page(kernelstart);
                   1511:        kernelend = round_page(kernelend);
                   1512:        pmap_remove_avail(kernelstart, kernelend);
                   1513:
                    1514:        msgbuf_addr = pmap_steal_avail(MSGBUFSIZE, 4);
                   1515:
                   1516:        for (mp = pmap_avail; mp->size; mp++) {
                   1517:                bzero((void *)mp->start, mp->size);
                   1518:        }
                   1519:
                   1520: #define HTABENTS_32 1024
                   1521: #define HTABENTS_64 2048
                   1522:
                   1523:        if (ppc_proc_is_64b) {
                   1524:                pmap_ptab_cnt = HTABENTS_64;
                   1525:                while (pmap_ptab_cnt * 2 < physmem)
                   1526:                        pmap_ptab_cnt <<= 1;
                   1527:        } else {
                   1528:                pmap_ptab_cnt = HTABENTS_32;
                   1529:                while (HTABSIZE_32 < (ctob(physmem) >> 7))
                   1530:                        pmap_ptab_cnt <<= 1;
                   1531:        }
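                                  /*
                                   * Example (illustrative): with 512MB of RAM, physmem is
                                   * 131072 pages, so on a 64-bit CPU pmap_ptab_cnt doubles
                                   * from 2048 until 2 * pmap_ptab_cnt >= physmem, ending
                                   * at 65536.
                                   */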
                   1532:        /*
                   1533:         * allocate suitably aligned memory for HTAB
                   1534:         */
                   1535:        if (ppc_proc_is_64b) {
                   1536:                pmap_ptable64 = pmap_steal_avail(HTABMEMSZ_64, HTABMEMSZ_64);
                   1537:                bzero((void *)pmap_ptable64, HTABMEMSZ_64);
                   1538:                pmap_ptab_mask = pmap_ptab_cnt - 1;
                   1539:        } else {
                   1540:                pmap_ptable32 = pmap_steal_avail(HTABSIZE_32, HTABSIZE_32);
                   1541:                bzero((void *)pmap_ptable32, HTABSIZE_32);
                   1542:                pmap_ptab_mask = pmap_ptab_cnt - 1;
                   1543:        }
                   1544:
                   1545:        /* allocate v->p mappings for pmap_kernel() */
                   1546:        for (i = 0; i < VP_SR_SIZE; i++) {
                   1547:                pmap_kernel()->pm_vp[i] = NULL;
                   1548:        }
                    1549:        vp1 = pmap_steal_avail(sizeof(struct pmapvp), 4);
                    1550:        bzero(vp1, sizeof(struct pmapvp));
                    1551:        pmap_kernel()->pm_vp[PPC_KERNEL_SR] = vp1;
                    1552:        for (i = 0; i < VP_IDX1_SIZE; i++) {
                    1553:                vp2 = vp1->vp[i] = pmap_steal_avail(sizeof(struct pmapvp), 4);
                    1554:                bzero(vp2, sizeof(struct pmapvp));
                    1555:                for (k = 0; k < VP_IDX2_SIZE; k++) {
                    1556:                        struct pte_desc *pted;
                    1557:                        pted = pmap_steal_avail(sizeof(struct pte_desc), 4);
                    1558:                        bzero(pted, sizeof(struct pte_desc));
                    1559:                        vp2->vp[k] = pted;
                    1560:                }
                   1561:        }
                   1562:
                   1563:        if (ppc_proc_is_64b) {
                    1564:                vp1 = pmap_steal_avail(sizeof(struct pmapvp), 4);
                    1565:                bzero(vp1, sizeof(struct pmapvp));
                    1566:                pmap_kernel()->pm_vp[0] = vp1;
                    1567:                for (i = 0; i < VP_IDX1_SIZE; i++) {
                    1568:                        vp2 = vp1->vp[i] =
                    1569:                            pmap_steal_avail(sizeof(struct pmapvp), 4);
                    1570:                        bzero(vp2, sizeof(struct pmapvp));
                    1571:                        for (k = 0; k < VP_IDX2_SIZE; k++) {
                    1572:                                struct pte_desc *pted;
                    1573:                                pted = pmap_steal_avail(sizeof(struct pte_desc), 4);
                    1574:                                bzero(pted, sizeof(struct pte_desc));
                   1575:                                vp2->vp[k] = pted;
                   1576:                        }
                   1577:                }
                   1578:        }
                   1579:
                   1580:        zero_page = VM_MIN_KERNEL_ADDRESS + ppc_kvm_stolen;
                   1581:        ppc_kvm_stolen += PAGE_SIZE;
                   1582:        copy_src_page = VM_MIN_KERNEL_ADDRESS + ppc_kvm_stolen;
                   1583:        ppc_kvm_stolen += PAGE_SIZE;
                   1584:        copy_dst_page = VM_MIN_KERNEL_ADDRESS + ppc_kvm_stolen;
                   1585:        ppc_kvm_stolen += PAGE_SIZE;
                    1586:        ppc_kvm_stolen += reserve_dumppages((caddr_t)(VM_MIN_KERNEL_ADDRESS +
                   1587:            ppc_kvm_stolen));
                   1588:
                   1589:
                   1590:        /*
                   1591:         * Initialize kernel pmap and hardware.
                   1592:         */
                   1593: #if NPMAPS >= PPC_KERNEL_SEGMENT / 16
                   1594:        usedsr[PPC_KERNEL_SEGMENT / 16 / (sizeof usedsr[0] * 8)]
                   1595:                |= 1 << ((PPC_KERNEL_SEGMENT / 16) % (sizeof usedsr[0] * 8));
                   1596: #endif
                   1597:        for (i = 0; i < 16; i++) {
                   1598:                pmap_kernel()->pm_sr[i] = (PPC_KERNEL_SEG0 + i) | SR_NOEXEC;
                   1599:                ppc_mtsrin(PPC_KERNEL_SEG0 + i, i << ADDR_SR_SHIFT);
                   1600:        }
                   1601:
                   1602:        if (ppc_proc_is_64b) {
                    1603:                for (i = 0; i < 0x10000; i++)
                   1604:                        pmap_kenter_cache(ctob(i), ctob(i), VM_PROT_ALL,
                   1605:                            PMAP_CACHE_WB);
                   1606:                asm volatile ("sync; mtsdr1 %0; isync"
                   1607:                    :: "r"((u_int)pmap_ptable64 | HTABSIZE_64));
                   1608:        } else
                   1609:                asm volatile ("sync; mtsdr1 %0; isync"
                   1610:                    :: "r"((u_int)pmap_ptable32 | (pmap_ptab_mask >> 10)));
                   1611:
                   1612:        pmap_avail_fixup();
                   1613:
                   1614:
                   1615:        tlbia();
                   1616:
                   1617:        pmap_avail_fixup();
                   1618:        for (mp = pmap_avail; mp->size; mp++) {
                   1619:                if (mp->start > 0x80000000)
                   1620:                        continue;
                   1621:                if (mp->start + mp->size > 0x80000000)
                   1622:                        mp->size = 0x80000000 - mp->start;
                   1623:                uvm_page_physload(atop(mp->start), atop(mp->start+mp->size),
                   1624:                    atop(mp->start), atop(mp->start+mp->size),
                   1625:                    VM_FREELIST_DEFAULT);
                   1626:        }
                   1627: }
                   1628:
                   1629: /*
                   1630:  * activate a pmap entry
                   1631:  * NOOP on powerpc, all PTE entries exist in the same hash table.
                   1632:  * Segment registers are filled on exit to user mode.
                   1633:  */
                   1634: void
                   1635: pmap_activate(struct proc *p)
                   1636: {
                   1637: }
                   1638:
                   1639: /*
                   1640:  * deactivate a pmap entry
                   1641:  * NOOP on powerpc
                   1642:  */
                   1643: void
                   1644: pmap_deactivate(struct proc *p)
                   1645: {
                   1646: }
                   1647:
                   1648: /*
                   1649:  * Get the physical page address for the given pmap/virtual address.
                   1650:  */
                   1651: boolean_t
                   1652: pmap_extract(pmap_t pm, vaddr_t va, paddr_t *pa)
                   1653: {
                   1654:        struct pte_desc *pted;
                   1655:
                   1656:        pted = pmap_vp_lookup(pm, va);
                   1657:        if (pted == NULL || !PTED_VALID(pted)) {
                   1658:                if (pm == pmap_kernel() && va < 0x80000000) {
                   1659:                        /* XXX - this is not true without BATs */
                   1660:                        /* if in kernel, va==pa for 0-0x80000000 */
                   1661:                        *pa = va;
                   1662:                        return TRUE;
                   1663:                }
                   1664:                return FALSE;
                   1665:        }
                   1666:        if (ppc_proc_is_64b)
                   1667:                *pa = (pted->p.pted_pte64.pte_lo & PTE_RPGN_64) |
                   1668:                    (va & ~PTE_RPGN_64);
                   1669:        else
                   1670:                *pa = (pted->p.pted_pte32.pte_lo & PTE_RPGN_32) |
                   1671:                    (va & ~PTE_RPGN_32);
                   1672:        return TRUE;
                   1673: }
                   1674:
                   1675: u_int32_t
                   1676: pmap_setusr(pmap_t pm, vaddr_t va)
                   1677: {
                   1678:        u_int32_t sr;
                   1679:        u_int32_t oldsr;
                   1680:
                   1681:        sr = pm->pm_sr[(u_int)va >> ADDR_SR_SHIFT];
                   1682:
                   1683:        /* user address range lock?? */
                   1684:        asm volatile ("mfsr %0,%1"
                   1685:            : "=r" (oldsr): "n"(PPC_USER_SR));
                   1686:        asm volatile ("isync; mtsr %0,%1; isync"
                   1687:            :: "n"(PPC_USER_SR), "r"(sr));
                   1688:        return oldsr;
                   1689: }
                   1690:
                   1691: void
                   1692: pmap_popusr(u_int32_t sr)
                   1693: {
                   1694:        asm volatile ("isync; mtsr %0,%1; isync"
                   1695:            :: "n"(PPC_USER_SR), "r"(sr));
                   1696: }
                   1697:
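                          /*
                           * The copy routines below work one 256MB segment at a time:
                           * pmap_setusr() installs the user segment containing the user
                           * address into the dedicated PPC_USER_SR segment register, and
                           * the data is then reached through the PPC_USER_ADDR window,
                           * e.g. (illustrative)
                           *
                           *     p = PPC_USER_ADDR + ((u_int)udaddr & ~PPC_SEGMENT_MASK);
                           *
                           * so a single copied chunk never crosses a segment boundary.
                           */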
                   1698: int
                   1699: copyin(const void *udaddr, void *kaddr, size_t len)
                   1700: {
                   1701:        void *p;
                   1702:        size_t l;
                   1703:        u_int32_t oldsr;
                   1704:        faultbuf env;
                   1705:        void *oldh = curpcb->pcb_onfault;
                   1706:
                   1707:        while (len > 0) {
                   1708:                p = PPC_USER_ADDR + ((u_int)udaddr & ~PPC_SEGMENT_MASK);
                   1709:                l = (PPC_USER_ADDR + PPC_SEGMENT_LENGTH) - p;
                   1710:                if (l > len)
                   1711:                        l = len;
                   1712:                oldsr = pmap_setusr(curpcb->pcb_pm, (vaddr_t)udaddr);
                   1713:                if (setfault(&env)) {
                   1714:                        pmap_popusr(oldsr);
                   1715:                        curpcb->pcb_onfault = oldh;
                   1716:                        return EFAULT;
                   1717:                }
                   1718:                bcopy(p, kaddr, l);
                   1719:                pmap_popusr(oldsr);
                   1720:                udaddr += l;
                   1721:                kaddr += l;
                   1722:                len -= l;
                   1723:        }
                   1724:        curpcb->pcb_onfault = oldh;
                   1725:        return 0;
                   1726: }
                   1727:
                   1728: int
                   1729: copyout(const void *kaddr, void *udaddr, size_t len)
                   1730: {
                   1731:        void *p;
                   1732:        size_t l;
                   1733:        u_int32_t oldsr;
                   1734:        faultbuf env;
                   1735:        void *oldh = curpcb->pcb_onfault;
                   1736:
                   1737:        while (len > 0) {
                   1738:                p = PPC_USER_ADDR + ((u_int)udaddr & ~PPC_SEGMENT_MASK);
                   1739:                l = (PPC_USER_ADDR + PPC_SEGMENT_LENGTH) - p;
                   1740:                if (l > len)
                   1741:                        l = len;
                   1742:                oldsr = pmap_setusr(curpcb->pcb_pm, (vaddr_t)udaddr);
                   1743:                if (setfault(&env)) {
                   1744:                        pmap_popusr(oldsr);
                   1745:                        curpcb->pcb_onfault = oldh;
                   1746:                        return EFAULT;
                   1747:                }
                   1748:
                   1749:                bcopy(kaddr, p, l);
                   1750:                pmap_popusr(oldsr);
                   1751:                udaddr += l;
                   1752:                kaddr += l;
                   1753:                len -= l;
                   1754:        }
                   1755:        curpcb->pcb_onfault = oldh;
                   1756:        return 0;
                   1757: }
                   1758:
                   1759: int
                   1760: copyinstr(const void *udaddr, void *kaddr, size_t len, size_t *done)
                   1761: {
                   1762:        const u_char *uaddr = udaddr;
                   1763:        u_char *kp    = kaddr;
                   1764:        u_char *up;
                   1765:        u_char c;
                   1766:        void   *p;
                   1767:        size_t   l;
                   1768:        u_int32_t oldsr;
                   1769:        int cnt = 0;
                   1770:        faultbuf env;
                   1771:        void *oldh = curpcb->pcb_onfault;
                   1772:
                   1773:        while (len > 0) {
                   1774:                p = PPC_USER_ADDR + ((u_int)uaddr & ~PPC_SEGMENT_MASK);
                   1775:                l = (PPC_USER_ADDR + PPC_SEGMENT_LENGTH) - p;
                   1776:                up = p;
                   1777:                if (l > len)
                   1778:                        l = len;
                   1779:                len -= l;
                   1780:                oldsr = pmap_setusr(curpcb->pcb_pm, (vaddr_t)uaddr);
                   1781:                if (setfault(&env)) {
                   1782:                        if (done != NULL)
                    1783:                                *done = cnt;
                   1784:
                   1785:                        curpcb->pcb_onfault = oldh;
                   1786:                        pmap_popusr(oldsr);
                   1787:                        return EFAULT;
                   1788:                }
                   1789:                while (l > 0) {
                   1790:                        c = *up;
                   1791:                        *kp = c;
                   1792:                        if (c == 0) {
                   1793:                                if (done != NULL)
                   1794:                                        *done = cnt + 1;
                   1795:
                   1796:                                curpcb->pcb_onfault = oldh;
                   1797:                                pmap_popusr(oldsr);
                   1798:                                return 0;
                   1799:                        }
                   1800:                        up++;
                   1801:                        kp++;
                   1802:                        l--;
                   1803:                        cnt++;
                   1804:                        uaddr++;
                   1805:                }
                   1806:                pmap_popusr(oldsr);
                   1807:        }
                   1808:        curpcb->pcb_onfault = oldh;
                   1809:        if (done != NULL)
                   1810:                *done = cnt;
                   1811:
                   1812:        return ENAMETOOLONG;
                   1813: }
                   1814:
                   1815: int
                   1816: copyoutstr(const void *kaddr, void *udaddr, size_t len, size_t *done)
                   1817: {
                   1818:        u_char *uaddr = (void *)udaddr;
                   1819:        const u_char *kp    = kaddr;
                   1820:        u_char *up;
                   1821:        u_char c;
                   1822:        void   *p;
                   1823:        size_t   l;
                   1824:        u_int32_t oldsr;
                   1825:        int cnt = 0;
                   1826:        faultbuf env;
                   1827:        void *oldh = curpcb->pcb_onfault;
                   1828:
                   1829:        while (len > 0) {
                   1830:                p = PPC_USER_ADDR + ((u_int)uaddr & ~PPC_SEGMENT_MASK);
                   1831:                l = (PPC_USER_ADDR + PPC_SEGMENT_LENGTH) - p;
                   1832:                up = p;
                   1833:                if (l > len)
                   1834:                        l = len;
                   1835:                len -= l;
                   1836:                oldsr = pmap_setusr(curpcb->pcb_pm, (vaddr_t)uaddr);
                   1837:                if (setfault(&env)) {
                   1838:                        if (done != NULL)
                    1839:                                *done = cnt;
                   1840:
                   1841:                        curpcb->pcb_onfault = oldh;
                   1842:                        pmap_popusr(oldsr);
                   1843:                        return EFAULT;
                   1844:                }
                   1845:                while (l > 0) {
                   1846:                        c = *kp;
                   1847:                        *up = c;
                   1848:                        if (c == 0) {
                   1849:                                if (done != NULL)
                   1850:                                        *done = cnt + 1;
                   1851:
                   1852:                                curpcb->pcb_onfault = oldh;
                   1853:                                pmap_popusr(oldsr);
                   1854:                                return 0;
                   1855:                        }
                   1856:                        up++;
                   1857:                        kp++;
                   1858:                        l--;
                   1859:                        cnt++;
                   1860:                        uaddr++;
                   1861:                }
                   1862:                pmap_popusr(oldsr);
                   1863:        }
                   1864:        curpcb->pcb_onfault = oldh;
                   1865:        if (done != NULL)
                   1866:                *done = cnt;
                   1867:
                   1868:        return ENAMETOOLONG;
                   1869: }
                   1870:
                   1871: /*
                    1872:  * Sync the instruction cache for a user virtual address.
                    1873:  * The address WAS JUST MAPPED, so we have a VALID USERSPACE mapping.
                   1874:  */
                    1875: #define CACHELINESIZE   32             /* For now XXX */
                   1876: void
                   1877: pmap_syncicache_user_virt(pmap_t pm, vaddr_t va)
                   1878: {
                   1879:        vaddr_t p, start;
                   1880:        int oldsr;
                   1881:        int l;
                   1882:
                   1883:        if (pm != pmap_kernel()) {
                   1884:                start = ((u_int)PPC_USER_ADDR + ((u_int)va &
                   1885:                    ~PPC_SEGMENT_MASK));
                   1886:                /* will only ever be page size, will not cross segments */
                   1887:
                   1888:                /* USER SEGMENT LOCK - MPXXX */
                   1889:                oldsr = pmap_setusr(pm, va);
                   1890:        } else {
                   1891:                start = va; /* flush mapped page */
                   1892:        }
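                                  /*
                                   * Write the page's data cache lines back to memory (dcbst),
                                   * then invalidate the matching instruction cache lines (icbi)
                                   * so the next instruction fetch sees the new contents.
                                   */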
                   1893:        p = start;
                   1894:        l = PAGE_SIZE;
                   1895:        do {
                   1896:                __asm__ __volatile__ ("dcbst 0,%0" :: "r"(p));
                   1897:                p += CACHELINESIZE;
                   1898:        } while ((l -= CACHELINESIZE) > 0);
                   1899:        p = start;
                   1900:        l = PAGE_SIZE;
                   1901:        do {
                   1902:                __asm__ __volatile__ ("icbi 0,%0" :: "r"(p));
                   1903:                p += CACHELINESIZE;
                   1904:        } while ((l -= CACHELINESIZE) > 0);
                   1905:
                   1906:
                   1907:        if (pm != pmap_kernel()) {
                   1908:                pmap_popusr(oldsr);
                   1909:                /* USER SEGMENT UNLOCK -MPXXX */
                   1910:        }
                   1911: }
                   1912:
                   1913: void
                   1914: pmap_page_ro64(pmap_t pm, vaddr_t va, vm_prot_t prot)
                   1915: {
                   1916:        struct pte_64 *ptp64;
                   1917:        struct pte_desc *pted;
                   1918:        int sr, idx;
                   1919:
                   1920:        pted = pmap_vp_lookup(pm, va);
                   1921:        if (pted == NULL || !PTED_VALID(pted))
                   1922:                return;
                   1923:
                   1924:        pted->p.pted_pte64.pte_lo &= ~PTE_PP_64;
                   1925:        pted->p.pted_pte64.pte_lo |= PTE_RO_64;
                   1926:
                   1927:        if ((prot & VM_PROT_EXECUTE) == 0)
                   1928:                pted->p.pted_pte64.pte_lo |= PTE_N_64;
                   1929:
                   1930:        sr = ptesr(pm->pm_sr, va);
                   1931:        idx = pteidx(sr, va);
                   1932:
                   1933:        /* determine which pteg mapping is present in */
                   1934:        ptp64 = pmap_ptable64 +
                   1935:            (idx ^ (PTED_HID(pted) ? pmap_ptab_mask : 0)) * 8;
                   1936:        ptp64 += PTED_PTEGIDX(pted); /* increment by index into pteg */
                   1937:
                   1938:        /*
                   1939:         * We now have the pointer to where it will be, if it is
                   1940:         * currently mapped. If the mapping was thrown away in
                   1941:         * exchange for another page mapping, then this page is
                   1942:         * not currently in the HASH.
                   1943:         */
                   1944:        if ((pted->p.pted_pte64.pte_hi | (PTED_HID(pted) ? PTE_HID_64 : 0))
                   1945:            == ptp64->pte_hi) {
                   1946:                ptp64->pte_hi &= ~PTE_VALID_64;
                   1947:                __asm__ volatile ("sync");
                   1948:                tlbie(va);
                   1949:                tlbsync();
                   1950:                if (PTED_MANAGED(pted)) { /* XXX */
                   1951:                        pmap_attr_save(ptp64->pte_lo & PTE_RPGN_64,
                   1952:                            ptp64->pte_lo & (PTE_REF_64|PTE_CHG_64));
                   1953:                }
                   1954:                ptp64->pte_lo &= ~PTE_CHG_64;
                   1955:                ptp64->pte_lo &= ~PTE_PP_64;
                   1956:                ptp64->pte_lo |= PTE_RO_64;
                   1957:                __asm__ volatile ("sync");
                   1958:                ptp64->pte_hi |= PTE_VALID_64;
                   1959:        }
                   1960: }
                   1961: void
                   1962: pmap_page_ro32(pmap_t pm, vaddr_t va)
                   1963: {
                   1964:        struct pte_32 *ptp32;
                   1965:        struct pte_desc *pted;
                   1966:        int sr, idx;
                   1967:
                   1968:        pted = pmap_vp_lookup(pm, va);
                   1969:        if (pted == NULL || !PTED_VALID(pted))
                   1970:                return;
                   1971:
                   1972:        pted->p.pted_pte32.pte_lo &= ~PTE_PP_32;
                   1973:        pted->p.pted_pte32.pte_lo |= PTE_RO_32;
                   1974:
                   1975:        sr = ptesr(pm->pm_sr, va);
                   1976:        idx = pteidx(sr, va);
                   1977:
                   1978:        /* determine which pteg mapping is present in */
                   1979:        ptp32 = pmap_ptable32 +
                   1980:            (idx ^ (PTED_HID(pted) ? pmap_ptab_mask : 0)) * 8;
                   1981:        ptp32 += PTED_PTEGIDX(pted); /* increment by index into pteg */
                   1982:
                   1983:        /*
                   1984:         * We now have the pointer to where it will be, if it is
                   1985:         * currently mapped. If the mapping was thrown away in
                   1986:         * exchange for another page mapping, then this page is
                   1987:         * not currently in the HASH.
                   1988:         */
                   1989:        if ((pted->p.pted_pte32.pte_hi | (PTED_HID(pted) ? PTE_HID_32 : 0))
                   1990:            == ptp32->pte_hi) {
                   1991:                ptp32->pte_hi &= ~PTE_VALID_32;
                   1992:                __asm__ volatile ("sync");
                   1993:                tlbie(va);
                   1994:                tlbsync();
                   1995:                if (PTED_MANAGED(pted)) { /* XXX */
                   1996:                        pmap_attr_save(ptp32->pte_lo & PTE_RPGN_32,
                   1997:                            ptp32->pte_lo & (PTE_REF_32|PTE_CHG_32));
                   1998:                }
                   1999:                ptp32->pte_lo &= ~PTE_CHG_32;
                   2000:                ptp32->pte_lo &= ~PTE_PP_32;
                   2001:                ptp32->pte_lo |= PTE_RO_32;
                   2002:                __asm__ volatile ("sync");
                   2003:                ptp32->pte_hi |= PTE_VALID_32;
                   2004:        }
                   2005: }
                   2006:
                   2007: /*
                   2008:  * Lower the protection on the specified physical page.
                   2009:  *
                   2010:  * There are only two cases, either the protection is going to 0,
                   2011:  * or it is going to read-only.
                   2012:  */
                   2013: void
                   2014: pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
                   2015: {
                   2016:        int s;
                   2017:        struct pte_desc *pted;
                   2018:
                   2019:        /* need to lock for this pv */
                   2020:        s = splvm();
                   2021:
                   2022:        if (prot == VM_PROT_NONE) {
                   2023:                while (!LIST_EMPTY(&(pg->mdpage.pv_list))) {
                   2024:                        pted = LIST_FIRST(&(pg->mdpage.pv_list));
                   2025:                        pmap_remove_pg(pted->pted_pmap, pted->pted_va);
                   2026:                }
                   2027:                /* page is being reclaimed, sync icache next use */
                   2028:                atomic_clearbits_int(&pg->pg_flags, PG_PMAP_EXE);
                   2029:                splx(s);
                   2030:                return;
                   2031:        }
                   2032:
                   2033:        LIST_FOREACH(pted, &(pg->mdpage.pv_list), pted_pv_list) {
                   2034:                if (ppc_proc_is_64b)
                   2035:                        pmap_page_ro64(pted->pted_pmap, pted->pted_va, prot);
                   2036:                else
                   2037:                        pmap_page_ro32(pted->pted_pmap, pted->pted_va);
                   2038:        }
                   2039:        splx(s);
                   2040: }
                   2041:
                   2042: void
                   2043: pmap_protect(pmap_t pm, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
                   2044: {
                   2045:        int s;
                   2046:        if (prot & (VM_PROT_READ | VM_PROT_EXECUTE)) {
                   2047:                s = splvm();
                   2048:                if (ppc_proc_is_64b) {
                   2049:                        while (sva < eva) {
                   2050:                                pmap_page_ro64(pm, sva, prot);
                   2051:                                sva += PAGE_SIZE;
                   2052:                        }
                   2053:                } else {
                   2054:                        while (sva < eva) {
                   2055:                                pmap_page_ro32(pm, sva);
                   2056:                                sva += PAGE_SIZE;
                   2057:                        }
                   2058:                }
                   2059:                splx(s);
                   2060:                return;
                   2061:        }
                   2062:        pmap_remove(pm, sva, eva);
                   2063: }
                   2064:
                   2065: /*
                   2066:  * Restrict given range to physical memory
                   2067:  */
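                          /*
                           * For example (illustrative): with a memory region
                           * [0x1000, 0x5000) and a request of *start = 0x800,
                           * *size = 0x2000, the range is clipped to *start = 0x1000,
                           * *size = 0x1800.
                           */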
                   2068: void
                   2069: pmap_real_memory(paddr_t *start, vsize_t *size)
                   2070: {
                   2071:        struct mem_region *mp;
                   2072:
                   2073:        for (mp = pmap_mem; mp->size; mp++) {
                   2074:                if (((*start + *size) > mp->start)
                   2075:                        && (*start < (mp->start + mp->size)))
                   2076:                {
                   2077:                        if (*start < mp->start) {
                   2078:                                *size -= mp->start - *start;
                   2079:                                *start = mp->start;
                   2080:                        }
                   2081:                        if ((*start + *size) > (mp->start + mp->size))
                   2082:                                *size = mp->start + mp->size - *start;
                   2083:                        return;
                   2084:                }
                   2085:        }
                   2086:        *size = 0;
                   2087: }
                   2088:
                   2089: void
                   2090: pmap_init()
                   2091: {
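                                  /*
                                   * Back the pmap, vp-table and pte-descriptor allocators with
                                   * pools; the low-water marks keep a reserve of items so that
                                   * mappings can still be entered while the system is paging.
                                   */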
                   2092:        pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmap", NULL);
                   2093:        pool_setlowat(&pmap_pmap_pool, 2);
                   2094:        pool_init(&pmap_vp_pool, sizeof(struct pmapvp), 0, 0, 0, "vp", NULL);
                   2095:        pool_setlowat(&pmap_vp_pool, 10);
                   2096:        pool_init(&pmap_pted_pool, sizeof(struct pte_desc), 0, 0, 0, "pted",
                   2097:            NULL);
                   2098:        pool_setlowat(&pmap_pted_pool, 20);
                   2099:
                   2100:        pmap_initialized = 1;
                   2101: }
                   2102:
                   2103: void
                   2104: pmap_proc_iflush(struct proc *p, vaddr_t addr, vsize_t len)
                   2105: {
                   2106:        paddr_t pa;
                   2107:        vsize_t clen;
                   2108:
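                                  /*
                                   * Walk the range one page at a time: translate each VA to a
                                   * physical address and sync the icache through the kernel's
                                   * 1:1 mapping of physical memory.
                                   */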
                   2109:        while (len > 0) {
                   2110:                /* add one to always round up to the next page */
                   2111:                clen = round_page(addr + 1) - addr;
                   2112:                if (clen > len)
                   2113:                        clen = len;
                   2114:
                   2115:                if (pmap_extract(p->p_vmspace->vm_map.pmap, addr, &pa)) {
                   2116:                        syncicache((void *)pa, clen);
                   2117:                }
                   2118:
                   2119:                len -= clen;
                   2120:                addr += clen;
                   2121:        }
                   2122: }
                   2123:
                   2124: /*
                    2125:  * There are two routines, pte_spill_r and pte_spill_v.
                    2126:  * The _r version handles only kernel faults that are not user
                    2127:  * accesses.  The _v version handles all user faults and kernel
                    2128:  * copyin/copyout "user" accesses.
                   2129:  */
                   2130: int
                   2131: pte_spill_r(u_int32_t va, u_int32_t msr, u_int32_t dsisr, int exec_fault)
                   2132: {
                   2133:        pmap_t pm;
                   2134:        struct pte_desc *pted;
                   2135:        struct pte_desc pted_store;
                   2136:
                    2137:        /* the lookup is done physically to avoid faulting in the fault path */
                   2138:
                   2139:        /*
                    2140:         * This function handles only kernel faults, not copyin/copyout accesses.
                   2141:         */
                   2142:        if (msr & PSL_PR)
                   2143:                return 0;
                   2144:
                    2145:        /* if copyin, defer to the full exception handler */
                   2146:        if (VP_SR(va) == PPC_USER_SR)
                   2147:                return 0;
                   2148:
                   2149:        pm = pmap_kernel();
                   2150:
                   2151:
                   2152:        if (va < physmaxaddr) {
                   2153:                u_int32_t aligned_va;
                    2154:                pted = &pted_store;
                   2155:                /* 0 - physmaxaddr mapped 1-1 */
                   2156:                /* XXX - no WRX control */
                   2157:
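                                          /*
                                           * No pted exists for the 1:1 region, so a throwaway
                                           * descriptor is built on the stack; pte_insert copies
                                           * what it needs into the hash table.
                                           */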
                   2158:                aligned_va = trunc_page(va);
                   2159:                if (ppc_proc_is_64b) {
                   2160:                        pmap_fill_pte64(pm, aligned_va, aligned_va,
                   2161:                            pted, VM_PROT_READ | VM_PROT_WRITE |
                   2162:                            VM_PROT_EXECUTE, 0, PMAP_CACHE_WB);
                   2163:                        pte_insert64(pted);
                   2164:                        return 1;
                   2165:                } else {
                   2166:                        pmap_fill_pte32(pm, aligned_va, aligned_va,
                    2167:                            pted, VM_PROT_READ | VM_PROT_WRITE |
                   2168:                            VM_PROT_EXECUTE, 0, PMAP_CACHE_WB);
                   2169:                        pte_insert32(pted);
                   2170:                        return 1;
                   2171:                }
                   2172:                /* NOTREACHED */
                   2173:        }
                   2174:
                   2175:        pted = pmap_vp_lookup(pm, va);
                   2176:        if (pted == NULL) {
                   2177:                return 0;
                   2178:        }
                   2179:
                    2180:        /* if the mapping was never entered (not valid),
                    2181:         * defer to the full fault handler
                    2182:         */
                   2183:        if (!PTED_VALID(pted)) {
                   2184:                return 0;
                   2185:        }
                   2186:
                   2187:        if (ppc_proc_is_64b) {
                    2188:                /* a store fault (DSISR bit 6) on a read-only mapping: defer */
                   2189:                if ((dsisr & (1 << (31-6))) &&
                   2190:                    (pted->p.pted_pte64.pte_lo & 0x1))
                   2191:                        return 0;
                   2192:                if ((exec_fault != 0)
                   2193:                    && ((pted->pted_va & PTED_VA_EXEC_M) == 0)) {
                   2194:                        /* attempted to execute non-executable page */
                   2195:                        return 0;
                   2196:                }
                   2197:                pte_insert64(pted);
                   2198:        } else {
                    2199:                /* a store fault (DSISR bit 6) on a read-only mapping: defer */
                   2200:                if ((dsisr & (1 << (31-6))) &&
                   2201:                    (pted->p.pted_pte32.pte_lo & 0x1))
                   2202:                        return 0;
                   2203:                if ((exec_fault != 0)
                   2204:                    && ((pted->pted_va & PTED_VA_EXEC_M) == 0)) {
                   2205:                        /* attempted to execute non-executable page */
                   2206:                        return 0;
                   2207:                }
                   2208:                pte_insert32(pted);
                   2209:        }
                   2210:
                   2211:        return 1;
                   2212: }
                   2213:
                   2214: int
                   2215: pte_spill_v(pmap_t pm, u_int32_t va, u_int32_t dsisr, int exec_fault)
                   2216: {
                   2217:        struct pte_desc *pted;
                   2218:
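                                  /*
                                   * Handles both true user-mode faults and kernel copyin/copyout
                                   * accesses through the user segment; the trap handler supplies
                                   * the pmap to search.
                                   */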
                   2219:        pted = pmap_vp_lookup(pm, va);
                   2220:        if (pted == NULL) {
                   2221:                return 0;
                   2222:        }
                   2223:
                   2224:        /*
                    2225:         * if the mapping was never entered (not valid),
                    2226:         * defer to the full fault handler
                   2227:         */
                   2228:        if (!PTED_VALID(pted)) {
                   2229:                return 0;
                   2230:        }
                   2231:        if (ppc_proc_is_64b) {
                    2232:                /* a store fault (DSISR bit 6) on a read-only mapping: defer */
                   2233:                if ((dsisr & (1 << (31-6))) &&
                   2234:                    (pted->p.pted_pte64.pte_lo & 0x1))
                   2235:                        return 0;
                   2236:        } else {
                    2237:                /* a store fault (DSISR bit 6) on a read-only mapping: defer */
                   2238:                if ((dsisr & (1 << (31-6))) &&
                   2239:                    (pted->p.pted_pte32.pte_lo & 0x1))
                   2240:                        return 0;
                   2241:        }
                   2242:        if ((exec_fault != 0)
                   2243:            && ((pted->pted_va & PTED_VA_EXEC_M) == 0)) {
                   2244:                /* attempted to execute non-executable page */
                   2245:                return 0;
                   2246:        }
                   2247:        if (ppc_proc_is_64b)
                   2248:                pte_insert64(pted);
                   2249:        else
                   2250:                pte_insert32(pted);
                   2251:        return 1;
                   2252: }
                   2253:
                   2254:
                   2255: /*
                   2256:  * should pte_insert code avoid wired mappings?
                   2257:  * is the stack safe?
                   2258:  * is the pted safe? (physical)
                   2259:  * -ugh
                   2260:  */
                   2261: void
                   2262: pte_insert64(struct pte_desc *pted)
                   2263: {
                   2264:        int off;
                   2265:        int secondary;
                   2266:        struct pte_64 *ptp64;
                   2267:        int sr, idx;
                   2268:        int i;
                   2269:
                   2270:        /* HASH lock? */
                   2271:
                   2272:        sr = ptesr(pted->pted_pmap->pm_sr, pted->pted_va);
                   2273:        idx = pteidx(sr, pted->pted_va);
                   2274:
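                                  /*
                                   * Each PTEG holds 8 PTEs and the secondary group lives at the
                                   * complemented index (idx ^ pmap_ptab_mask).  If this pted
                                   * already occupies a slot, purge the stale copy first.
                                   */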
                   2275:        ptp64 = pmap_ptable64 +
                   2276:            (idx ^ (PTED_HID(pted) ? pmap_ptab_mask : 0)) * 8;
                   2277:        ptp64 += PTED_PTEGIDX(pted); /* increment by index into pteg */
                   2278:        if ((pted->p.pted_pte64.pte_hi |
                   2279:            (PTED_HID(pted) ? PTE_HID_64 : 0)) == ptp64->pte_hi)
                    2280:                pte_zap(ptp64, pted);
                   2281:
                   2282:        pted->pted_va &= ~(PTED_VA_HID_M|PTED_VA_PTEGIDX_M);
                   2283:
                   2284:        /*
                    2285:         * Instead of starting at the beginning of each PTEG,
                    2286:         * the code should pick a random location within the primary,
                    2287:         * search all of its entries, and if nothing is found,
                    2288:         * do the same for the secondary.
                    2289:         * This would reduce the front-loading of the PTEG.
                   2290:         */
                   2291:        /* first just try fill of primary hash */
                   2292:        ptp64 = pmap_ptable64 + (idx) * 8;
                   2293:        for (i = 0; i < 8; i++) {
                   2294:                if (ptp64[i].pte_hi & PTE_VALID_64)
                   2295:                        continue;
                   2296:
                   2297:                /* not valid, just load */
                   2298:                pted->pted_va |= i;
                   2299:                ptp64[i].pte_hi =
                   2300:                    pted->p.pted_pte64.pte_hi & ~PTE_VALID_64;
                   2301:                ptp64[i].pte_lo = pted->p.pted_pte64.pte_lo;
                   2302:                __asm__ volatile ("sync");
                   2303:                ptp64[i].pte_hi |= PTE_VALID_64;
                    2304:                __asm__ volatile ("sync");
                   2305:                return;
                   2306:        }
                   2307:        /* try fill of secondary hash */
                   2308:        ptp64 = pmap_ptable64 + (idx ^ pmap_ptab_mask) * 8;
                   2309:        for (i = 0; i < 8; i++) {
                   2310:                if (ptp64[i].pte_hi & PTE_VALID_64)
                   2311:                        continue;
                   2312:
                   2313:                pted->pted_va |= (i | PTED_VA_HID_M);
                   2314:                ptp64[i].pte_hi =
                   2315:                    (pted->p.pted_pte64.pte_hi | PTE_HID_64) & ~PTE_VALID_64;
                   2316:                ptp64[i].pte_lo = pted->p.pted_pte64.pte_lo;
                   2317:                __asm__ volatile ("sync");
                   2318:                ptp64[i].pte_hi |= PTE_VALID_64;
                    2319:                __asm__ volatile ("sync");
                   2320:                return;
                   2321:        }
                   2322:
                   2323:        /* need decent replacement algorithm */
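                                  /*
                                   * The low bits of the time base act as a cheap pseudo-random
                                   * victim selector: bits 0-2 pick the slot within the PTEG,
                                   * bit 3 (off & 8) picks primary versus secondary hash.
                                   */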
                   2324:        __asm__ volatile ("mftb %0" : "=r"(off));
                   2325:        secondary = off & 8;
                   2326:        pted->pted_va |= off & (PTED_VA_PTEGIDX_M|PTED_VA_HID_M);
                   2327:
                   2328:        idx = (idx ^ (PTED_HID(pted) ? pmap_ptab_mask : 0));
                   2329:
                   2330:        ptp64 = pmap_ptable64 + (idx * 8);
                   2331:        ptp64 += PTED_PTEGIDX(pted); /* increment by index into pteg */
                   2332:        if (ptp64->pte_hi & PTE_VALID_64) {
                   2333:                vaddr_t va;
                   2334:                ptp64->pte_hi &= ~PTE_VALID_64;
                    2335:                __asm__ volatile ("sync");
                   2336:
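                                          /*
                                           * Reconstruct the effective address from the PTE fields
                                           * so the stale translation can be flushed with tlbie.
                                           */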
                   2337:                /* Bits 9-19 */
                   2338:                idx = (idx ^ ((ptp64->pte_hi & PTE_HID_64) ?
                   2339:                    pmap_ptab_mask : 0));
                   2340:                va = (ptp64->pte_hi >> PTE_VSID_SHIFT_64) ^ idx;
                   2341:                va <<= ADDR_PIDX_SHIFT;
                   2342:                /* Bits 4-8 */
                   2343:                va |= (ptp64->pte_hi & PTE_API_64) << ADDR_API_SHIFT_32;
                   2344:                /* Bits 0-3 */
                   2345:                va |= (ptp64->pte_hi >> PTE_VSID_SHIFT_64)
                   2346:                    << ADDR_SR_SHIFT;
                   2347:                tlbie(va);
                   2348:
                   2349:                tlbsync();
                   2350:                pmap_attr_save(ptp64->pte_lo & PTE_RPGN_64,
                   2351:                    ptp64->pte_lo & (PTE_REF_64|PTE_CHG_64));
                   2352:        }
                   2353:
                   2354:        if (secondary)
                   2355:                ptp64->pte_hi =
                   2356:                    (pted->p.pted_pte64.pte_hi | PTE_HID_64) &
                   2357:                    ~PTE_VALID_64;
                    2358:        else
                   2359:                ptp64->pte_hi = pted->p.pted_pte64.pte_hi &
                   2360:                    ~PTE_VALID_64;
                   2361:
                   2362:        ptp64->pte_lo = pted->p.pted_pte64.pte_lo;
                   2363:        __asm__ volatile ("sync");
                   2364:        ptp64->pte_hi |= PTE_VALID_64;
                   2365: }
                   2366:
                   2367: void
                   2368: pte_insert32(struct pte_desc *pted)
                   2369: {
                   2370:        int off;
                   2371:        int secondary;
                   2372:        struct pte_32 *ptp32;
                   2373:        int sr, idx;
                   2374:        int i;
                   2375:
                   2376:        /* HASH lock? */
                   2377:
                   2378:        sr = ptesr(pted->pted_pmap->pm_sr, pted->pted_va);
                   2379:        idx = pteidx(sr, pted->pted_va);
                   2380:
                   2381:        /* determine if ptp is already mapped */
                   2382:        ptp32 = pmap_ptable32 +
                   2383:            (idx ^ (PTED_HID(pted) ? pmap_ptab_mask : 0)) * 8;
                   2384:        ptp32 += PTED_PTEGIDX(pted); /* increment by index into pteg */
                   2385:        if ((pted->p.pted_pte32.pte_hi |
                   2386:            (PTED_HID(pted) ? PTE_HID_32 : 0)) == ptp32->pte_hi)
                    2387:                pte_zap(ptp32, pted);
                   2388:
                   2389:        pted->pted_va &= ~(PTED_VA_HID_M|PTED_VA_PTEGIDX_M);
                   2390:
                   2391:        /*
                    2392:         * Instead of starting at the beginning of each PTEG,
                    2393:         * the code should pick a random location within the primary,
                    2394:         * search all of its entries, and if nothing is found,
                    2395:         * do the same for the secondary.
                    2396:         * This would reduce the front-loading of the PTEG.
                   2397:         */
                   2398:
                   2399:        /* first just try fill of primary hash */
                   2400:        ptp32 = pmap_ptable32 + (idx) * 8;
                   2401:        for (i = 0; i < 8; i++) {
                   2402:                if (ptp32[i].pte_hi & PTE_VALID_32)
                   2403:                        continue;
                   2404:
                   2405:                /* not valid, just load */
                   2406:                pted->pted_va |= i;
                   2407:                ptp32[i].pte_hi = pted->p.pted_pte32.pte_hi & ~PTE_VALID_32;
                   2408:                ptp32[i].pte_lo = pted->p.pted_pte32.pte_lo;
                   2409:                __asm__ volatile ("sync");
                   2410:                ptp32[i].pte_hi |= PTE_VALID_32;
                    2411:                __asm__ volatile ("sync");
                   2412:                return;
                   2413:        }
                   2414:        /* try fill of secondary hash */
                   2415:        ptp32 = pmap_ptable32 + (idx ^ pmap_ptab_mask) * 8;
                   2416:        for (i = 0; i < 8; i++) {
                   2417:                if (ptp32[i].pte_hi & PTE_VALID_32)
                   2418:                        continue;
                   2419:
                   2420:                pted->pted_va |= (i | PTED_VA_HID_M);
                   2421:                ptp32[i].pte_hi =
                   2422:                    (pted->p.pted_pte32.pte_hi | PTE_HID_32) & ~PTE_VALID_32;
                   2423:                ptp32[i].pte_lo = pted->p.pted_pte32.pte_lo;
                   2424:                __asm__ volatile ("sync");
                   2425:                ptp32[i].pte_hi |= PTE_VALID_32;
                    2426:                __asm__ volatile ("sync");
                   2427:                return;
                   2428:        }
                   2429:
                   2430:        /* need decent replacement algorithm */
                   2431:        __asm__ volatile ("mftb %0" : "=r"(off));
                   2432:        secondary = off & 8;
                   2433:        pted->pted_va |= off & (PTED_VA_PTEGIDX_M|PTED_VA_HID_M);
                   2434:
                   2435:        idx = (idx ^ (PTED_HID(pted) ? pmap_ptab_mask : 0));
                   2436:
                   2437:        ptp32 = pmap_ptable32 + (idx * 8);
                   2438:        ptp32 += PTED_PTEGIDX(pted); /* increment by index into pteg */
                   2439:        if (ptp32->pte_hi & PTE_VALID_32) {
                   2440:                vaddr_t va;
                   2441:                ptp32->pte_hi &= ~PTE_VALID_32;
                    2442:                __asm__ volatile ("sync");
                   2443:
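                                          /*
                                           * The hash is (page index ^ VSID), so XORing the PTEG
                                           * index (after undoing the secondary complement) with
                                           * the VSID recovers the low 10 bits of the page index;
                                           * the API field supplies the bits above them.
                                           */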
                   2444:                va = ((ptp32->pte_hi & PTE_API_32) << ADDR_API_SHIFT_32) |
                   2445:                     ((((ptp32->pte_hi >> PTE_VSID_SHIFT_32) & SR_VSID)
                   2446:                        ^(idx ^ ((ptp32->pte_hi & PTE_HID_32) ? 0x3ff : 0)))
                   2447:                            & 0x3ff) << PAGE_SHIFT;
                   2448:                tlbie(va);
                   2449:
                   2450:                tlbsync();
                   2451:                pmap_attr_save(ptp32->pte_lo & PTE_RPGN_32,
                   2452:                    ptp32->pte_lo & (PTE_REF_32|PTE_CHG_32));
                   2453:        }
                   2454:        if (secondary)
                   2455:                ptp32->pte_hi =
                   2456:                    (pted->p.pted_pte32.pte_hi | PTE_HID_32) & ~PTE_VALID_32;
                   2457:        else
                   2458:                ptp32->pte_hi = pted->p.pted_pte32.pte_hi & ~PTE_VALID_32;
                   2459:        ptp32->pte_lo = pted->p.pted_pte32.pte_lo;
                   2460:        __asm__ volatile ("sync");
                   2461:        ptp32->pte_hi |= PTE_VALID_32;
                   2462:
                   2463: }
                   2464:
                   2465: #ifdef DEBUG_PMAP
                   2466: void
                   2467: print_pteg(pmap_t pm, vaddr_t va)
                   2468: {
                   2469:        int sr, idx;
                   2470:        struct pte *ptp;
                   2471:
                   2472:        sr = ptesr(pm->pm_sr, va);
                   2473:        idx = pteidx(sr,  va);
                   2474:
                   2475:        ptp = pmap_ptable + idx  * 8;
                   2476:        db_printf("va %x, sr %x, idx %x\n", va, sr, idx);
                   2477:
                   2478:        db_printf("%08x %08x %08x %08x  %08x %08x %08x %08x\n",
                   2479:            ptp[0].pte_hi, ptp[1].pte_hi, ptp[2].pte_hi, ptp[3].pte_hi,
                   2480:            ptp[4].pte_hi, ptp[5].pte_hi, ptp[6].pte_hi, ptp[7].pte_hi);
                   2481:        db_printf("%08x %08x %08x %08x  %08x %08x %08x %08x\n",
                   2482:            ptp[0].pte_lo, ptp[1].pte_lo, ptp[2].pte_lo, ptp[3].pte_lo,
                   2483:            ptp[4].pte_lo, ptp[5].pte_lo, ptp[6].pte_lo, ptp[7].pte_lo);
                   2484:        ptp = pmap_ptable + (idx ^ pmap_ptab_mask) * 8;
                   2485:        db_printf("%08x %08x %08x %08x  %08x %08x %08x %08x\n",
                   2486:            ptp[0].pte_hi, ptp[1].pte_hi, ptp[2].pte_hi, ptp[3].pte_hi,
                   2487:            ptp[4].pte_hi, ptp[5].pte_hi, ptp[6].pte_hi, ptp[7].pte_hi);
                   2488:        db_printf("%08x %08x %08x %08x  %08x %08x %08x %08x\n",
                   2489:            ptp[0].pte_lo, ptp[1].pte_lo, ptp[2].pte_lo, ptp[3].pte_lo,
                   2490:            ptp[4].pte_lo, ptp[5].pte_lo, ptp[6].pte_lo, ptp[7].pte_lo);
                   2491: }
                   2492:
                   2493:
                   2494: /* debugger assist function */
                   2495: int pmap_prtrans(u_int pid, vaddr_t va);
                   2496:
                   2497: void
                   2498: pmap_print_pted(struct pte_desc *pted, int(*print)(const char *, ...))
                   2499: {
                   2500:        vaddr_t va;
                   2501:        va = pted->pted_va & ~PAGE_MASK;
                   2502:        print("\n pted %x", pted);
                   2503:        if (PTED_VALID(pted)) {
                   2504:                print(" va %x:", pted->pted_va & ~PAGE_MASK);
                   2505:                print(" HID %d", PTED_HID(pted) ? 1: 0);
                   2506:                print(" PTEGIDX %x", PTED_PTEGIDX(pted));
                   2507:                print(" MANAGED %d", PTED_MANAGED(pted) ? 1: 0);
                   2508:                print(" WIRED %d\n", PTED_WIRED(pted) ? 1: 0);
                   2509:                if (ppc_proc_is_64b) {
                   2510:                        print("ptehi %x ptelo %x ptp %x Aptp %x\n",
                   2511:                            pted->p.pted_pte64.pte_hi,
                   2512:                            pted->p.pted_pte64.pte_lo,
                   2513:                            pmap_ptable +
                   2514:                                8*pteidx(ptesr(pted->pted_pmap->pm_sr, va), va),
                   2515:                            pmap_ptable +
                   2516:                                8*(pteidx(ptesr(pted->pted_pmap->pm_sr, va), va)
                   2517:                                    ^ pmap_ptab_mask)
                   2518:                            );
                   2519:                } else {
                   2520:                        print("ptehi %x ptelo %x ptp %x Aptp %x\n",
                   2521:                            pted->p.pted_pte32.pte_hi,
                   2522:                            pted->p.pted_pte32.pte_lo,
                   2523:                            pmap_ptable +
                   2524:                                8*pteidx(ptesr(pted->pted_pmap->pm_sr, va), va),
                   2525:                            pmap_ptable +
                   2526:                                8*(pteidx(ptesr(pted->pted_pmap->pm_sr, va), va)
                   2527:                                    ^ pmap_ptab_mask)
                   2528:                            );
                   2529:                }
                   2530:        }
                   2531: }
                   2532:
                   2533: int pmap_user_read(int size, vaddr_t va);
                   2534: int
                   2535: pmap_user_read(int size, vaddr_t va)
                   2536: {
                   2537:        unsigned char  read1;
                   2538:        unsigned short read2;
                   2539:        unsigned int   read4;
                   2540:        int err;
                   2541:
                   2542:        if (size == 1) {
                   2543:                err = copyin((void *)va, &read1, 1);
                   2544:                if (err == 0) {
                   2545:                        db_printf("byte read %x\n", read1);
                   2546:                }
                   2547:        } else if (size == 2) {
                   2548:                err = copyin((void *)va, &read2, 2);
                   2549:                if (err == 0) {
                   2550:                        db_printf("short read %x\n", read2);
                   2551:                }
                   2552:        } else if (size == 4) {
                   2553:                err = copyin((void *)va, &read4, 4);
                   2554:                if (err == 0) {
                   2555:                        db_printf("int read %x\n", read4);
                   2556:                }
                   2557:        } else {
                   2558:                return 1;
                   2559:        }
                   2560:
                   2561:
                   2562:        return 0;
                   2563: }
                   2564:
                   2565: int pmap_dump_pmap(u_int pid);
                   2566: int
                   2567: pmap_dump_pmap(u_int pid)
                   2568: {
                   2569:        pmap_t pm;
                   2570:        struct proc *p;
                   2571:        if (pid == 0) {
                   2572:                pm = pmap_kernel();
                   2573:        } else {
                   2574:                p = pfind(pid);
                   2575:
                   2576:                if (p == NULL) {
                   2577:                        db_printf("invalid pid %d", pid);
                   2578:                        return 1;
                   2579:                }
                   2580:                pm = p->p_vmspace->vm_map.pmap;
                   2581:        }
                   2582:        printf("pmap %x:\n", pm);
                   2583:        printf("segid %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x",
                   2584:            pm->pm_sr[0], pm->pm_sr[1], pm->pm_sr[2], pm->pm_sr[3],
                   2585:            pm->pm_sr[4], pm->pm_sr[5], pm->pm_sr[6], pm->pm_sr[7],
                   2586:            pm->pm_sr[8], pm->pm_sr[9], pm->pm_sr[10], pm->pm_sr[11],
                   2587:            pm->pm_sr[12], pm->pm_sr[13], pm->pm_sr[14], pm->pm_sr[15]);
                   2588:
                   2589:        return 0;
                   2590: }
                   2591:
                   2592: int
                   2593: pmap_prtrans(u_int pid, vaddr_t va)
                   2594: {
                   2595:        struct proc *p;
                   2596:        pmap_t pm;
                   2597:        struct pmapvp *vp1;
                   2598:        struct pmapvp *vp2;
                   2599:        struct pte_desc *pted;
                   2600:
                   2601:        if (pid == 0) {
                   2602:                pm = pmap_kernel();
                   2603:        } else {
                   2604:                p = pfind(pid);
                   2605:
                   2606:                if (p == NULL) {
                   2607:                        db_printf("invalid pid %d", pid);
                   2608:                        return 1;
                   2609:                }
                   2610:                pm = p->p_vmspace->vm_map.pmap;
                   2611:        }
                   2612:
                   2613:        db_printf(" pid %d, va 0x%x pmap %x\n", pid, va, pm);
                   2614:        vp1 = pm->pm_vp[VP_SR(va)];
                   2615:        db_printf("sr %x id %x vp1 %x", VP_SR(va), pm->pm_sr[VP_SR(va)],
                   2616:            vp1);
                   2617:
                   2618:        if (vp1) {
                   2619:                vp2 = vp1->vp[VP_IDX1(va)];
                   2620:                db_printf(" vp2 %x", vp2);
                   2621:
                   2622:                if (vp2) {
                   2623:                        pted = vp2->vp[VP_IDX2(va)];
                   2624:                        pmap_print_pted(pted, db_printf);
                   2625:
                   2626:                }
                   2627:        }
                   2628:        print_pteg(pm, va);
                   2629:
                   2630:        return 0;
                   2631: }
                   2632: int pmap_show_mappings(paddr_t pa);
                   2633:
                   2634: int
                   2635: pmap_show_mappings(paddr_t pa)
                   2636: {
                   2637:        struct pte_desc *pted;
                   2638:        struct vm_page *pg;
                   2639:
                   2640:        pg = PHYS_TO_VM_PAGE(pa);
                   2641:        if (pg == NULL) {
                   2642:                db_printf("pa %x: unmanaged\n");
                   2643:        } else {
                   2644:                LIST_FOREACH(pted, &(pg->mdpage.pv_list), pted_pv_list) {
                   2645:                        pmap_print_pted(pted, db_printf);
                   2646:                }
                   2647:        }
                   2648:        return 0;
                   2649: }
                   2650: #endif
