/*	$OpenBSD: pmap.c,v 1.29 2007/07/18 20:06:07 miod Exp $	*/

/*
 * Copyright (c) 2001-2004 Opsycon AB  (www.opsycon.se / www.opsycon.com)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * XXX This code needs some major rewriting.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/user.h>
#include <sys/buf.h>
#include <sys/pool.h>
#ifdef SYSVSHM
#include <sys/shm.h>
#endif

#include <machine/pte.h>
#include <machine/cpu.h>
#include <machine/autoconf.h>
#include <machine/memconf.h>
#include <machine/vmparam.h>
#include <mips64/archtype.h>

#include <uvm/uvm.h>

extern void mem_zero_page(vaddr_t);

struct pool pmap_pmap_pool;
struct pool pmap_pv_pool;

#define pmap_pv_alloc()		(pv_entry_t)pool_get(&pmap_pv_pool, PR_NOWAIT)
#define pmap_pv_free(pv)	pool_put(&pmap_pv_pool, (pv))

#ifndef PMAP_PV_LOWAT
#define PMAP_PV_LOWAT	16
#endif
int	pmap_pv_lowat = PMAP_PV_LOWAT;

int	pmap_alloc_tlbpid(struct proc *);
int	pmap_enter_pv(pmap_t, vaddr_t, vm_page_t, u_int *);
int	pmap_page_alloc(vaddr_t *);
void	pmap_page_free(vaddr_t);
void	pmap_page_cache(vm_page_t, int);
void	pmap_remove_pv(pmap_t, vaddr_t, paddr_t);

#ifdef PMAPDEBUG
struct {
	int kernel;	/* entering kernel mapping */
	int user;	/* entering user mapping */
	int ptpneeded;	/* needed to allocate a PT page */
	int pwchange;	/* no mapping change, just wiring or protection */
	int wchange;	/* no mapping change, just wiring */
	int mchange;	/* was mapped but mapping to different page */
	int managed;	/* a managed page */
	int firstpv;	/* first mapping for this PA */
	int secondpv;	/* second mapping for this PA */
	int ci;		/* cache inhibited */
	int unmanaged;	/* not a managed page */
	int flushes;	/* cache flushes */
	int cachehit;	/* new entry forced valid entry out */
} enter_stats;
struct {
	int calls;
	int removes;
	int flushes;
	int pidflushes;	/* HW pid stolen */
	int pvfirst;
	int pvsearch;
} remove_stats;

#define PDB_FOLLOW	0x0001
#define PDB_INIT	0x0002
#define PDB_ENTER	0x0004
#define PDB_REMOVE	0x0008
#define PDB_CREATE	0x0010
#define PDB_PTPAGE	0x0020
#define PDB_PVENTRY	0x0040
#define PDB_BITS	0x0080
#define PDB_COLLECT	0x0100
#define PDB_PROTECT	0x0200
#define PDB_TLBPID	0x0400
#define PDB_PARANOIA	0x2000
#define PDB_WIRING	0x4000
#define PDB_PVDUMP	0x8000

/*
 * Wrap the conditional printf in do/while so that DPRINTF() acts as a
 * single statement and cannot capture an unrelated `else' at the call site.
 */
#define DPRINTF(flag, printdata)					\
	do {								\
		if (pmapdebug & (flag))					\
			printf printdata;				\
	} while (0)

#define stat_count(what)	((what)++)
int pmapdebug = PDB_ENTER|PDB_FOLLOW;

#else

#define DPRINTF(flag, printdata)	do { /* nothing */ } while (0)
#define stat_count(what)		do { /* nothing */ } while (0)

#endif	/* PMAPDEBUG */

struct pmap	kernel_pmap_store;

psize_t	mem_size;	/* memory size in bytes */
vaddr_t	virtual_start;	/* VA of first avail page (after kernel bss) */
vaddr_t	virtual_end;	/* VA of last avail page (end of kernel AS) */

u_int	tlbpid_gen = 1;	/* TLB PID generation count */
int	tlbpid_cnt = 2;	/* next available TLB PID */

pt_entry_t	*Sysmap;	/* kernel pte table */
u_int		Sysmapsize;	/* number of pte's in Sysmap */

/*
 * Bootstrap the system enough to run with virtual memory.
 */
void
pmap_bootstrap(void)
{
	u_int i;
	pt_entry_t *spte;

	/*
	 * Create a mapping table for kernel virtual memory.  This
	 * table is a linear table, in contrast to the user process
	 * mapping tables which are built with segment/page tables.
	 * Create 1GB of map (this will only use 1MB of memory).
	 */
	virtual_start = VM_MIN_KERNEL_ADDRESS;
	virtual_end = VM_MAX_KERNEL_ADDRESS;

	Sysmapsize = (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) /
	    PAGE_SIZE;
	if (Sysmapsize & 1)
		Sysmapsize++;	/* force even number of pages */

	Sysmap = (pt_entry_t *)
	    uvm_pageboot_alloc(sizeof(pt_entry_t) * Sysmapsize);

	pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl",
	    NULL);
	pool_init(&pmap_pv_pool, sizeof(struct pv_entry), 0, 0, 0, "pvpl",
	    NULL);

	simple_lock_init(&pmap_kernel()->pm_lock);
	pmap_kernel()->pm_count = 1;

	/*
	 * The 64 bit MIPS architecture stores the AND result of the
	 * Global bits of each pte pair in the on-chip translation
	 * lookaside buffer.  Thus invalid entries must have the Global
	 * bit set, so that when the EntryLo0 and EntryLo1 G bits are
	 * ANDed together they still produce a global bit to store in
	 * the TLB.
	 */
	for (i = 0, spte = Sysmap; i < Sysmapsize; i++, spte++)
		spte->pt_entry = PG_G;
}
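
/*
 * Illustrative sketch (not part of the original code): R4000-style TLBs
 * hold one entry per pair of pages, and the hardware ANDs the G (global)
 * bits of the even and odd PTEs of the pair.  The hypothetical helper
 * below shows why pmap_bootstrap() presets PG_G in every invalid Sysmap
 * entry: a single non-global invalid PTE would clear the G bit for its
 * valid neighbour as well.
 */
#if 0
static int
tlb_pair_is_global(const pt_entry_t *even, const pt_entry_t *odd)
{
	/* The joint TLB entry is global only if both halves agree. */
	return (even->pt_entry & odd->pt_entry & PG_G) != 0;
}
#endif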

/*
 * Page steal allocator used during bootup.
 */
vaddr_t
pmap_steal_memory(vsize_t size, vaddr_t *vstartp, vaddr_t *vendp)
{
	int i, j;
	int npg;
	vaddr_t va;
	paddr_t pa;

#ifdef DIAGNOSTIC
	if (uvm.page_init_done) {
		panic("pmap_steal_memory: too late, vm is running!");
	}
#endif

	size = round_page(size);
	npg = atop(size);
	va = 0;

	for (i = 0; i < vm_nphysseg && va == 0; i++) {
		if (vm_physmem[i].avail_start != vm_physmem[i].start ||
		    vm_physmem[i].avail_start >= vm_physmem[i].avail_end) {
			continue;
		}

		if ((vm_physmem[i].avail_end - vm_physmem[i].avail_start) <
		    npg)
			continue;

		pa = ptoa(vm_physmem[i].avail_start);
		vm_physmem[i].avail_start += npg;
		vm_physmem[i].start += npg;

		if (vm_physmem[i].avail_start == vm_physmem[i].end) {
			if (vm_nphysseg == 1)
				panic("pmap_steal_memory: out of memory!");

			/* Compact the exhausted segment out of the array. */
			vm_nphysseg--;
			for (j = i; j < vm_nphysseg; j++)
				vm_physmem[j] = vm_physmem[j + 1];
		}
		if (vstartp)
			*vstartp = round_page(virtual_start);
		if (vendp)
			*vendp = virtual_end;

		/*
		 * Prefer KSEG0 addresses for now, whenever possible.
		 */
		if (pa + size < KSEG_SIZE)
			va = PHYS_TO_KSEG0(pa);
		else
			va = PHYS_TO_XKPHYS(pa, CCA_NONCOHERENT);

		bzero((void *)va, size);
		return (va);
	}

	panic("pmap_steal_memory: no memory to steal");
}

/*
 * Initialize the pmap module.
 * Called by vm_init, to initialize any structures that the pmap
 * system needs to map virtual memory.
 */
void
pmap_init(void)
{

	DPRINTF(PDB_FOLLOW|PDB_INIT, ("pmap_init()\n"));

#if 0 /* too early */
	pool_setlowat(&pmap_pv_pool, pmap_pv_lowat);
#endif
}

static pv_entry_t pg_to_pvh(struct vm_page *);
static __inline pv_entry_t
pg_to_pvh(struct vm_page *pg)
{
	return &pg->mdpage.pv_ent;
}

/*
 * Create and return a physical map.
 */
pmap_t
pmap_create(void)
{
	pmap_t pmap;
	vaddr_t va;
	int s;

	extern struct vmspace vmspace0;
	extern struct user *proc0paddr;

	DPRINTF(PDB_FOLLOW|PDB_CREATE, ("pmap_create()\n"));

	s = splvm();
	pmap = pool_get(&pmap_pmap_pool, PR_WAITOK);
	splx(s);
	bzero(pmap, sizeof(*pmap));

	simple_lock_init(&pmap->pm_lock);
	pmap->pm_count = 1;

	while (pmap_page_alloc(&va) != 0) {
		/* XXX What else can we do?  Deadlocks? */
		uvm_wait("pmap_create");
	}

	pmap->pm_segtab = (struct segtab *)va;

	if (pmap == vmspace0.vm_map.pmap) {
		/*
		 * The initial process has already been allocated a TLBPID
		 * in mach_init().
		 */
		pmap->pm_tlbpid = 1;
		pmap->pm_tlbgen = tlbpid_gen;
		proc0paddr->u_pcb.pcb_segtab = pmap->pm_segtab;
	} else {
		pmap->pm_tlbpid = 0;
		pmap->pm_tlbgen = 0;
	}

	return (pmap);
}

/*
 * Retire the given physical map from service.
 * Should only be called if the map contains no valid mappings.
 */
void
pmap_destroy(pmap_t pmap)
{
	int s, count;

	DPRINTF(PDB_FOLLOW|PDB_CREATE, ("pmap_destroy(%p)\n", pmap));

	simple_lock(&pmap->pm_lock);
	count = --pmap->pm_count;
	simple_unlock(&pmap->pm_lock);
	if (count > 0)
		return;

	if (pmap->pm_segtab) {
		pt_entry_t *pte;
		int i;
#ifdef PARANOIA
		int j;
#endif

		for (i = 0; i < PMAP_SEGTABSIZE; i++) {
			/* get pointer to segment map */
			pte = pmap->pm_segtab->seg_tab[i];
			if (!pte)
				continue;
#ifdef PARANOIA
			for (j = 0; j < NPTEPG; j++) {
				if ((pte+j)->pt_entry)
					panic("pmap_destroy: segmap not empty");
			}
#endif
			Mips_HitInvalidateDCache((vaddr_t)pte, PAGE_SIZE);
			pmap_page_free((vaddr_t)pte);
#ifdef PARANOIA
			pmap->pm_segtab->seg_tab[i] = NULL;
#endif
		}
		pmap_page_free((vaddr_t)pmap->pm_segtab);
#ifdef PARANOIA
		pmap->pm_segtab = NULL;
#endif
	}

	s = splvm();
	pool_put(&pmap_pmap_pool, pmap);
	splx(s);
}

/*
 * Add a reference to the specified pmap.
 */
void
pmap_reference(pmap_t pmap)
{

	DPRINTF(PDB_FOLLOW, ("pmap_reference(%p)\n", pmap));

	if (pmap) {
		simple_lock(&pmap->pm_lock);
		pmap->pm_count++;
		simple_unlock(&pmap->pm_lock);
	}
}

/*
 * Make a new pmap (vmspace) active for the given process.
 */
void
pmap_activate(struct proc *p)
{
	pmap_t pmap = p->p_vmspace->vm_map.pmap;

	p->p_addr->u_pcb.pcb_segtab = pmap->pm_segtab;
	pmap_alloc_tlbpid(p);
}

/*
 * Make a previously active pmap (vmspace) inactive.
 */
void
pmap_deactivate(struct proc *p)
{
	/* Empty */
}

/*
 * Remove the given range of addresses from the specified map.
 *
 * It is assumed that the start and end are properly
 * rounded to the page size.
 */
void
pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva)
{
	vaddr_t nssva;
	pt_entry_t *pte;
	unsigned entry;

	DPRINTF(PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT,
	    ("pmap_remove(%p, %p, %p)\n", pmap, sva, eva));

	stat_count(remove_stats.calls);

	if (pmap == NULL)
		return;

	if (pmap == pmap_kernel()) {
		/* remove entries from kernel pmap */
#ifdef DIAGNOSTIC
		if (sva < VM_MIN_KERNEL_ADDRESS || eva < sva)
			panic("pmap_remove: kva not in range");
#endif
		pte = kvtopte(sva);
		for (; sva < eva; sva += NBPG, pte++) {
			entry = pte->pt_entry;
			if (!(entry & PG_V))
				continue;
			pmap->pm_stats.resident_count--;
			pmap_remove_pv(pmap, sva, pfn_to_pad(entry));
			pte->pt_entry = PG_NV | PG_G;
			/*
			 * Flush the TLB for the given address.
			 */
			tlb_flush_addr(sva);
			stat_count(remove_stats.flushes);
		}
		return;
	}

#ifdef DIAGNOSTIC
	if (eva > VM_MAXUSER_ADDRESS)
		panic("pmap_remove: uva not in range");
#endif
	while (sva < eva) {
		nssva = mips_trunc_seg(sva) + NBSEG;
		if (nssva == 0 || nssva > eva)
			nssva = eva;
		/*
		 * If VA belongs to an unallocated segment,
		 * skip to the next segment boundary.
		 */
		if (!(pte = pmap_segmap(pmap, sva))) {
			sva = nssva;
			continue;
		}
		/*
		 * Invalidate every valid mapping within this segment.
		 */
		pte += uvtopte(sva);
		for (; sva < nssva; sva += NBPG, pte++) {
			entry = pte->pt_entry;
			if (!(entry & PG_V))
				continue;
			pmap->pm_stats.resident_count--;
			pmap_remove_pv(pmap, sva, pfn_to_pad(entry));
			pte->pt_entry = PG_NV;
			/*
			 * Flush the TLB for the given address.
			 */
			if (pmap->pm_tlbgen == tlbpid_gen) {
				tlb_flush_addr(sva | (pmap->pm_tlbpid <<
				    VMTLB_PID_SHIFT));
				stat_count(remove_stats.flushes);
			}
		}
	}
}

/*
 * pmap_page_protect:
 *
 * Lower the permission for all mappings to a given page.
 */
void
pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
{
	pv_entry_t pv;
	vaddr_t va;
	int s;

	if (prot == VM_PROT_NONE) {
		DPRINTF(PDB_REMOVE, ("pmap_page_protect(%p, 0x%x)\n",
		    pg, prot));
	} else {
		DPRINTF(PDB_FOLLOW|PDB_PROTECT,
		    ("pmap_page_protect(%p, 0x%x)\n", pg, prot));
	}

	switch (prot) {
	case VM_PROT_READ|VM_PROT_WRITE:
	case VM_PROT_ALL:
		break;

	/* copy_on_write */
	case VM_PROT_READ:
	case VM_PROT_READ|VM_PROT_EXECUTE:
		pv = pg_to_pvh(pg);
		s = splvm();
		/*
		 * Loop over all current mappings setting/clearing as apropos.
		 */
		if (pv->pv_pmap != NULL) {
			for (; pv; pv = pv->pv_next) {
				va = pv->pv_va;
				pmap_protect(pv->pv_pmap, va, va + PAGE_SIZE,
				    prot);
			}
		}
		splx(s);
		break;

	/* remove_all */
	default:
		pv = pg_to_pvh(pg);
		s = splvm();
		while (pv->pv_pmap != NULL) {
			va = pv->pv_va;
			pmap_remove(pv->pv_pmap, va, va + PAGE_SIZE);
		}
		splx(s);
	}
}

/*
 * Set the physical protection on the
 * specified range of this map as requested.
 */
void
pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
{
	vaddr_t nssva;
	pt_entry_t *pte;
	u_int entry;
	u_int p;

	DPRINTF(PDB_FOLLOW|PDB_PROTECT,
	    ("pmap_protect(%p, %p, %p, 0x%x)\n", pmap, sva, eva, prot));

	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
		pmap_remove(pmap, sva, eva);
		return;
	}

	p = (prot & VM_PROT_WRITE) ? PG_M : PG_RO;

	if (pmap == pmap_kernel()) {
		/*
		 * Change entries in kernel pmap.
		 * This will trap if the page is writable (in order to set
		 * the dirty bit) even if the dirty bit is already set.  The
		 * optimization isn't worth the effort since this code isn't
		 * executed much.  The common case is to make a user page
		 * read-only.
		 */
#ifdef DIAGNOSTIC
		if (sva < VM_MIN_KERNEL_ADDRESS || eva < sva)
			panic("pmap_protect: kva not in range");
#endif
		pte = kvtopte(sva);
		for (; sva < eva; sva += NBPG, pte++) {
			entry = pte->pt_entry;
			if (!(entry & PG_V))
				continue;
			entry = (entry & ~(PG_M | PG_RO)) | p;
			pte->pt_entry = entry;
			/*
			 * Update the TLB if the given address is in the cache.
			 */
			tlb_update(sva, entry);
		}
		return;
	}

#ifdef DIAGNOSTIC
	if (eva > VM_MAXUSER_ADDRESS)
		panic("pmap_protect: uva not in range");
#endif
	while (sva < eva) {
		nssva = mips_trunc_seg(sva) + NBSEG;
		if (nssva == 0 || nssva > eva)
			nssva = eva;
		/*
		 * If VA belongs to an unallocated segment,
		 * skip to the next segment boundary.
		 */
		if (!(pte = pmap_segmap(pmap, sva))) {
			sva = nssva;
			continue;
		}
		/*
		 * Change protection on every valid mapping within this segment.
		 */
		pte += uvtopte(sva);
		for (; sva < nssva; sva += NBPG, pte++) {
			entry = pte->pt_entry;
			if (!(entry & PG_V))
				continue;
			entry = (entry & ~(PG_M | PG_RO)) | p;
			pte->pt_entry = entry;
			if (pmap->pm_tlbgen == tlbpid_gen)
				tlb_update(sva | (pmap->pm_tlbpid <<
				    VMTLB_PID_SHIFT), entry);
		}
	}
}

/*
 * Insert the given physical page (pa) at
 * the specified virtual address (va) in the
 * target physical map with the protection requested.
 *
 * NB: This is the only routine which MAY NOT lazy-evaluate
 * or lose information.  That is, this routine must actually
 * insert this page into the given map NOW.
 */
int
pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
{
	pt_entry_t *pte;
	u_int npte;
	vm_page_t pg;

	DPRINTF(PDB_FOLLOW|PDB_ENTER,
	    ("pmap_enter(%p, %p, %p, 0x%x, 0x%x)\n",
	    pmap, va, pa, prot, flags));

#ifdef DIAGNOSTIC
	if (pmap == pmap_kernel()) {
		stat_count(enter_stats.kernel);
		if (va < VM_MIN_KERNEL_ADDRESS)
			panic("pmap_enter: kva %p", va);
	} else {
		stat_count(enter_stats.user);
		if (va >= VM_MAXUSER_ADDRESS)
			panic("pmap_enter: uva %p", va);
	}
#endif

	pg = PHYS_TO_VM_PAGE(pa);

	if (pg != NULL) {
		if (!(prot & VM_PROT_WRITE)) {
			npte = PG_ROPAGE;
		} else {
			if (pmap == pmap_kernel()) {
				/*
				 * Don't bother to trap on kernel writes,
				 * just record page as dirty.
				 */
				npte = PG_RWPAGE;
			} else {
				if (pg->pg_flags & PV_ATTR_MOD) {
					npte = PG_RWPAGE;
				} else {
					npte = PG_CWPAGE;
				}
			}
		}

		/* Set page referenced/modified status based on flags */
		if (flags & VM_PROT_WRITE)
			atomic_setbits_int(&pg->pg_flags,
			    PV_ATTR_MOD | PV_ATTR_REF);
		else if (flags & VM_PROT_ALL)
			atomic_setbits_int(&pg->pg_flags, PV_ATTR_REF);

		stat_count(enter_stats.managed);
	} else {
		/*
		 * Assumption: if it is not part of our managed memory
		 * then it must be device memory which may be volatile.
		 */
		stat_count(enter_stats.unmanaged);
		if (prot & VM_PROT_WRITE) {
			npte = PG_IOPAGE & ~PG_G;
		} else {
			npte = (PG_IOPAGE | PG_RO) & ~(PG_G | PG_M);
		}
	}

	if (pmap == pmap_kernel()) {
		if (pg != NULL) {
			if (pmap_enter_pv(pmap, va, pg, &npte) != 0) {
				if (flags & PMAP_CANFAIL)
					return ENOMEM;
				panic("pmap_enter: pmap_enter_pv() failed");
			}
		}

		pte = kvtopte(va);
		npte |= vad_to_pfn(pa) | PG_G;
		if (!(pte->pt_entry & PG_V)) {
			pmap->pm_stats.resident_count++;
		}
		if ((pte->pt_entry & PG_V) && pa != pfn_to_pad(pte->pt_entry)) {
			pmap_remove(pmap, va, va + NBPG);
			stat_count(enter_stats.mchange);
		}

		/*
		 * Update the same virtual address entry.
		 */
		pte->pt_entry = npte;
		tlb_update(va, npte);
		return 0;
	}

	/*
	 * User space mapping.  Do table build.
	 */
	if (!(pte = pmap_segmap(pmap, va))) {
		vaddr_t nva;

		while (pmap_page_alloc(&nva) != 0) {
			if (flags & PMAP_CANFAIL)
				return ENOMEM;
			uvm_wait("pmap_enter");
		}

		pmap_segmap(pmap, va) = pte = (pt_entry_t *)nva;
	}

	if (pg != NULL) {
		if (pmap_enter_pv(pmap, va, pg, &npte) != 0) {
			if (flags & PMAP_CANFAIL)
				return ENOMEM;
			panic("pmap_enter: pmap_enter_pv() failed");
		}
	}

	pte += uvtopte(va);

	/*
	 * Now validate mapping with desired protection/wiring.
	 * Assume uniform modified and referenced status for all
	 * MIPS pages in an OpenBSD page.
	 */
	npte |= vad_to_pfn(pa);
	if (pmap->pm_tlbgen == tlbpid_gen) {
		DPRINTF(PDB_ENTER, ("pmap_enter: new pte 0x%08x tlbpid %d\n",
		    npte, pmap->pm_tlbpid));
	} else {
		DPRINTF(PDB_ENTER, ("pmap_enter: new pte 0x%08x\n", npte));
	}

	if ((pte->pt_entry & PG_V) && pa != pfn_to_pad(pte->pt_entry)) {
		pmap_remove(pmap, va, va + NBPG);
		stat_count(enter_stats.mchange);
	}

	if (!(pte->pt_entry & PG_V)) {
		pmap->pm_stats.resident_count++;
	}
	pte->pt_entry = npte;
	if (pmap->pm_tlbgen == tlbpid_gen) {
		tlb_update(va | (pmap->pm_tlbpid << VMTLB_PID_SHIFT), npte);
	}

	/*
	 * If mapping an executable memory space address,
	 * invalidate the instruction cache.
	 */
	if (pg != NULL && (prot & VM_PROT_EXECUTE))
		Mips_InvalidateICache(va, PAGE_SIZE);

	return 0;
}
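
/*
 * Illustrative sketch (not part of the original code): the two-level
 * user lookup performed piecemeal above, collected into one hypothetical
 * helper.  pmap_segmap() selects the per-segment PTE page (level 1) and
 * uvtopte() indexes the PTE within it (level 2); both come from the
 * machine headers.
 */
#if 0
static pt_entry_t *
user_va_to_pte(pmap_t pmap, vaddr_t va)
{
	pt_entry_t *seg = pmap_segmap(pmap, va);	/* level 1 */

	if (seg == NULL)
		return NULL;		/* segment not yet allocated */
	return seg + uvtopte(va);	/* level 2: index within page */
}
#endif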

void
pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
{
	pt_entry_t *pte;
	u_int npte;

	DPRINTF(PDB_FOLLOW|PDB_ENTER,
	    ("pmap_kenter_pa(%p, %p, 0x%x)\n", va, pa, prot));

	npte = vad_to_pfn(pa) | PG_G;
	if (prot & VM_PROT_WRITE)
		npte |= PG_RWPAGE;
	else
		npte |= PG_ROPAGE;
	pte = kvtopte(va);
	pte->pt_entry = npte;
	tlb_update(va, npte);
}

/*
 * Remove a mapping from the kernel map table.  When doing this
 * the cache must be synced for the VA mapped, since we mapped
 * pages behind the back of the VP tracking system.
 */
void
pmap_kremove(vaddr_t va, vsize_t len)
{
	pt_entry_t *pte;
	vaddr_t eva;
	u_int entry;

	DPRINTF(PDB_FOLLOW|PDB_REMOVE, ("pmap_kremove(%p, %p)\n", va, len));

	pte = kvtopte(va);
	eva = va + len;
	for (; va < eva; va += PAGE_SIZE, pte++) {
		entry = pte->pt_entry;
		if (!(entry & PG_V))
			continue;
		Mips_HitSyncDCache(va, PAGE_SIZE);
		pte->pt_entry = PG_NV | PG_G;
		tlb_flush_addr(va);
	}
}

void
pmap_unwire(pmap_t pmap, vaddr_t va)
{
	/* XXX this pmap does not handle wired mappings yet... */
}

/*
 * Routine:	pmap_extract
 * Function:
 *	Extract the physical page address associated
 *	with the given map/virtual_address pair.
 */
boolean_t
pmap_extract(pmap_t pmap, vaddr_t va, paddr_t *pap)
{
	boolean_t rv = TRUE;
	paddr_t pa = 0;
	pt_entry_t *pte;

	if (pmap == pmap_kernel()) {
		if (IS_XKPHYS(va))
			pa = XKPHYS_TO_PHYS(va);
		else if (va >= (vaddr_t)KSEG0_BASE &&
		    va < (vaddr_t)KSEG0_BASE + KSEG_SIZE)
			pa = KSEG0_TO_PHYS(va);
		else if (va >= (vaddr_t)KSEG1_BASE &&
		    va < (vaddr_t)KSEG1_BASE + KSEG_SIZE)
			pa = KSEG1_TO_PHYS(va);
		else {
#ifdef DIAGNOSTIC
			if (va < VM_MIN_KERNEL_ADDRESS ||
			    va >= VM_MAX_KERNEL_ADDRESS)
				panic("pmap_extract(%p, %p)", pmap, va);
#endif
			pte = kvtopte(va);
			if (pte->pt_entry & PG_V)
				pa = pfn_to_pad(pte->pt_entry) |
				    (va & PAGE_MASK);
			else
				rv = FALSE;
		}
	} else {
		if (!(pte = pmap_segmap(pmap, va)))
			rv = FALSE;
		else {
			pte += uvtopte(va);
			/* Check validity here too, or pa would be garbage. */
			if (pte->pt_entry & PG_V)
				pa = pfn_to_pad(pte->pt_entry) |
				    (va & PAGE_MASK);
			else
				rv = FALSE;
		}
	}
	if (rv != FALSE)
		*pap = pa;

	DPRINTF(PDB_FOLLOW, ("pmap_extract(%p, %p)=%p(%d)\n",
	    pmap, va, pa, rv));

	return (rv);
}

/*
 * Find first virtual address >= *vap that
 * will not cause cache aliases.
 */
void
pmap_prefer(paddr_t foff, vaddr_t *vap)
{
#if 1
	*vap += (foff - *vap) & (CpuCacheAliasMask | PAGE_MASK);
#else
	*vap += (*vap ^ foff) & CpuCacheAliasMask;
#endif
}
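
/*
 * Worked example (illustrative; the mask values below are assumptions,
 * not taken from any particular CPU configuration): with
 * CpuCacheAliasMask == 0x3000, PAGE_MASK == 0x0fff, foff == 0x5000 and
 * *vap == 0x10000, the expression (foff - *vap) & 0x3fff yields 0x1000,
 * so *vap is advanced to 0x11000.  Both 0x11000 and 0x5000 now have the
 * same alias bits (0x1000), i.e. the same cache "color", so the new
 * mapping cannot alias foff in a virtually indexed cache.
 */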

/*
 * Copy the range specified by src_addr/len
 * from the source map to the range dst_addr/len
 * in the destination map.
 *
 * This routine is only advisory and need not do anything.
 */
void
pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vaddr_t dst_addr, vsize_t len,
    vaddr_t src_addr)
{

	DPRINTF(PDB_FOLLOW, ("pmap_copy(%p, %p, %p, %p, %p)\n",
	    dst_pmap, src_pmap, dst_addr, len, src_addr));
}

/*
 * pmap_zero_page zeros the specified (machine independent) page.
 */
void
pmap_zero_page(struct vm_page *pg)
{
	paddr_t phys = VM_PAGE_TO_PHYS(pg);
	vaddr_t va;
	pv_entry_t pv;

	DPRINTF(PDB_FOLLOW, ("pmap_zero_page(%p)\n", phys));

	va = (vaddr_t)PHYS_TO_XKPHYS(phys, CCA_NONCOHERENT);
	pv = pg_to_pvh(pg);
	if ((pg->pg_flags & PV_CACHED) &&
	    ((pv->pv_va ^ va) & CpuCacheAliasMask) != 0) {
		Mips_SyncDCachePage(pv->pv_va);
	}
	mem_zero_page(va);
	Mips_HitSyncDCache(va, PAGE_SIZE);
}

/*
 * pmap_copy_page copies the specified (machine independent) page.
 *
 * We do the copy phys to phys and need to check if there may be
 * a virtual coherence problem.  If so, flush the cache for the
 * areas before copying, and flush afterwards.
 */
void
pmap_copy_page(struct vm_page *srcpg, struct vm_page *dstpg)
{
	paddr_t src, dst;
	vaddr_t s, d;
	int sf = 1;
	pv_entry_t pv;

	src = VM_PAGE_TO_PHYS(srcpg);
	dst = VM_PAGE_TO_PHYS(dstpg);
	s = (vaddr_t)PHYS_TO_XKPHYS(src, CCA_NONCOHERENT);
	d = (vaddr_t)PHYS_TO_XKPHYS(dst, CCA_NONCOHERENT);

	DPRINTF(PDB_FOLLOW, ("pmap_copy_page(%p, %p)\n", src, dst));

	pv = pg_to_pvh(srcpg);
	if ((srcpg->pg_flags & PV_CACHED) &&
	    (sf = ((pv->pv_va ^ (long)s) & CpuCacheAliasMask) != 0)) {
		Mips_SyncDCachePage(pv->pv_va);
	}
	pv = pg_to_pvh(dstpg);
	if ((dstpg->pg_flags & PV_CACHED) &&
	    ((pv->pv_va ^ (long)d) & CpuCacheAliasMask) != 0) {
		Mips_SyncDCachePage(pv->pv_va);
	}

	memcpy((void *)d, (void *)s, PAGE_SIZE);

	if (sf) {
		Mips_HitSyncDCache(s, PAGE_SIZE);
	}
	/* The destination is always flushed, to push the copied data out. */
	Mips_HitSyncDCache(d, PAGE_SIZE);
}

/*
 * Clear the modify bits on the specified physical page.
 * Also sync the cache so it reflects the new clean state of the page.
 */
boolean_t
pmap_clear_modify(struct vm_page *pg)
{
	pv_entry_t pv;
	pt_entry_t *pte;
	unsigned entry;
	boolean_t rv = FALSE;
	int s;

	DPRINTF(PDB_FOLLOW, ("pmap_clear_modify(%p)\n", VM_PAGE_TO_PHYS(pg)));

	pv = pg_to_pvh(pg);
	s = splvm();
	if (pg->pg_flags & PV_ATTR_MOD) {
		atomic_clearbits_int(&pg->pg_flags, PV_ATTR_MOD);
		rv = TRUE;
	}
	if (pg->pg_flags & PV_CACHED)
		Mips_SyncDCachePage(pv->pv_va);

	for (; pv != NULL; pv = pv->pv_next) {
		if (pv->pv_pmap == pmap_kernel()) {
			pte = kvtopte(pv->pv_va);
			entry = pte->pt_entry;
			if ((entry & PG_V) != 0 && (entry & PG_M) != 0) {
				rv = TRUE;
				entry &= ~PG_M;
				pte->pt_entry = entry;
				tlb_update(pv->pv_va, entry);
			}
		} else if (pv->pv_pmap != NULL) {
			if ((pte = pmap_segmap(pv->pv_pmap, pv->pv_va)) == NULL)
				continue;
			pte += uvtopte(pv->pv_va);
			entry = pte->pt_entry;
			if ((entry & PG_V) != 0 && (entry & PG_M) != 0) {
				rv = TRUE;
				entry &= ~PG_M;
				pte->pt_entry = entry;
				if (pv->pv_pmap->pm_tlbgen == tlbpid_gen)
					tlb_update(pv->pv_va |
					    (pv->pv_pmap->pm_tlbpid <<
					    VMTLB_PID_SHIFT), entry);
			}
		}
	}
	splx(s);

	return rv;
}

void
pmap_set_modify(struct vm_page *pg)
{
	atomic_setbits_int(&pg->pg_flags, PV_ATTR_MOD | PV_ATTR_REF);
}

/*
 * pmap_clear_reference:
 *
 *	Clear the reference bit on the specified physical page.
 */
boolean_t
pmap_clear_reference(struct vm_page *pg)
{
	boolean_t rv;

	DPRINTF(PDB_FOLLOW, ("pmap_clear_reference(%p)\n",
	    VM_PAGE_TO_PHYS(pg)));

	rv = (pg->pg_flags & PV_ATTR_REF) != 0;
	atomic_clearbits_int(&pg->pg_flags, PV_ATTR_REF);
	return rv;
}

/*
 * pmap_is_referenced:
 *
 *	Return whether or not the specified physical page is referenced
 *	by any physical maps.
 */
boolean_t
pmap_is_referenced(struct vm_page *pg)
{
	return (pg->pg_flags & PV_ATTR_REF) != 0;
}

/*
 * pmap_is_modified:
 *
 *	Return whether or not the specified physical page is modified
 *	by any physical maps.
 */
boolean_t
pmap_is_modified(struct vm_page *pg)
{
	return (pg->pg_flags & PV_ATTR_MOD) != 0;
}

/*
 * Miscellaneous support routines not part of the pmap API.
 */

/*
 * Return RO protection of page.
 */
int
pmap_is_page_ro(pmap_t pmap, vaddr_t va, int entry)
{
	return (entry & PG_RO);
}

/*
 * Walk the PV list for a physical page and change all its
 * mappings to cached or uncached.
 */
void
pmap_page_cache(vm_page_t pg, int mode)
{
	pv_entry_t pv;
	pt_entry_t *pte;
	u_int entry;
	u_int newmode;
	int s;

	DPRINTF(PDB_FOLLOW|PDB_ENTER, ("pmap_page_cache(%p)\n", pg));

	newmode = mode & PV_UNCACHED ? PG_UNCACHED : PG_CACHED;
	pv = pg_to_pvh(pg);

	s = splvm();
	for (; pv != NULL; pv = pv->pv_next) {
		if (pv->pv_pmap == pmap_kernel()) {
			pte = kvtopte(pv->pv_va);
			entry = pte->pt_entry;
			if (entry & PG_V) {
				entry = (entry & ~PG_CACHEMODE) | newmode;
				pte->pt_entry = entry;
				tlb_update(pv->pv_va, entry);
			}
		} else {
			if ((pte = pmap_segmap(pv->pv_pmap, pv->pv_va))) {
				pte += uvtopte(pv->pv_va);
				entry = pte->pt_entry;
				if (entry & PG_V) {
					entry = (entry & ~PG_CACHEMODE) |
					    newmode;
					pte->pt_entry = entry;
					if (pv->pv_pmap->pm_tlbgen ==
					    tlbpid_gen)
						tlb_update(pv->pv_va |
						    (pv->pv_pmap->pm_tlbpid <<
						    VMTLB_PID_SHIFT), entry);
				}
			}
		}
	}
	atomic_clearbits_int(&pg->pg_flags, PV_CACHED | PV_UNCACHED);
	atomic_setbits_int(&pg->pg_flags, mode);
	splx(s);
}

/*
 * Use this function to allocate pages for the mapping tables.
 * Mapping tables are walked by the TLB miss code and are mapped in
 * XKPHYS to avoid additional page faults when servicing a TLB miss.
 */
int
pmap_page_alloc(vaddr_t *ret)
{
	vm_page_t pg;

	pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE | UVM_PGA_ZERO);
	if (pg == NULL)
		return ENOMEM;

	*ret = PHYS_TO_XKPHYS(VM_PAGE_TO_PHYS(pg), CCA_NONCOHERENT);
	return 0;
}

void
pmap_page_free(vaddr_t va)
{
	vm_page_t pg;

	pg = PHYS_TO_VM_PAGE(XKPHYS_TO_PHYS(va));
	uvm_pagefree(pg);
}

/*
 * Allocate a hardware PID and return it.
 * It takes almost as much or more time to search the TLB for a
 * specific PID and flush those entries as it does to flush the entire TLB.
 * Therefore, when we allocate a new PID, we just take the next number.  When
 * we run out of numbers, we flush the TLB, increment the generation count
 * and start over.  PID zero is reserved for kernel use.
 * This is called only by switch().
 */
int
pmap_alloc_tlbpid(struct proc *p)
{
	pmap_t pmap;
	int id;

	pmap = p->p_vmspace->vm_map.pmap;
	if (pmap->pm_tlbgen != tlbpid_gen) {
		id = tlbpid_cnt;
		if (id >= VMNUM_PIDS) {
			tlb_flush(sys_config.cpu[0].tlbsize);
			/* reserve tlbpid_gen == 0 to always mean invalid */
			if (++tlbpid_gen == 0)
				tlbpid_gen = 1;
			id = 1;
		}
		tlbpid_cnt = id + 1;
		pmap->pm_tlbpid = id;
		pmap->pm_tlbgen = tlbpid_gen;
	} else {
		id = pmap->pm_tlbpid;
	}

	if (curproc) {
		DPRINTF(PDB_FOLLOW|PDB_TLBPID,
		    ("pmap_alloc_tlbpid: curproc %d '%s' ",
		    curproc->p_pid, curproc->p_comm));
	} else {
		DPRINTF(PDB_FOLLOW|PDB_TLBPID,
		    ("pmap_alloc_tlbpid: curproc <none> "));
	}
	DPRINTF(PDB_FOLLOW|PDB_TLBPID, ("segtab %p tlbpid %d pid %d '%s'\n",
	    pmap->pm_segtab, id, p->p_pid, p->p_comm));

	return (id);
}
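
/*
 * Illustrative sketch (not part of the original code): the generation
 * trick used by pmap_alloc_tlbpid() in miniature, with hypothetical
 * names.  A PID stored in a context is only trusted while its recorded
 * generation matches the global one; flushing the TLB and bumping the
 * generation therefore invalidates every outstanding PID at once,
 * without visiting each pmap.
 */
#if 0
struct toy_ctx { u_int gen; int pid; };

static u_int toy_gen = 1;	/* generation 0 always means invalid */
static int toy_next = 2;	/* PID 0 is kernel, PID 1 is proc0 */

static int
toy_alloc_pid(struct toy_ctx *c, int numpids)
{
	if (c->gen != toy_gen) {	/* PID stale or never assigned */
		if (toy_next >= numpids) {
			/* ... flush the whole TLB here ... */
			if (++toy_gen == 0)
				toy_gen = 1;
			toy_next = 1;	/* PID 0 stays reserved */
		}
		c->pid = toy_next++;
		c->gen = toy_gen;
	}
	return c->pid;
}
#endif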

/*
 * Enter the pmap and virtual address into the physical to virtual map table.
 */
int
pmap_enter_pv(pmap_t pmap, vaddr_t va, vm_page_t pg, u_int *npte)
{
	pv_entry_t pv, npv;
	int s;

	pv = pg_to_pvh(pg);

	s = splvm();
	if (pv->pv_pmap == NULL) {
		/*
		 * No entries yet, use header as the first entry.
		 */

		DPRINTF(PDB_PVENTRY,
		    ("pmap_enter: first pv: pmap %p va %p pa %p\n",
		    pmap, va, VM_PAGE_TO_PHYS(pg)));

		stat_count(enter_stats.firstpv);

		pv->pv_va = va;
		atomic_setbits_int(&pg->pg_flags, PV_CACHED);
		pv->pv_pmap = pmap;
		pv->pv_next = NULL;
	} else {
		if (pg->pg_flags & PV_UNCACHED) {
			/*
			 * If page is mapped uncached it's either because
			 * an uncached mapping was requested or we have a
			 * VAC situation.  Map this page uncached as well.
			 */
			*npte = (*npte & ~PG_CACHEMODE) | PG_UNCACHED;
		} else if (CpuCacheAliasMask != 0) {
			/*
			 * We have a VAC possibility.  Check if virtual
			 * address of current mappings are compatible
			 * with this new mapping.  Only need to check first
			 * since all others have been checked compatible
			 * when added.  If they are incompatible, remove
			 * all mappings, flush the cache and set page
			 * to be mapped uncached.
			 */
			if (((pv->pv_va ^ va) & CpuCacheAliasMask) != 0) {
#ifdef PMAP_DEBUG
				printf("pmap_enter: VAC for pa %p, %p != %p\n",
				    VM_PAGE_TO_PHYS(pg), pv->pv_va, va);
#endif
				pmap_page_cache(pg, PV_UNCACHED);
				Mips_SyncDCachePage(pv->pv_va);
				*npte = (*npte & ~PG_CACHEMODE) | PG_UNCACHED;
			}
		}

		/*
		 * There is at least one other VA mapping this page.
		 * Place this entry after the header.
		 *
		 * Note: the entry may already be in the table if
		 * we are only changing the protection bits.
		 */
		for (npv = pv; npv; npv = npv->pv_next) {
			if (pmap == npv->pv_pmap && va == npv->pv_va) {
				splx(s);
				return 0;
			}
		}

		DPRINTF(PDB_PVENTRY,
		    ("pmap_enter: new pv: pmap %p va %p pg %p\n",
		    pmap, va, VM_PAGE_TO_PHYS(pg)));

		npv = pmap_pv_alloc();
		if (npv == NULL) {
			splx(s);
			return ENOMEM;
		}
		npv->pv_va = va;
		npv->pv_pmap = pmap;
		npv->pv_next = pv->pv_next;
		pv->pv_next = npv;

		if (!npv->pv_next)
			stat_count(enter_stats.secondpv);
	}

	splx(s);
	return 0;
}

/*
 * Remove a physical to virtual address translation from the PV table.
 */
void
pmap_remove_pv(pmap_t pmap, vaddr_t va, paddr_t pa)
{
	pv_entry_t pv, npv;
	vm_page_t pg;
	int s;

	DPRINTF(PDB_FOLLOW|PDB_PVENTRY,
	    ("pmap_remove_pv(%p, %p, %p)\n", pmap, va, pa));

	/*
	 * Remove page from the PV table.
	 */
	pg = PHYS_TO_VM_PAGE(pa);
	if (pg == NULL)
		return;

	pv = pg_to_pvh(pg);
	s = splvm();
	/*
	 * If we are removing the first entry on the list, copy up
	 * the next entry, if any, and free that pv item since the
	 * first root item can't be freed.  Else walk the list.
	 */
	if (pmap == pv->pv_pmap && va == pv->pv_va) {
		npv = pv->pv_next;
		if (npv) {
			*pv = *npv;
			pmap_pv_free(npv);
		} else {
			pv->pv_pmap = NULL;
			atomic_clearbits_int(&pg->pg_flags,
			    (PG_PMAP0 | PG_PMAP1 | PG_PMAP2 | PG_PMAP3) &
			    ~PV_PRESERVE);
			Mips_SyncDCachePage(va);
		}
		stat_count(remove_stats.pvfirst);
	} else {
		for (npv = pv->pv_next; npv; pv = npv, npv = npv->pv_next) {
			stat_count(remove_stats.pvsearch);
			if (pmap == npv->pv_pmap && va == npv->pv_va)
				break;
		}
		if (npv != NULL) {
			pv->pv_next = npv->pv_next;
			pmap_pv_free(npv);
		} else {
#ifdef DIAGNOSTIC
			panic("pmap_remove_pv(%p, %p, %p) not found",
			    pmap, va, pa);
#endif
		}
	}
	splx(s);
}
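
/*
 * Illustrative sketch (not part of the original code): the head-copy
 * removal used above, in generic form with hypothetical names.  The
 * first pv entry lives inside struct vm_page and can never be freed,
 * so deleting it means copying the second entry over the head and
 * freeing the second entry's storage instead.
 */
#if 0
struct node { struct node *next; int payload; };

static void
remove_embedded_head(struct node *head)
{
	struct node *second = head->next;

	if (second != NULL) {
		*head = *second;	/* head now carries second's data */
		free_node(second);	/* hypothetical allocator hook */
	} else
		head->payload = 0;	/* mark the embedded head empty */
}
#endif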

/*==================================================================*/
/* Bus space map utility functions */

int
bus_mem_add_mapping(bus_addr_t bpa, bus_size_t size, int cacheable,
    bus_space_handle_t *bshp)
{
	bus_addr_t vaddr;
	bus_addr_t spa;
	bus_size_t off;
	int len;

	spa = trunc_page(bpa);
	off = bpa - spa;
	len = size + off;

	vaddr = uvm_km_valloc_wait(kernel_map, len);
	*bshp = vaddr + off;
#ifdef DEBUG_BUS_MEM_ADD_MAPPING
	printf("map bus %x size %x to %x vbase %x\n", bpa, size, *bshp, spa);
#endif
	for (; len > 0; len -= NBPG) {
		pt_entry_t *pte;
		u_int npte;

		npte = vad_to_pfn(spa) | PG_G;
		npte |= PG_V | PG_M | PG_IOPAGE;
		pte = kvtopte(vaddr);
		pte->pt_entry = npte;
		tlb_update(vaddr, npte);

		spa += NBPG;
		vaddr += NBPG;
	}
	return 0;
}