Annotation of sys/arch/solbourne/solbourne/pmap.c, Revision 1.1
1.1 ! nbrk 1: /* $OpenBSD: pmap.c,v 1.1 2005/04/19 21:30:18 miod Exp $ */
! 2: /*
! 3: * Copyright (c) 2005, Miodrag Vallat
! 4: *
! 5: * Redistribution and use in source and binary forms, with or without
! 6: * modification, are permitted provided that the following conditions
! 7: * are met:
! 8: * 1. Redistributions of source code must retain the above copyright
! 9: * notice, this list of conditions and the following disclaimer.
! 10: * 2. Redistributions in binary form must reproduce the above copyright
! 11: * notice, this list of conditions and the following disclaimer in the
! 12: * documentation and/or other materials provided with the distribution.
! 13: *
! 14: * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
! 15: * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
! 16: * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
! 17: * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
! 18: * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
! 19: * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
! 20: * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
! 21: * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
! 22: * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
! 23: * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
! 24: * POSSIBILITY OF SUCH DAMAGE.
! 25: */
! 26:
! 27: /*
! 28: * KAP physical memory management code.
! 29: */
! 30:
! 31: #include <sys/param.h>
! 32: #include <sys/systm.h>
! 33: #include <sys/lock.h>
! 34: #include <sys/malloc.h>
! 35: #include <sys/pool.h>
! 36: #include <sys/proc.h>
! 37:
! 38: #include <uvm/uvm.h>
! 39:
! 40: #include <machine/idt.h>
! 41: #include <machine/kap.h>
! 42: #include <machine/prom.h>
! 43:
! 44: #include <sparc/sparc/asm.h>
! 45: #include <sparc/sparc/cache.h>
! 46: #include <sparc/sparc/cpuvar.h>
! 47:
! 48: #include <sparc/dev/if_lereg.h>
! 49:
! 50: #ifdef PMAPDEBUG
! 51: #define PDB_ACTIVATE 0x000001
! 52: #define PDB_CLEAR_M 0x000002
! 53: #define PDB_CLEAR_U 0x000004
! 54: #define PDB_COLLECT 0x000008
! 55: #define PDB_COPY 0x000010
! 56: #define PDB_CREATE 0x000020
! 57: #define PDB_DESTROY 0x000040
! 58: #define PDB_ENTER 0x000080
! 59: #define PDB_EXTRACT 0x000100
! 60: #define PDB_IS_M 0x000200
! 61: #define PDB_IS_U 0x000400
! 62: #define PDB_KENTER 0x000800
! 63: #define PDB_KREMOVE 0x001000
! 64: #define PDB_PROTECT 0x002000
! 65: #define PDB_REFERENCE 0x004000
! 66: #define PDB_RELEASE 0x008000
! 67: #define PDB_REMOVE 0x010000
! 68: #define PDB_UNWIRE 0x020000
! 69: #define PDB_ZERO 0x040000
! 70:
! 71: #define DPRINTF(flg,stmt) \
! 72: do { \
! 73: if (pmapdebug & (flg)) \
! 74: printf stmt; \
! 75: } while (0)
! 76:
! 77: u_int _pmapdebug_cold = 0;
! 78: u_int _pmapdebug = -1;
! 79: #define pmapdebug ((cold) ? _pmapdebug_cold : _pmapdebug)
! 80: #else
! 81: #define DPRINTF(flg,stmt) do { } while (0)
! 82: #endif
! 83:
! 84: /* pmap and pde/pte pool allocators */
! 85: struct pool pmappool, pvpool;
! 86:
! 87: struct pmap kernel_pmap_store;
! 88:
! 89: pt_entry_t *pmap_grow_pte(struct pmap *, vaddr_t);
! 90: static pd_entry_t *pmap_pde(pmap_t, vaddr_t);
! 91: static pt_entry_t *pde_pte(pd_entry_t *, vaddr_t);
! 92: pt_entry_t *pmap_pte(pmap_t, vaddr_t);
! 93:
! 94: void pg_flushcache(struct vm_page *);
! 95:
! 96: void tlb_flush(vaddr_t);
! 97: void tlb_flush_all(void);
! 98:
! 99: vaddr_t virtual_avail;
! 100: vaddr_t virtual_end;
! 101:
! 102: vaddr_t vreserve; /* two reserved pages for copy and zero operations... */
! 103: pt_entry_t *ptereserve; /* ...and their PTEs */
! 104:
! 105: vaddr_t lance_va; /* a fixed buffer for the on-board lance */
! 106:
! 107: /*
! 108: * Attribute caching
! 109: */
! 110:
! 111: typedef struct pvlist *pv_entry_t;
! 112:
! 113: static pv_entry_t pg_to_pvl(struct vm_page *);
! 114:
/*
 * Return the head of the pv (physical->virtual) list cached in the
 * given vm_page.  The head entry is embedded in the page structure
 * itself; overflow entries come from pvpool.
 */
static __inline__
pv_entry_t
pg_to_pvl(struct vm_page *pg)
{
	return (&pg->mdpage.pv_head);
}
! 121:
! 122: /*
! 123: * TLB operations
! 124: */
! 125:
/*
 * Invalidate the TLB entry matching the given virtual address.
 * The FVAR register selects the address, and the store to
 * ASI_GTLB_INVAL_ENTRY performs the invalidation.
 */
void
tlb_flush(vaddr_t va)
{
#if 0
	u_int32_t fvar;

	fvar = lda(0, ASI_FVAR);
#endif

	/* select PID 0 before pointing FVAR at the target address */
	sta(0, ASI_PID, 0);
	sta(0, ASI_FVAR, va);
	sta(0, ASI_GTLB_INVAL_ENTRY, 0);
#if 0
	/* NOTE(review): restoring the previous FVAR is disabled -
	   presumably unnecessary since FVAR is only read here; confirm. */
	sta(0, ASI_FVAR, fvar);
#endif
}
! 142:
/*
 * Invalidate all (non-global) TLB entries.
 */
void
tlb_flush_all()
{
	/*
	 * Note that loaded TLB for PTEs with PG_G do NOT get invalidated
	 * by this command (because they are common to all PID), and need
	 * to be invalidated with ASI_GTLB_INVAL_ENTRY.
	 * This does not matter to us, as we don't use PG_G for now.
	 */
	sta(0, ASI_PID, 0);
	sta(0, ASI_GTLB_INVALIDATE, 0);
}
! 155:
! 156: /*
! 157: * Simple pde and pte access routines.
! 158: */
! 159:
/* round a virtual address down to the start of its segment */
#define	trunc_seg(va)	((va) & PDT_INDEX_MASK)

/*
 * Return a pointer to the page directory entry covering va.
 * Note this never returns NULL - a segment with no page table is
 * recognized by its pde_va field being NULL, which callers must
 * check themselves.
 */
static __inline__
pd_entry_t *
pmap_pde(pmap_t pmap, vaddr_t va)
{
	return (&pmap->pm_segtab[va >> PDT_INDEX_SHIFT]);
}
! 168:
! 169: static __inline__
! 170: pt_entry_t *
! 171: pde_pte(pd_entry_t *pde, vaddr_t va)
! 172: {
! 173: pt_entry_t *pte;
! 174:
! 175: pte = (pt_entry_t *)pde->pde_va;
! 176: pte += (va & PT_INDEX_MASK) >> PT_INDEX_SHIFT;
! 177:
! 178: return (pte);
! 179: }
! 180:
! 181: pt_entry_t *
! 182: pmap_pte(pmap_t pmap, vaddr_t va)
! 183: {
! 184: pd_entry_t *pde;
! 185:
! 186: pde = pmap_pde(pmap, va);
! 187: if (pde->pde_va == NULL)
! 188: return (NULL);
! 189:
! 190: return (pde_pte(pde, va));
! 191: }
! 192:
! 193: /*
! 194: * Setup virtual memory for the kernel. The new tables are not activated yet,
! 195: * they will be in locore.s after bootstrap() returns.
! 196: */
void
pmap_bootstrap(size_t parmdata)
{
	extern caddr_t end;
	extern vaddr_t esym;
	u_int32_t icuconf;
	u_int8_t imcmcr;
	vaddr_t ekern;
	vaddr_t va, eva;
	paddr_t pa;
	unsigned int tabidx;
	pd_entry_t *pde;
	pt_entry_t *pte;
	struct sb_prom *sp;
	paddr_t prompa;
	psize_t promlen;
	extern vaddr_t prom_data;

	/*
	 * Compute memory size by checking the iCU for the number of iMC,
	 * then each iMC for its status.
	 */

	icuconf = lda(ICU_CONF, ASI_PHYS_IO);
	physmem = 0;	/* accumulated in megabytes first, scaled below */

#if 0
	imcmcr = lduba(MC0_MCR, ASI_PHYS_IO);
#else
	/* NOTE(review): direct dereference - presumably MC0_MCR is
	   reachable through the boot-time 1:1 / PTW mapping; confirm. */
	imcmcr = *(u_int8_t *)MC0_MCR;
#endif
	if (imcmcr & MCR_BANK0_AVAIL)
		physmem += (imcmcr & MCR_BANK0_32M) ? 32 : 8;
	if (imcmcr & MCR_BANK1_AVAIL)
		physmem += (imcmcr & MCR_BANK1_32M) ? 32 : 8;

	if ((icuconf & CONF_NO_EXTRA_MEMORY) == 0) {
#if 0
		imcmcr = lduba(MC1_MCR, ASI_PHYS_IO);
#else
		imcmcr = *(u_int8_t *)MC1_MCR;
#endif
		if (imcmcr & MCR_BANK0_AVAIL)
			physmem += (imcmcr & MCR_BANK0_32M) ? 32 : 8;
		if (imcmcr & MCR_BANK1_AVAIL)
			physmem += (imcmcr & MCR_BANK1_32M) ? 32 : 8;
	}

	/* scale to pages */
	physmem <<= (20 - PAGE_SHIFT);

	/*
	 * Get a grip on the PROM communication area.
	 */
	sp = (struct sb_prom *)PROM_DATA_VA;

	/*
	 * Set virtual page size.
	 */
	uvmexp.pagesize = PAGE_SIZE;
	uvm_setpagesize();

	/*
	 * Initialize kernel pmap.
	 */
	simple_lock_init(&pmap_kernel()->pm_lock);
	pmap_kernel()->pm_refcount = 1;

	/*
	 * Compute kernel fixed memory usage.
	 */
	ekern = (vaddr_t)&end;
#if defined(DDB) || NKSYMS > 0
	if (esym != 0)
		ekern = esym;	/* symbol table sits after the kernel */
#endif

	/*
	 * Reserve room for the parameter data we're interested in.
	 */
	prom_data = ekern;
	ekern += parmdata;

	/*
	 * From then on, all allocations will be multiples of the
	 * page size.
	 */
	ekern = round_page(ekern);

	/*
	 * Reserve buffers for the on-board Lance chip - the whole buffer
	 * must be in the same 128KB segment.
	 * This should disappear once iCU is tamed...
	 */
	/* 1 << 17 == 128KB: if the buffer would straddle a 128KB
	   boundary, push it up to the next boundary */
	if ((ekern >> 17) != ((ekern + MEMSIZE) >> 17))
		ekern = roundup(ekern, 1 << 17);
	lance_va = ekern;
	ekern += MEMSIZE;

	/*
	 * Initialize fixed mappings.
	 * We want to keep the PTW mapping the kernel for now, but all
	 * devices needed during early bootstrap needs to have their own
	 * mappings.
	 */

	/*
	 * Step 1: reserve memory for the kernel pde.
	 */

	bzero((caddr_t)ekern, PDT_SIZE);
	pmap_kernel()->pm_segtab = (pd_entry_t *)ekern;
	pmap_kernel()->pm_psegtab = PTW1_TO_PHYS(ekern);
	ekern += PDT_SIZE;	/* not rounded anymore ! */

	/*
	 * Step 2: create as many pages tables as necessary.
	 * We'll provide page tables for the kernel virtual memory range
	 * and the top of memory (i.e. PTW1, PTW2 and I/O maps), so that
	 * we can invoke mapdev() early in the boot process.
	 *
	 * For the early console, we will also provide an 1:1 mapping
	 * of the I/O space.
	 */

	tabidx = 0;

	/* kernel virtual range runs to the top of the address space,
	   hence the loop terminates on va wrapping to 0 */
	va = VM_MIN_KERNEL_ADDRESS;
	while (va != 0) {
		pde = pmap_pde(pmap_kernel(), va);

		pde->pde_va = (pt_entry_t *)(ekern + tabidx * PT_SIZE);
		pde->pde_pa = PTW1_TO_PHYS((vaddr_t)pde->pde_va);

		tabidx++;
		va += NBSEG;
	}

	/* 1:1 mapping of on-board I/O for the early console */
	va = (vaddr_t)OBIO_PA_START;
	while (va < (vaddr_t)OBIO_PA_END) {
		pde = pmap_pde(pmap_kernel(), va);

		pde->pde_va = (pt_entry_t *)(ekern + tabidx * PT_SIZE);
		pde->pde_pa = PTW1_TO_PHYS((vaddr_t)pde->pde_va);

		tabidx++;
		va += NBSEG;
	}

	va = IOSPACE_BASE;
	while (va < IOSPACE_BASE + IOSPACE_LEN) {
		pde = pmap_pde(pmap_kernel(), va);

		pde->pde_va = (pt_entry_t *)(ekern + tabidx * PT_SIZE);
		pde->pde_pa = PTW1_TO_PHYS((vaddr_t)pde->pde_va);

		tabidx++;
		/* watch out for wraparound! */
		if ((va += NBSEG) == 0)
			break;
	}

	/* clear all page tables at once, then account for them */
	bzero((caddr_t)ekern, tabidx * PT_SIZE);
	ekern += tabidx * PT_SIZE;
	ekern = round_page(ekern);

	/*
	 * Step 3: fill them. We fill the page tables backing PTW1 and
	 * PTW2 to simplify pmap_extract(), by not having to check if
	 * the va is in a PTW.
	 */

	/* PTW1: cached window onto physical memory */
	va = PTW1_BASE;
	pa = PHYSMEM_BASE;
	while (va < PTW1_BASE + PTW_WINDOW_SIZE) {
		pde = pmap_pde(pmap_kernel(), va);
		eva = trunc_seg(va) + NBSEG;
		if (eva > PTW1_BASE + PTW_WINDOW_SIZE)
			eva = PTW1_BASE + PTW_WINDOW_SIZE;
		pte = pde_pte(pde, va);
		while (va < eva) {
			*pte++ = pa | PG_V | PG_S | PG_U | PG_CACHE;
			va += PAGE_SIZE;
			pa += PAGE_SIZE;
		}
	}

	/* PTW2: shared (uncached coherent) window onto the same memory */
	va = PTW2_BASE;
	pa = PHYSMEM_BASE;
	while (va < PTW2_BASE + PTW_WINDOW_SIZE) {
		pde = pmap_pde(pmap_kernel(), va);
		eva = trunc_seg(va) + NBSEG;
		if (eva > PTW2_BASE + PTW_WINDOW_SIZE)
			eva = PTW2_BASE + PTW_WINDOW_SIZE;
		pte = pde_pte(pde, va);
		while (va < eva) {
			*pte++ = pa | PG_V | PG_S | PG_U | PG_SHARED;
			va += PAGE_SIZE;
			pa += PAGE_SIZE;
		}
	}

	/* 1:1 I/O mappings (va == pa), uncached */
	va = (vaddr_t)OBIO_PA_START;
	while (va < (vaddr_t)OBIO_PA_END) {
		pde = pmap_pde(pmap_kernel(), va);
		eva = trunc_seg(va) + NBSEG;
		if (eva > OBIO_PA_END)
			eva = OBIO_PA_END;
		pte = pde_pte(pde, va);
		for (; va < eva; va += PAGE_SIZE)
			*pte++ = va | PG_V | PG_S | PG_U | PG_IO;
	}

	/*
	 * Compute the virtual memory space.
	 * Note that the kernel is mapped by PTW1 and PTW2, and is outside
	 * this range.
	 */

	virtual_avail = VM_MIN_KERNEL_ADDRESS;
	virtual_end = VM_MAX_KERNEL_ADDRESS;

	/*
	 * Reserve two _virtual_ pages for copy and zero operations.
	 * Since we need to be able to tweak their PTE, they need to be
	 * outside PTW1 and PTW2. We'll steal them from the top of the
	 * virtual space; thus we are sure they will be in the same
	 * segment as well.
	 */

	virtual_end -= 2* PAGE_SIZE;
	vreserve = virtual_end;
	ptereserve = pmap_pte(pmap_kernel(), vreserve);

	/*
	 * Tell the VM system about the available memory.
	 * Physical memory starts at PHYSMEM_BASE; kernel uses space
	 * from PTW1_TO_PHYS(KERNBASE) to ekern at this point.
	 *
	 * The physical memory below the kernel is reserved for the PROM
	 * data and bss, and need to be left intact when invoking it, so
	 * we do not upload (manage) it.
	 *
	 * The PROM communication area may claim another area, way above
	 * the kernel (usually less than 200 KB, immediately under 8MB
	 * physical).
	 */

	if (sp->sp_interface >= PROM_INTERFACE) {
		prompa = atop(PHYSMEM_BASE) + sp->sp_reserve_start;
		promlen = sp->sp_reserve_len;
	} else
		promlen = 0;

	if (promlen != 0) {
		/* two free ranges: kernel end..PROM area, PROM end..top */
#ifdef DIAGNOSTIC
		if (PTW1_TO_PHYS(ekern) > ptoa(prompa))
			panic("kernel overlaps PROM reserved area");
#endif
		uvm_page_physload(
		    atop(PTW1_TO_PHYS(ekern)), prompa,
		    atop(PTW1_TO_PHYS(ekern)), prompa, VM_FREELIST_DEFAULT);
		uvm_page_physload(
		    prompa + promlen, atop(PHYSMEM_BASE) + physmem,
		    prompa + promlen, atop(PHYSMEM_BASE) + physmem,
		    VM_FREELIST_DEFAULT);
	} else {
		uvm_page_physload(
		    atop(PTW1_TO_PHYS(ekern)), atop(PHYSMEM_BASE) + physmem,
		    atop(PTW1_TO_PHYS(ekern)), atop(PHYSMEM_BASE) + physmem,
		    VM_FREELIST_DEFAULT);
	}
}
! 470:
! 471: /*
! 472: * Return the virtual area range available to the kernel.
! 473: */
/*
 * Return the virtual area range available to the kernel, as computed
 * by pmap_bootstrap().
 */
void
pmap_virtual_space(vaddr_t *v_start, vaddr_t *v_end)
{
	*v_start = virtual_avail;
	*v_end = virtual_end;
}
! 480:
! 481: /*
! 482: * Secondary initialization, at uvm_init() time.
! 483: * We can now create the pools we'll use for pmap and pvlist allocations.
! 484: */
! 485: void
! 486: pmap_init()
! 487: {
! 488: pool_init(&pmappool, sizeof(struct pmap), 0, 0, 0, "pmappl",
! 489: &pool_allocator_nointr);
! 490: pool_init(&pvpool, sizeof(struct pvlist), 0, 0, 0, "pvpl", NULL);
! 491: }
! 492:
! 493: /*
! 494: * Create a new pmap.
! 495: *
! 496: * We initialize pmaps with an empty pde, and a shadow of the kernel
! 497: * space (VM_MIN_KERNEL_ADDRESS onwards).
! 498: */
! 499: pmap_t
! 500: pmap_create()
! 501: {
! 502: pmap_t pmap;
! 503: u_int pde;
! 504:
! 505: DPRINTF(PDB_CREATE, ("pmap_create()"));
! 506:
! 507: pmap = pool_get(&pmappool, PR_WAITOK);
! 508:
! 509: bzero(pmap, sizeof(*pmap));
! 510: pmap->pm_refcount = 1;
! 511: simple_lock_init(&pmap->pm_lock);
! 512:
! 513: /*
! 514: * Allocate the page directory.
! 515: */
! 516: pmap->pm_segtab = (pd_entry_t *)uvm_km_zalloc(kernel_map, PDT_SIZE);
! 517: if (pmap_extract(pmap_kernel(), (vaddr_t)pmap->pm_segtab,
! 518: &pmap->pm_psegtab) == FALSE)
! 519: panic("pmap_create: pmap_extract failed!");
! 520:
! 521: /*
! 522: * Shadow the kernel map in all user pmaps.
! 523: */
! 524: for (pde = (VM_MIN_KERNEL_ADDRESS >> PDT_INDEX_SHIFT);
! 525: pde < NBR_PDE; pde++) {
! 526: pmap->pm_segtab[pde].pde_pa =
! 527: pmap_kernel()->pm_segtab[pde].pde_pa;
! 528: pmap->pm_segtab[pde].pde_va =
! 529: pmap_kernel()->pm_segtab[pde].pde_va;
! 530: }
! 531:
! 532: DPRINTF(PDB_CREATE, (" -> %p\n", pmap));
! 533:
! 534: return (pmap);
! 535: }
! 536:
! 537: /*
! 538: * Destroy a pmap.
! 539: * Its mappings will not actually be removed until the reference count
! 540: * drops to zero.
! 541: */
! 542: void
! 543: pmap_destroy(struct pmap *pmap)
! 544: {
! 545: int count;
! 546:
! 547: DPRINTF(PDB_DESTROY, ("pmap_destroy(%p)\n", pmap));
! 548:
! 549: simple_lock(&pmap->pm_lock);
! 550: count = --pmap->pm_refcount;
! 551: simple_unlock(&pmap->pm_lock);
! 552: if (count == 0) {
! 553: pmap_release(pmap);
! 554: pool_put(&pmappool, pmap);
! 555: }
! 556: }
! 557:
! 558: /*
! 559: * Release all mappings and resources associated to a given pmap.
! 560: */
/*
 * Release all mappings and resources associated to a given pmap:
 * every remaining user page table, then the page directory itself.
 * Only the user part of the directory is scanned - the kernel part
 * is shared with the kernel pmap and must not be freed here.
 */
void
pmap_release(struct pmap *pmap)
{
	u_int pde;
	pt_entry_t *pdeva;
#ifdef DIAGNOSTIC
	u_int pte;
#endif

	DPRINTF(PDB_RELEASE, ("pmap_release(%p)\n", pmap));

	/*
	 * Free all page tables.
	 */
	for (pde = 0; pde < (VM_MIN_KERNEL_ADDRESS >> PDT_INDEX_SHIFT); pde++) {
		if ((pdeva = pmap->pm_segtab[pde].pde_va) != NULL) {
#ifdef DIAGNOSTIC
			/* report any mapping that should have been
			   removed before the pmap was released */
			for (pte = 0; pte < NBR_PTE; pte++)
				if (pdeva[pte] & PG_V) {
					DPRINTF(PDB_RELEASE,
					    ("pmap_release: unreleased pte "
					     "%p (%08x)\n",
					     pdeva + pte, pdeva[pte]));
				}
#endif
			uvm_km_free(kernel_map, (vaddr_t)pdeva, PT_SIZE);
		}
	}

	/*
	 * Free the page directory.
	 */
	uvm_km_free(kernel_map, (vaddr_t)pmap->pm_segtab, PDT_SIZE);
}
! 595:
! 596: /*
! 597: * Returns a preferred virtual address for the given address, which
! 598: * does not cause a VAC aliasing situation.
! 599: */
/*
 * Returns a preferred virtual address for the given address, which
 * does not cause a VAC aliasing situation.
 *
 * Intentionally a no-op: *vap is left unchanged.
 */
void
pmap_prefer(vaddr_t foff, vaddr_t *vap)
{
	/* XXX assume no cache aliasing yet */
}
! 605:
! 606: /*
! 607: * Activate the pmap associated to a given process.
! 608: * Called from the scheduler.
! 609: */
/*
 * Activate the pmap associated to a given process.
 * Called from the scheduler.
 *
 * Only takes effect when activating the current process: the page
 * directory base register is switched to the new pmap's directory,
 * after flushing the register windows and the cache, and the TLB is
 * invalidated.
 */
void
pmap_activate(struct proc *p)
{
	pmap_t pmap = p->p_vmspace->vm_map.pmap;
	int s;

	DPRINTF(PDB_ACTIVATE,
	    ("pmap_activate(%p/pmap %p/segtab pa %08x va %08x)\n",
	    p, pmap, pmap->pm_psegtab, (vaddr_t)pmap->pm_segtab));

	s = splvm();

	if (p == curproc) {
		/* push register windows to the stack before switching */
		write_user_windows();
		cache_flush_context();
		sta(0, ASI_PID, 0);
		sta(0, ASI_PDBR, pmap->pm_psegtab);
		tlb_flush_all();
	}

	splx(s);
}
! 632:
! 633: /*
! 634: * Increment the pmap reference counter.
! 635: */
/*
 * Increment the pmap reference counter, under the pmap lock.
 * The matching decrement happens in pmap_destroy().
 */
void
pmap_reference(struct pmap *pmap)
{
	DPRINTF(PDB_REFERENCE, ("pmap_reference(%p)\n", pmap));

	simple_lock(&pmap->pm_lock);
	pmap->pm_refcount++;
	simple_unlock(&pmap->pm_lock);
}
! 645:
! 646: /*
! 647: * Remove a range of virtual addresses from the given pmap.
! 648: * Addresses are expected to be page-aligned.
! 649: */
! 650: void
! 651: pmap_remove(struct pmap *pmap, vaddr_t sva, vaddr_t e)
! 652: {
! 653: vaddr_t va, eva;
! 654: paddr_t pa;
! 655: pd_entry_t *pde;
! 656: pt_entry_t *pte, opte;
! 657: struct vm_page *pg;
! 658: struct pvlist *pvl, *prev, *cur;
! 659: int s;
! 660:
! 661: s = splvm();
! 662:
! 663: DPRINTF(PDB_REMOVE, ("pmap_remove(%p,%08x,%08x)\n", pmap, sva, e));
! 664:
! 665: va = sva;
! 666: while (va != e) {
! 667: pde = pmap_pde(pmap, va);
! 668: eva = trunc_seg(va) + NBSEG;
! 669: if (eva > e || eva == 0)
! 670: eva = e;
! 671:
! 672: if (pde == NULL) {
! 673: va = eva;
! 674: continue;
! 675: }
! 676:
! 677: pte = pde_pte(pde, va);
! 678: for (; va != eva; va += PAGE_SIZE, pte++) {
! 679: opte = *pte;
! 680: if ((opte & PG_V) == 0)
! 681: continue;
! 682:
! 683: pmap->pm_stats.resident_count--;
! 684:
! 685: pa = opte & PG_FRAME;
! 686:
! 687: #ifdef DIAGNOSTIC
! 688: if (opte & PG_W) {
! 689: printf("pmap_remove(%p): wired mapping for %08x",
! 690: pmap, va);
! 691: pmap->pm_stats.wired_count--;
! 692: }
! 693: #endif
! 694:
! 695: *pte = PG_NV;
! 696: tlb_flush(va);
! 697:
! 698: pg = PHYS_TO_VM_PAGE(pa);
! 699: if (pg == NULL)
! 700: continue;
! 701:
! 702: /*
! 703: * Remove the mapping from the pvlist for this
! 704: * physical page.
! 705: */
! 706: pvl = pg_to_pvl(pg);
! 707: #ifdef DIAGNOSTIC
! 708: if (pvl->pv_pmap == NULL)
! 709: panic("pmap_remove: NULL pmap in pvlist");
! 710: #endif
! 711: prev = NULL;
! 712: for (cur = pvl; cur != NULL; cur = cur->pv_next) {
! 713: if (cur->pv_va == va && cur->pv_pmap == pmap)
! 714: break;
! 715: prev = cur;
! 716: }
! 717: #ifdef DIAGNOSTIC
! 718: if (cur == NULL) {
! 719: panic("pmap_remove: va not in pvlist");
! 720: }
! 721: #endif
! 722: if (prev == NULL) {
! 723: cur = cur->pv_next;
! 724: if (cur != NULL) {
! 725: cur->pv_flags = pvl->pv_flags;
! 726: *pvl = *cur;
! 727: pool_put(&pvpool, cur);
! 728: } else {
! 729: pvl->pv_pmap = NULL;
! 730: }
! 731: } else {
! 732: prev->pv_next = cur->pv_next;
! 733: pool_put(&pvpool, cur);
! 734: }
! 735:
! 736: /* update saved attributes for managed page */
! 737: pvl->pv_flags |= (opte & (PG_U | PG_M));
! 738: }
! 739: }
! 740:
! 741: splx(s);
! 742: }
! 743:
! 744: /*
! 745: * Release any unnecessary management resources for the given pmap,
! 746: * before swapping it out.
! 747: */
! 748: void
! 749: pmap_collect(struct pmap *pmap)
! 750: {
! 751: u_int pde, pte;
! 752: pt_entry_t *pdeva;
! 753: int s;
! 754:
! 755: s = splvm();
! 756:
! 757: DPRINTF(PDB_COLLECT, ("pmap_collect(%p)\n"));
! 758:
! 759: /*
! 760: * Free all empty page tables.
! 761: */
! 762: for (pde = 0; pde < (VM_MIN_KERNEL_ADDRESS >> PDT_INDEX_SHIFT); pde++) {
! 763: if ((pdeva = pmap->pm_segtab[pde].pde_va) == NULL)
! 764: continue;
! 765: for (pte = 0; pte < NBR_PTE; pte++)
! 766: if (pdeva[pte] & PG_V)
! 767: break;
! 768: if (pte != NBR_PTE)
! 769: continue;
! 770:
! 771: /*
! 772: * Free the unused page table.
! 773: */
! 774: pmap->pm_segtab[pde].pde_va = NULL;
! 775: pmap->pm_segtab[pde].pde_pa = 0;
! 776: uvm_km_free(kernel_map, (vaddr_t)pdeva, PT_SIZE);
! 777: }
! 778:
! 779: splx(s);
! 780: }
! 781:
! 782: /*
! 783: * Change the protection for a given vm_page. The protection can only
! 784: * become more strict, i.e. protection rights get removed.
! 785: *
! 786: * Note that this pmap does not manage execution protection yet.
! 787: */
/*
 * Change the protection for a given vm_page. The protection can only
 * become more strict, i.e. protection rights get removed.
 *
 * Note that this pmap does not manage execution protection yet.
 */
void
pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
{
	struct pvlist *pvl;
	int s;

	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {	/* remove all */
		s = splvm();
		pvl = pg_to_pvl(pg);

		DPRINTF(PDB_REMOVE, ("pmap_page_protect(%p/pmap %p,%x)\n",
		    pg, pvl->pv_pmap, prot));

		/*
		 * pmap_remove() unlinks the head entry and promotes the
		 * next one into the embedded head, so repeatedly removing
		 * the head mapping drains the whole list.
		 */
		while (pvl->pv_pmap != NULL) {
			pmap_remove(pvl->pv_pmap, pvl->pv_va,
			    pvl->pv_va + PAGE_SIZE);
		}

		splx(s);
	} else if ((prot & VM_PROT_WRITE) == VM_PROT_NONE) {
		/* write-protect every existing mapping of the page */
		s = splvm();
		pvl = pg_to_pvl(pg);

		DPRINTF(PDB_REMOVE, ("pmap_page_protect(%p/pmap %p,%x)\n",
		    pg, pvl->pv_pmap, prot));

		if (pvl->pv_pmap != NULL)
			for (; pvl != NULL; pvl = pvl->pv_next)
				pmap_protect(pvl->pv_pmap, pvl->pv_va,
				    pvl->pv_va + PAGE_SIZE, prot);

		splx(s);
	} else {
		/* nothing to restrict */
		DPRINTF(PDB_REMOVE, ("pmap_page_protect(%p,%x)\n", pg, prot));
	}
}
! 824:
! 825: /*
! 826: * Set the protection for a virtual address range in the given pmap.
! 827: *
! 828: * Note that this pmap does not manage execution protection yet.
! 829: */
! 830: void
! 831: pmap_protect(struct pmap *pmap, vaddr_t sva, vaddr_t e, vm_prot_t prot)
! 832: {
! 833: vaddr_t va, eva;
! 834: pd_entry_t *pde;
! 835: pt_entry_t *pte, opte, npte;
! 836: int s;
! 837:
! 838: s = splvm();
! 839:
! 840: DPRINTF(PDB_PROTECT,
! 841: ("pmap_protect(%p,%08x,%08x,%x)\n", pmap, sva, e, prot));
! 842:
! 843: if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
! 844: pmap_remove(pmap, sva, e);
! 845: splx(s);
! 846: return;
! 847: }
! 848:
! 849: va = sva;
! 850: while (va != e) {
! 851: pde = pmap_pde(pmap, va);
! 852: eva = trunc_seg(va) + NBSEG;
! 853: if (eva > e || eva == 0)
! 854: eva = e;
! 855:
! 856: if (pde == NULL) {
! 857: va = eva;
! 858: continue;
! 859: }
! 860:
! 861: pte = pde_pte(pde, va);
! 862: for (; va != eva; va += PAGE_SIZE, pte++) {
! 863: opte = *pte;
! 864: if ((opte & PG_V) == 0)
! 865: continue;
! 866:
! 867: npte = (opte & ~PG_RO) |
! 868: (prot & VM_PROT_WRITE) ? PG_RW : PG_RO;
! 869: if (opte != npte) {
! 870: *pte = npte;
! 871: tlb_flush(va);
! 872: }
! 873: }
! 874: }
! 875:
! 876: splx(s);
! 877: }
! 878:
! 879: /*
! 880: * Expand a pmap, if necessary, to include a pte.
! 881: */
/*
 * Expand a pmap, if necessary, to include a pte.
 *
 * Allocates a zeroed page table for the segment covering va if there
 * is none yet, and records both its kernel virtual and physical
 * addresses in the directory entry.  Returns a pointer to the pte
 * for va, or NULL if the page table could not be allocated.
 */
pt_entry_t *
pmap_grow_pte(struct pmap *pmap, vaddr_t va)
{
	pd_entry_t *pde;

	pde = pmap_pde(pmap, va);
	if (pde->pde_va == NULL) {
		pde->pde_va = (pt_entry_t *)uvm_km_zalloc(kernel_map, PT_SIZE);
		if (pde->pde_va == NULL)
			return (NULL);
		/* the MMU walks physical addresses; find the PT's pa */
		if (pmap_extract(pmap_kernel(), (vaddr_t)pde->pde_va,
		    (paddr_t *)&pde->pde_pa) == FALSE)
			panic("pmap_grow_pte: pmap_extract on PT failed!");
		/* directory contents changed; drop any stale TLB entry */
		tlb_flush((vaddr_t)pmap->pm_segtab);
	}

	return (pde_pte(pde, va));
}
! 900:
! 901: /*
! 902: * Create or update a mapping for the page at the given physical and
! 903: * virtual addresses, for the given pmap.
! 904: */
! 905: int
! 906: pmap_enter(struct pmap *pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
! 907: {
! 908: pt_entry_t *pte, opte, npte;
! 909: struct vm_page *pg;
! 910: struct pvlist *pvl, *cur;
! 911: int s;
! 912:
! 913: s = splvm();
! 914:
! 915: DPRINTF(PDB_ENTER,
! 916: ("pmap_enter(%p,%08x,%08x,%x,%x)", pmap, va, pa, prot, flags));
! 917:
! 918: if ((pte = pmap_grow_pte(pmap, va)) == NULL) {
! 919: DPRINTF(PDB_ENTER, (" -> pmap_grow_pte failed\n"));
! 920: if (flags & PMAP_CANFAIL)
! 921: return (ENOMEM);
! 922: else
! 923: panic("pmap_enter: unable to allocate PT");
! 924: }
! 925:
! 926: opte = *pte;
! 927: DPRINTF(PDB_ENTER, (" opte %08x", opte));
! 928:
! 929: /*
! 930: * Enable cache, by default, if on physical memory, unless
! 931: * PMAP_NC has been passed in pa.
! 932: */
! 933: switch (pa & PAGE_MASK) {
! 934: case PMAP_NC:
! 935: npte = PG_IO;
! 936: break;
! 937: case PMAP_BWS:
! 938: npte = PG_BYTE_SHARED;
! 939: break;
! 940: default:
! 941: if (pa >= PHYSMEM_BASE && pa < PHYSMEM_BASE + ptoa(physmem))
! 942: npte = PG_CACHE;
! 943: else
! 944: npte = PG_IO;
! 945: break;
! 946: }
! 947:
! 948: pa = trunc_page(pa);
! 949: npte |= pa | PG_V | (prot & VM_PROT_WRITE ? PG_RW : PG_RO);
! 950:
! 951: pg = PHYS_TO_VM_PAGE(pa);
! 952: if (pg != NULL) {
! 953: /*
! 954: * For a managed page, enter the mapping in the pvlist.
! 955: */
! 956: pvl = pg_to_pvl(pg);
! 957:
! 958: if (pvl->pv_pmap == NULL) {
! 959: /*
! 960: * We are the first mapping.
! 961: */
! 962: pvl->pv_pmap = pmap;
! 963: pvl->pv_va = va;
! 964: pvl->pv_next = NULL;
! 965: } else {
! 966: /*
! 967: * Add ourselves to the list.
! 968: * Note that, if we are only changing attributes
! 969: * and/or protection, we are already in the list!
! 970: */
! 971: for (cur = pvl; cur != NULL; cur = cur->pv_next) {
! 972: if (pmap == cur->pv_pmap && va == cur->pv_va)
! 973: break;
! 974: }
! 975:
! 976: if (cur == NULL) {
! 977: cur = pool_get(&pvpool, PR_NOWAIT);
! 978: if (cur == NULL) {
! 979: if (flags & PMAP_CANFAIL)
! 980: return (ENOMEM);
! 981: else
! 982: panic("pmap_enter: "
! 983: "pvlist pool exhausted");
! 984: }
! 985: /*
! 986: * Add the new entry after the header.
! 987: */
! 988: cur->pv_pmap = pmap;
! 989: cur->pv_va = va;
! 990: cur->pv_flags = 0;
! 991: cur->pv_next = pvl->pv_next;
! 992: pvl->pv_next = cur;
! 993: }
! 994: }
! 995: }
! 996:
! 997: if (flags & PMAP_WIRED) {
! 998: npte |= PG_W;
! 999: if ((opte & PG_W) == 0)
! 1000: pmap->pm_stats.wired_count++;
! 1001: } else {
! 1002: if ((opte & PG_W) != 0)
! 1003: pmap->pm_stats.wired_count--;
! 1004: }
! 1005: if ((opte & PG_V) == 0)
! 1006: pmap->pm_stats.resident_count++;
! 1007: if (pa >= VM_MIN_KERNEL_ADDRESS)
! 1008: npte |= PG_S;
! 1009:
! 1010: /*
! 1011: * Now update the pte.
! 1012: */
! 1013: if (opte != npte) {
! 1014: DPRINTF(PDB_ENTER, (" -> npte %08x", npte));
! 1015: *pte = npte;
! 1016: tlb_flush(va);
! 1017: }
! 1018:
! 1019: DPRINTF(PDB_ENTER, ("\n"));
! 1020:
! 1021: splx(s);
! 1022:
! 1023: return (0);
! 1024: }
! 1025:
! 1026: /*
! 1027: * Specific flavour of pmap_enter() for unmanaged wired mappings in the
! 1028: * kernel pmap.
! 1029: */
/*
 * Specific flavour of pmap_enter() for unmanaged wired mappings in the
 * kernel pmap.  No pvlist maintenance is done, and allocation failure
 * panics rather than returning an error.
 */
void
pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
{
	pt_entry_t *pte, opte, npte;
	int s;

	s = splvm();

	DPRINTF(PDB_KENTER,
	    ("pmap_kenter_pa(%08x,%08x,%x)", va, pa, prot));

	if ((pte = pmap_grow_pte(pmap_kernel(), va)) == NULL) {
		DPRINTF(PDB_KENTER, (" -> pmap_grow_pte failed\n"));
		panic("pmap_kenter_pa: unable to allocate PT");
	}

	opte = *pte;
	DPRINTF(PDB_KENTER, (" opte %08x", opte));

	/*
	 * Enable cache, by default, if on physical memory, unless
	 * PMAP_NC has been passed in pa.
	 */
	switch (pa & PAGE_MASK) {
	case PMAP_NC:
		npte = PG_IO;
		break;
	case PMAP_BWS:
		npte = PG_BYTE_SHARED;
		break;
	default:
		if (pa >= PHYSMEM_BASE && pa < PHYSMEM_BASE + ptoa(physmem))
			npte = PG_CACHE;
		else
			npte = PG_IO;
		break;
	}

	/* kenter mappings are always wired */
	pa = trunc_page(pa);
	npte |= pa | PG_V | PG_W | (prot & VM_PROT_WRITE ? PG_RW : PG_RO);

	if ((opte & PG_W) == 0)
		pmap_kernel()->pm_stats.wired_count++;
	if ((opte & PG_V) == 0)
		pmap_kernel()->pm_stats.resident_count++;
	/* NOTE(review): compares the _physical_ address against a
	   virtual-range constant, as pmap_enter() does; presumably `va'
	   was intended - confirm. */
	if (pa >= VM_MIN_KERNEL_ADDRESS)
		npte |= PG_S;

	/*
	 * Now update the pte.
	 */
	if (opte != npte) {
		DPRINTF(PDB_KENTER, (" -> npte %08x", npte));
		*pte = npte;
		tlb_flush(va);
	}

	DPRINTF(PDB_KENTER, ("\n"));

	splx(s);
}
! 1091:
! 1092: /*
! 1093: * Specific flavour of pmap_remove for unmanaged wired mappings in the
! 1094: * kernel pmap.
! 1095: */
! 1096: void
! 1097: pmap_kremove(vaddr_t va, vsize_t len)
! 1098: {
! 1099: vaddr_t e, eva;
! 1100: pd_entry_t *pde;
! 1101: pt_entry_t *pte, opte;
! 1102: int s;
! 1103:
! 1104: s = splvm();
! 1105:
! 1106: DPRINTF(PDB_KREMOVE, ("pmap_kremove(%08x,%08x)\n", va, len));
! 1107:
! 1108: e = va + len;
! 1109: while (va != e) {
! 1110: pde = pmap_pde(pmap_kernel(), va);
! 1111: eva = trunc_seg(va) + NBSEG;
! 1112: if (eva > e || eva == 0)
! 1113: eva = e;
! 1114:
! 1115: if (pde == NULL) {
! 1116: va = eva;
! 1117: continue;
! 1118: }
! 1119:
! 1120: pte = pde_pte(pde, va);
! 1121: for (; va != eva; va += PAGE_SIZE, pte++) {
! 1122: opte = *pte;
! 1123: if ((opte & PG_V) == 0)
! 1124: continue;
! 1125:
! 1126: pmap_kernel()->pm_stats.resident_count--;
! 1127:
! 1128: #ifdef DIAGNOSTIC
! 1129: if (!(opte & PG_W)) {
! 1130: printf("pmap_kremove: non-wired mapping for %08x",
! 1131: va);
! 1132: } else
! 1133: #endif
! 1134: pmap_kernel()->pm_stats.wired_count--;
! 1135:
! 1136: *pte = PG_NV;
! 1137: tlb_flush(va);
! 1138: }
! 1139: }
! 1140:
! 1141: splx(s);
! 1142: }
! 1143:
! 1144: /*
! 1145: * Remove the wiring state of a page in the given pmap.
! 1146: */
! 1147: void
! 1148: pmap_unwire(struct pmap *pmap, vaddr_t va)
! 1149: {
! 1150: pt_entry_t *pte;
! 1151: int s;
! 1152:
! 1153: s = splvm();
! 1154:
! 1155: DPRINTF(PDB_UNWIRE, ("pmap_unwire(%p,%08x)\n", pmap, va));
! 1156:
! 1157: pte = pmap_pte(pmap, va);
! 1158:
! 1159: if (*pte & PG_V)
! 1160: if (*pte & PG_W) {
! 1161: pmap->pm_stats.wired_count--;
! 1162: /* No need to flush TLB, it's a software flag */
! 1163: *pte &= ~PG_W;
! 1164: }
! 1165:
! 1166: splx(s);
! 1167: }
! 1168:
! 1169: /*
! 1170: * Compute the physical address of a given virtual address in the given pmap.
! 1171: * If the physical address is not mapped by this pmap, FALSE is returned.
! 1172: */
! 1173: boolean_t
! 1174: pmap_extract(struct pmap *pmap, vaddr_t va, paddr_t *pap)
! 1175: {
! 1176: pt_entry_t *pte;
! 1177: paddr_t pa;
! 1178: boolean_t rv;
! 1179: int s;
! 1180:
! 1181: DPRINTF(PDB_EXTRACT, ("pmap_extract(%p,%08x)", pmap, va));
! 1182:
! 1183: s = splvm();
! 1184:
! 1185: pte = pmap_pte(pmap, va);
! 1186: if (pte != NULL && (*pte & PG_V) != 0) {
! 1187: rv = TRUE;
! 1188: pa = (*pte & PG_FRAME) | (va & PAGE_MASK);
! 1189: DPRINTF(PDB_EXTRACT, (" -> %08x\n", pa));
! 1190: if (pap != NULL)
! 1191: *pap = pa;
! 1192: } else {
! 1193: DPRINTF(PDB_EXTRACT, (" -> FALSE\n"));
! 1194: rv = FALSE;
! 1195: }
! 1196:
! 1197: splx(s);
! 1198:
! 1199: return (rv);
! 1200: }
! 1201:
! 1202: /*
! 1203: * Walk a vm_page and flush all existing mappings.
! 1204: */
! 1205: void
! 1206: pg_flushcache(struct vm_page *pg)
! 1207: {
! 1208: struct pvlist *pvl;
! 1209: int s;
! 1210:
! 1211: s = splvm();
! 1212:
! 1213: pvl = pg_to_pvl(pg);
! 1214: if (pvl->pv_pmap == NULL)
! 1215: return;
! 1216:
! 1217: /*
! 1218: * Since cache_flush_page() causes the whole cache to be flushed,
! 1219: * there is no need to loop - flush once.
! 1220: */
! 1221: /* for (; pvl != NULL; pvl = pvl->pv_next) */
! 1222: cache_flush_page(pvl->pv_va);
! 1223:
! 1224: splx(s);
! 1225: }
! 1226:
/*
 * Fill a vm_page with zeroes.
 *
 * Uses the reserved kernel virtual page (vreserve/ptereserve) to build
 * a temporary mapping of the target page.  The sequence of pte writes,
 * TLB flushes and cache flushes below is order-sensitive.
 */
void
pmap_zero_page(struct vm_page *pg)
{
	paddr_t pa;
	vaddr_t va;
	pt_entry_t *pte;
	int s;

	s = splvm();

	pa = VM_PAGE_TO_PHYS(pg);
	/* reserved va/pte pair set aside at bootstrap for this purpose */
	va = vreserve;
	pte = ptereserve;

	DPRINTF(PDB_ZERO, ("pmap_zero_page(%p/pa %x) pte %p\n", pg, pa, pte));

	/* flush stale cache lines from any existing mapping of the page */
	pg_flushcache(pg);

	/* establish a temporary supervisor mapping and make it visible */
	*pte = PG_V | PG_S | (pa & PG_FRAME);
	tlb_flush(va);

	qzero((caddr_t)va, PAGE_SIZE);
	cache_flush_page(va);

	/* paranoia: drop the temporary mapping again */
	*pte = PG_NV;
	tlb_flush(va);

	splx(s);
}
! 1260:
/*
 * Copy the contents of a vm_page to another.
 *
 * Maps both pages at the two consecutive reserved kernel virtual pages
 * (source read-only, destination read-write), copies, then tears the
 * temporary mappings down.  Statement order is significant.
 */
void
pmap_copy_page(struct vm_page *srcpg, struct vm_page *dstpg)
{
	paddr_t srcpa, dstpa;
	vaddr_t srcva, dstva;
	pt_entry_t *srcpte, *dstpte;
	int s;

	s = splvm();

	DPRINTF(PDB_COPY, ("pmap_copy_page(%p,%p)\n", srcpg, dstpg));

	srcpa = VM_PAGE_TO_PHYS(srcpg);
	dstpa = VM_PAGE_TO_PHYS(dstpg);
	/* source uses the reserved page, destination the one after it */
	srcva = vreserve;
	dstva = srcva + PAGE_SIZE;

	/* srcpte gets ptereserve itself, dstpte the following pte slot */
	dstpte = ptereserve;
	srcpte = dstpte++;

	pg_flushcache(srcpg);
	/*
	 * Since pg_flushcache() causes the whole cache to be flushed,
	 * there is no need flush dstpg.
	 */
	/* pg_flushcache(dstpg); */

	/* map the source read-only and the destination read-write */
	*srcpte = PG_V | PG_S | PG_RO | (srcpa & PG_FRAME);
	*dstpte = PG_V | PG_S | (dstpa & PG_FRAME);

	tlb_flush(srcva);
	tlb_flush(dstva);

	qcopy((caddr_t)srcva, (caddr_t)dstva, PAGE_SIZE);
	cache_flush_page(srcva);

	/* drop both temporary mappings */
	*srcpte = *dstpte = PG_NV;
	tlb_flush(srcva);
	tlb_flush(dstva);

	splx(s);
}
! 1306:
! 1307: /*
! 1308: * Clear the modify bits on all mappings associated to the given vm_page.
! 1309: */
! 1310: boolean_t
! 1311: pmap_clear_modify(struct vm_page *pg)
! 1312: {
! 1313: struct pvlist *pvl;
! 1314: pt_entry_t *pte;
! 1315: boolean_t rv;
! 1316: int s;
! 1317: int flushed;
! 1318:
! 1319: s = splvm();
! 1320:
! 1321: pvl = pg_to_pvl(pg);
! 1322:
! 1323: DPRINTF(PDB_CLEAR_M,
! 1324: ("pmap_clear_modify(%p/pmap %p)\n", pg, pvl->pv_pmap));
! 1325:
! 1326: if (pvl->pv_flags & PG_M) {
! 1327: pvl->pv_flags &= ~PG_M;
! 1328: rv = TRUE;
! 1329: }
! 1330:
! 1331: if (pvl->pv_pmap != NULL) {
! 1332: flushed = 0;
! 1333: for (; pvl != NULL; pvl = pvl->pv_next) {
! 1334: pte = pmap_pte(pvl->pv_pmap, pvl->pv_va);
! 1335: if ((*pte & PG_V) != 0 && (*pte & PG_M) != 0) {
! 1336: /*
! 1337: * Since cache_flush_page() causes the whole
! 1338: * cache to be flushed, only flush once.
! 1339: */
! 1340: if (flushed == 0) {
! 1341: cache_flush_page(pvl->pv_va);
! 1342: flushed = 1;
! 1343: }
! 1344:
! 1345: rv = TRUE;
! 1346: /* No need to flush TLB, it's a software flag */
! 1347: *pte &= ~PG_M;
! 1348: }
! 1349: }
! 1350: }
! 1351:
! 1352: splx(s);
! 1353:
! 1354: return (rv);
! 1355: }
! 1356:
! 1357: /*
! 1358: * Clear the reference bits on all mappings associated to the given vm_page.
! 1359: */
! 1360: boolean_t
! 1361: pmap_clear_reference(struct vm_page *pg)
! 1362: {
! 1363: struct pvlist *pvl;
! 1364: pt_entry_t *pte;
! 1365: boolean_t rv;
! 1366: int s;
! 1367:
! 1368: s = splvm();
! 1369:
! 1370: pvl = pg_to_pvl(pg);
! 1371:
! 1372: DPRINTF(PDB_CLEAR_U,
! 1373: ("pmap_clear_reference(%p/pmap %p)\n", pg, pvl->pv_pmap));
! 1374:
! 1375: if (pvl->pv_flags & PG_U) {
! 1376: pvl->pv_flags &= ~PG_U;
! 1377: rv = TRUE;
! 1378: }
! 1379:
! 1380: if (pvl->pv_pmap != NULL)
! 1381: for (; pvl != NULL; pvl = pvl->pv_next) {
! 1382: pte = pmap_pte(pvl->pv_pmap, pvl->pv_va);
! 1383: if ((*pte & PG_V) != 0 && (*pte & PG_U) != 0) {
! 1384: rv = TRUE;
! 1385: /* No need to flush TLB, it's a software flag */
! 1386: *pte &= ~PG_U;
! 1387: }
! 1388: }
! 1389:
! 1390: splx(s);
! 1391:
! 1392: return (rv);
! 1393: }
! 1394:
! 1395: /*
! 1396: * Check the reference bit attribute for the given vm_page.
! 1397: */
! 1398: boolean_t
! 1399: pmap_is_referenced(struct vm_page *pg)
! 1400: {
! 1401: struct pvlist *pvl;
! 1402: boolean_t rv;
! 1403: int s;
! 1404:
! 1405: s = splvm();
! 1406:
! 1407: pvl = pg_to_pvl(pg);
! 1408: rv = (pvl->pv_flags & PG_U) != 0;
! 1409:
! 1410: DPRINTF(PDB_IS_U,
! 1411: ("pmap_is_referenced(%p/pmap %p) -> %d\n", pg, pvl->pv_pmap, rv));
! 1412:
! 1413: splx(s);
! 1414:
! 1415: return (rv);
! 1416: }
! 1417:
! 1418: /*
! 1419: * Check the modify bit attribute for the given vm_page.
! 1420: */
! 1421: boolean_t
! 1422: pmap_is_modified(struct vm_page *pg)
! 1423: {
! 1424: struct pvlist *pvl;
! 1425: boolean_t rv;
! 1426: int s;
! 1427:
! 1428: s = splvm();
! 1429:
! 1430: pvl = pg_to_pvl(pg);
! 1431: rv = (pvl->pv_flags & PG_M) != 0;
! 1432:
! 1433: DPRINTF(PDB_IS_M,
! 1434: ("pmap_is_modified(%p/pmap %p) -> %d\n", pg, pvl->pv_pmap, rv));
! 1435:
! 1436: splx(s);
! 1437:
! 1438: return (rv);
! 1439: }
! 1440:
/*
 * Flush instruction cache on the given dirty area.
 *
 * The KAP is the only sparc implementation OpenBSD runs on with independent
 * instruction and data caches; for now, we won't add a function pointer
 * to the cpu structure, but will directly invoke the necessary operation.
 *
 * The p, va and len arguments are ignored since the invalidation is
 * global.
 */
void
pmap_proc_iflush(struct proc *p, vaddr_t va, vsize_t len)
{
	/* There is no way to invalidate a subset of the icache */
	sta(0, ASI_ICACHE_INVAL, 0);
}
! 1454:
/*
 * The following routines are not part of the MI pmap API, but are
 * necessary to use the common sparc code.
 */

/*
 * Enable caching of the page tables if necessary.
 *
 * Provided only to satisfy the common sparc code; the KAP needs no
 * action here.
 */
void
pmap_cache_enable()
{
	/* nothing to do */
}
! 1468:
! 1469: /*
! 1470: * Change the protection for a specific kernel mapping.
! 1471: * Used by machdep.c only.
! 1472: */
! 1473: void
! 1474: pmap_changeprot(struct pmap *pmap, vaddr_t va, vm_prot_t prot, int wired)
! 1475: {
! 1476: pt_entry_t *pte, npte;
! 1477: int s;
! 1478:
! 1479: s = splvm();
! 1480:
! 1481: npte = PG_S | (prot & VM_PROT_WRITE ? PG_RW : PG_RO);
! 1482:
! 1483: pte = pmap_pte(pmap, va);
! 1484: if ((*pte & PG_PROT) != npte) {
! 1485: *pte = (*pte & ~PG_PROT) | npte;
! 1486: tlb_flush(va);
! 1487: }
! 1488:
! 1489: splx(s);
! 1490: }
! 1491:
/*
 * Set a ``red zone'' below the kernel.
 *
 * Invalidates the mapping of the first kernel page so that stray
 * accesses just below the kernel fault instead of silently succeeding.
 */
void
pmap_redzone()
{
	pt_entry_t *pte;

	pte = pmap_pte(pmap_kernel(), VM_MIN_KERNEL_ADDRESS);
	*pte = PG_NV;
	tlb_flush(VM_MIN_KERNEL_ADDRESS);
}
! 1504:
/*
 * Write a given byte in a protected page; used by the ddb breakpoints.
 *
 * If the destination lies in a PTW window, it is written directly;
 * otherwise the pte's read-only bit is lifted around the store and
 * restored afterwards.  The flush/TLB ordering below is deliberate.
 */
void
pmap_writetext(unsigned char *dst, int ch)
{
	pt_entry_t *pte, opte;
	int s;

	/*
	 * Check for a PTW hit first.
	 */
	switch ((vaddr_t)dst >> PTW_WINDOW_SHIFT) {
	case PTW1_WINDOW:
	case PTW2_WINDOW:
		/* PTW accesses need no pte manipulation */
		*dst = (unsigned char)ch;
		cpuinfo.cache_flush(dst, 1);
		return;
	}

	s = splvm();

	pte = pmap_pte(pmap_kernel(), (vaddr_t)dst);
	if (pte != NULL) {
		opte = *pte;
		if ((opte & PG_V) != 0) {
			cpuinfo.cache_flush(dst, 1);

			/* temporarily lift write protection... */
			if ((opte & PG_RO) != 0) {
				*pte &= ~PG_RO;
				tlb_flush(trunc_page((vaddr_t)dst));
			}

			*dst = (unsigned char)ch;

			/* ...and restore the original pte afterwards */
			if ((opte & PG_RO) != 0) {
				*pte = opte;
				tlb_flush(trunc_page((vaddr_t)dst));
			}

			cpuinfo.cache_flush(dst, 1);
		}
	}

	splx(s);
}
! 1551:
/*
 * Enable or disable cache for the given number of pages at the given
 * virtual address.
 *
 * NOTE(review): the pte++ walk assumes all npages pages share the same
 * page table segment - confirm against callers.
 */
void
kvm_setcache(caddr_t addr, int npages, int cached)
{
	pt_entry_t *pte, opte;
	vaddr_t va = (vaddr_t)addr;
	int s;
	int flushed;

#ifdef DIAGNOSTIC
	if (va & PAGE_MASK) {
		printf("kvm_setcache: unaligned va %08x\n", va);
		va = trunc_page(va);
	}
#endif

#ifdef DIAGNOSTIC
	/*
	 * Check for a PTW hit first.
	 */
	switch (va >> PTW_WINDOW_SHIFT) {
	case PTW1_WINDOW:
	case PTW2_WINDOW:
		printf("kvm_setcache(%08x, %08x, %d) in a PTW\n",
		    va, npages << PAGE_SHIFT, cached);
		return;
	}
#endif

	s = splvm();

	pte = pmap_pte(pmap_kernel(), va);
	flushed = 0;
	for (; --npages >= 0; va += PAGE_SIZE, pte++) {
		/* replace the memory attribute bits with the requested type */
		opte = *pte & ~PG_MA;

		if (cached)
			opte |= PG_CACHE;
		else
			opte |= PG_IO;

		*pte = opte;
		tlb_flush(va);

		/*
		 * Since cache_flush_page() causes the whole
		 * cache to be flushed, only flush once.
		 */
		if (flushed == 0) {
			cache_flush_page(va);
			flushed = 1;
		}
	}

	splx(s);
}
! 1611:
! 1612: /*
! 1613: * Simple wrapper around pmap_kenter_pa() for multiple pages.
! 1614: */
! 1615: vaddr_t
! 1616: pmap_map(vaddr_t va, paddr_t pa, paddr_t epa, int prot)
! 1617: {
! 1618: while (pa < epa) {
! 1619: pmap_kenter_pa(va, pa, (vm_prot_t)prot);
! 1620: va += PAGE_SIZE;
! 1621: pa += PAGE_SIZE;
! 1622: }
! 1623: return (va);
! 1624: }
! 1625:
! 1626: /*
! 1627: * Checks whether a given physical address is in physical memory or
! 1628: * in device space.
! 1629: * Used by mem.c.
! 1630: */
! 1631: int
! 1632: pmap_pa_exists(paddr_t pa)
! 1633: {
! 1634: return (pa >= PHYSMEM_BASE && pa < PHYSMEM_BASE + ptoa(physmem));
! 1635: }
CVSweb