Annotation of sys/arch/vax/vax/pmap.c, Revision 1.1
1.1 ! nbrk 1: /* $OpenBSD: pmap.c,v 1.40 2007/05/20 14:14:12 miod Exp $ */
! 2: /* $NetBSD: pmap.c,v 1.74 1999/11/13 21:32:25 matt Exp $ */
! 3: /*
! 4: * Copyright (c) 1994, 1998, 1999 Ludd, University of Lule}, Sweden.
! 5: * All rights reserved.
! 6: *
! 7: * Redistribution and use in source and binary forms, with or without
! 8: * modification, are permitted provided that the following conditions
! 9: * are met:
! 10: * 1. Redistributions of source code must retain the above copyright
! 11: * notice, this list of conditions and the following disclaimer.
! 12: * 2. Redistributions in binary form must reproduce the above copyright
! 13: * notice, this list of conditions and the following disclaimer in the
! 14: * documentation and/or other materials provided with the distribution.
! 15: * 3. All advertising materials mentioning features or use of this software
! 16: * must display the following acknowledgement:
! 17: * This product includes software developed at Ludd, University of Lule}.
! 18: * 4. The name of the author may not be used to endorse or promote products
! 19: * derived from this software without specific prior written permission
! 20: *
! 21: * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
! 22: * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
! 23: * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
! 24: * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
! 25: * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
! 26: * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
! 27: * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
! 28: * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
! 29: * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
! 30: * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
! 31: */
! 32:
! 33: #include <sys/types.h>
! 34: #include <sys/param.h>
! 35: #include <sys/queue.h>
! 36: #include <sys/malloc.h>
! 37: #include <sys/extent.h>
! 38: #include <sys/proc.h>
! 39: #include <sys/user.h>
! 40: #include <sys/systm.h>
! 41: #include <sys/device.h>
! 42: #include <sys/msgbuf.h>
! 43: #include <sys/pool.h>
! 44:
! 45: #ifdef PMAPDEBUG
! 46: #include <dev/cons.h>
! 47: #endif
! 48:
! 49: #include <uvm/uvm.h>
! 50:
! 51: #include <machine/pte.h>
! 52: #include <machine/pcb.h>
! 53: #include <machine/mtpr.h>
! 54: #include <machine/macros.h>
! 55: #include <machine/sid.h>
! 56: #include <machine/cpu.h>
! 57: #include <machine/scb.h>
! 58: #include <machine/rpb.h>
! 59:
! 60: #include <uvm/uvm.h>
! 61:
! 62: /* QDSS console mapping hack */
! 63: #include "qd.h"
! 64: void qdearly(void);
! 65:
/* Size of the VAX interrupt stack: two logical pages. */
#define ISTACK_SIZE (NBPG * 2)
vaddr_t istack;		/* KVA of the interrupt stack, set in pmap_bootstrap() */

/* Backing storage for the kernel pmap returned by pmap_kernel(). */
struct pmap kernel_pmap_store;

pt_entry_t *Sysmap;		/* System page table */
void	*scratch;		/* a few scratch pages reserved after the istack */
vaddr_t iospace;		/* base of the device register mapping window */

/* Extent map managing KVA for user page tables (filled in pmap_init()). */
vaddr_t ptemapstart, ptemapend;
struct	extent *ptemap;
#define PTMAPSZ EXTENT_FIXED_STORAGE_SIZE(100)
char	ptmapstorage[PTMAPSZ];	/* static storage so extent_create() needs no malloc */

struct pool pmap_pmap_pool;	/* pool of struct pmap */
struct pool pmap_pv_pool;	/* pool of struct pv_entry */

#ifdef PMAPDEBUG
/*
 * Debug aid: record __LINE__ while inside a non-reentrant pmap section
 * so unexpected recursion (e.g. via rensa()) can be reported.
 */
volatile int recurse;
#define RECURSESTART { \
	if (recurse) \
		printf("enter at %d, previous %d\n", __LINE__, recurse);\
	recurse = __LINE__; \
}
#define RECURSEEND {recurse = 0; }
#else
#define RECURSESTART
#define RECURSEEND
#endif

#ifdef PMAPDEBUG
int	startpmapdebug = 0;	/* set non-zero (e.g. from ddb) to trace pmap calls */
#endif

#ifndef DEBUG
static inline
#endif
void pmap_decpteref(struct pmap *, pt_entry_t *);

#ifndef PMAPDEBUG
static inline
#endif
void rensa(pt_entry_t, pt_entry_t *);

vaddr_t   avail_start, avail_end;	/* physical memory bounds */
vaddr_t   virtual_avail, virtual_end; /* Available virtual memory	*/

/* pv_entry allocation wrappers; PR_NOWAIT because callers hold splvm. */
#define get_pventry() (struct pv_entry *)pool_get(&pmap_pv_pool, PR_NOWAIT)
#define free_pventry(pv) pool_put(&pmap_pv_pool, (void *)pv)
! 115:
! 116: /*
! 117: * pmap_bootstrap().
! 118: * Called as part of vm bootstrap, allocates internal pmap structures.
! 119: * Assumes that nothing is mapped, and that kernel stack is located
! 120: * immediately after end.
! 121: */
! 122: void
! 123: pmap_bootstrap()
! 124: {
! 125: unsigned int sysptsize, i;
! 126: extern unsigned int etext, proc0paddr;
! 127: struct pcb *pcb = (struct pcb *)proc0paddr;
! 128: pmap_t pmap = pmap_kernel();
! 129:
! 130: /*
! 131: * Calculation of the System Page Table is somewhat a pain,
! 132: * because it must be in contiguous physical memory and all
! 133: * size calculations must be done now.
! 134: * Remember: sysptsize is in PTEs and nothing else!
! 135: */
! 136:
! 137: /* Kernel alloc area */
! 138: sysptsize = (((0x100000 * maxproc) >> VAX_PGSHIFT) / 4);
! 139: /* reverse mapping struct */
! 140: sysptsize += (avail_end >> VAX_PGSHIFT) * 2;
! 141: /* User Page table area. This may grow big */
! 142: sysptsize += ((USRPTSIZE * 4) / VAX_NBPG) * maxproc;
! 143: /* Kernel stacks per process */
! 144: sysptsize += UPAGES * maxproc;
! 145: /* IO device register space */
! 146: sysptsize += IOSPSZ;
! 147:
! 148: /*
! 149: * Virtual_* and avail_* is used for mapping of system page table.
! 150: * The need for kernel virtual memory is linear dependent of the
! 151: * amount of physical memory also, therefore sysptsize is
! 152: * a variable here that is changed dependent of the physical
! 153: * memory size.
! 154: */
! 155: virtual_avail = avail_end + KERNBASE;
! 156: virtual_end = KERNBASE + sysptsize * VAX_NBPG;
! 157: memset(Sysmap, 0, sysptsize * 4); /* clear SPT before using it */
! 158:
! 159: /*
! 160: * The first part of Kernel Virtual memory is the physical
! 161: * memory mapped in. This makes some mm routines both simpler
! 162: * and faster, but takes ~0.75% more memory.
! 163: */
! 164: pmap_map(KERNBASE, 0, avail_end, VM_PROT_READ|VM_PROT_WRITE);
! 165: /*
! 166: * Kernel code is always readable for user, it must be because
! 167: * of the emulation code that is somewhere in there.
! 168: * And it doesn't hurt, the kernel file is also public readable.
! 169: * There are also a couple of other things that must be in
! 170: * physical memory and that isn't managed by the vm system.
! 171: */
! 172: for (i = 0; i < ((unsigned)&etext - KERNBASE) >> VAX_PGSHIFT; i++)
! 173: Sysmap[i] = (Sysmap[i] & ~PG_PROT) | PG_URKW;
! 174:
! 175: /* Map System Page Table and zero it, Sysmap already set. */
! 176: mtpr((unsigned)Sysmap - KERNBASE, PR_SBR);
! 177:
! 178: /* Map Interrupt stack and set red zone */
! 179: istack = (unsigned)Sysmap + ROUND_PAGE(sysptsize * 4);
! 180: mtpr(istack + ISTACK_SIZE, PR_ISP);
! 181: *kvtopte(istack) &= ~PG_V;
! 182:
! 183: /* Some scratch pages */
! 184: scratch = (void *)((u_int)istack + ISTACK_SIZE);
! 185: avail_start = (u_int)scratch + 4 * VAX_NBPG - KERNBASE;
! 186:
! 187: /* Kernel message buffer */
! 188: avail_end -= MSGBUFSIZE;
! 189: msgbufp = (void *)(avail_end + KERNBASE);
! 190: msgbufp->msg_magic = MSG_MAGIC-1; /* ensure that it will be zeroed */
! 191:
! 192: /* zero all mapped physical memory from Sysmap to here */
! 193: memset((void *)istack, 0, (avail_start + KERNBASE) - istack);
! 194:
! 195: /* Set logical page size */
! 196: uvmexp.pagesize = NBPG;
! 197: uvm_setpagesize();
! 198:
! 199: /* QDSS console mapping hack */
! 200: #if NQD > 0
! 201: qdearly();
! 202: #endif
! 203:
! 204: /* User page table map. This is big. */
! 205: MAPVIRT(ptemapstart, USRPTSIZE);
! 206: ptemapend = virtual_avail;
! 207:
! 208: MAPVIRT(iospace, IOSPSZ); /* Device iospace mapping area */
! 209:
! 210: /* Init SCB and set up stray vectors. */
! 211: avail_start = scb_init(avail_start);
! 212: bcopy((caddr_t)proc0paddr + REDZONEADDR, 0, sizeof(struct rpb));
! 213:
! 214: if (dep_call->cpu_steal_pages)
! 215: (*dep_call->cpu_steal_pages)();
! 216:
! 217: avail_start = ROUND_PAGE(avail_start);
! 218: virtual_avail = ROUND_PAGE(virtual_avail);
! 219: virtual_end = TRUNC_PAGE(virtual_end);
! 220:
! 221:
! 222: #if 0 /* Breaks cninit() on some machines */
! 223: cninit();
! 224: printf("Sysmap %p, istack %lx, scratch %p\n",Sysmap,istack,scratch);
! 225: printf("etext %p\n", &etext);
! 226: printf("SYSPTSIZE %x\n",sysptsize);
! 227: printf("ptemapstart %lx ptemapend %lx\n", ptemapstart, ptemapend);
! 228: printf("avail_start %lx, avail_end %lx\n",avail_start,avail_end);
! 229: printf("virtual_avail %lx,virtual_end %lx\n",
! 230: virtual_avail, virtual_end);
! 231: printf("startpmapdebug %p\n",&startpmapdebug);
! 232: #endif
! 233:
! 234:
! 235: /* Init kernel pmap */
! 236: pmap->pm_p1br = (void *)KERNBASE;
! 237: pmap->pm_p0br = (void *)KERNBASE;
! 238: pmap->pm_p1lr = 0x200000;
! 239: pmap->pm_p0lr = AST_PCB;
! 240: pmap->pm_stats.wired_count = pmap->pm_stats.resident_count = 0;
! 241: /* btop(virtual_avail - KERNBASE); */
! 242:
! 243: pmap->ref_count = 1;
! 244:
! 245: /* Activate the kernel pmap. */
! 246: mtpr(pcb->P1BR = pmap->pm_p1br, PR_P1BR);
! 247: mtpr(pcb->P0BR = pmap->pm_p0br, PR_P0BR);
! 248: mtpr(pcb->P1LR = pmap->pm_p1lr, PR_P1LR);
! 249: mtpr(pcb->P0LR = pmap->pm_p0lr, PR_P0LR);
! 250:
! 251: /* Create the pmap and pv_entry pools. */
! 252: pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0,
! 253: "pmap_pool", NULL);
! 254: pool_init(&pmap_pv_pool, sizeof(struct pv_entry), 0, 0, 0,
! 255: "pv_pool", NULL);
! 256:
! 257: /*
! 258: * Now everything should be complete, start virtual memory.
! 259: */
! 260: uvm_page_physload(avail_start >> PGSHIFT, avail_end >> PGSHIFT,
! 261: avail_start >> PGSHIFT, avail_end >> PGSHIFT,
! 262: VM_FREELIST_DEFAULT);
! 263: mtpr(sysptsize, PR_SLR);
! 264: rpb.sbr = mfpr(PR_SBR);
! 265: rpb.slr = mfpr(PR_SLR);
! 266: mtpr(1, PR_MAPEN);
! 267: }
! 268:
! 269: void
! 270: pmap_virtual_space(vaddr_t *vstartp, vaddr_t *vendp)
! 271: {
! 272: *vstartp = virtual_avail;
! 273: *vendp = virtual_end;
! 274: }
! 275:
! 276: /*
! 277: * Let the VM system do early memory allocation from the direct-mapped
! 278: * physical memory instead.
! 279: */
! 280: vaddr_t
! 281: pmap_steal_memory(size, vstartp, vendp)
! 282: vsize_t size;
! 283: vaddr_t *vstartp, *vendp;
! 284: {
! 285: vaddr_t v;
! 286: int npgs;
! 287:
! 288: #ifdef PMAPDEBUG
! 289: if (startpmapdebug)
! 290: printf("pmap_steal_memory: size 0x%lx start %p end %p\n",
! 291: size, vstartp, vendp);
! 292: #endif
! 293: size = round_page(size);
! 294: npgs = btoc(size);
! 295:
! 296: #ifdef DIAGNOSTIC
! 297: if (uvm.page_init_done == TRUE)
! 298: panic("pmap_steal_memory: called _after_ bootstrap");
! 299: #endif
! 300:
! 301: /*
! 302: * A vax only have one segment of memory.
! 303: */
! 304:
! 305: v = (vm_physmem[0].avail_start << PGSHIFT) | KERNBASE;
! 306: vm_physmem[0].avail_start += npgs;
! 307: vm_physmem[0].start += npgs;
! 308: if (vstartp)
! 309: *vstartp = virtual_avail;
! 310: if (vendp)
! 311: *vendp = virtual_end;
! 312: bzero((caddr_t)v, size);
! 313: return v;
! 314: }
! 315:
! 316: /*
! 317: * pmap_init() is called as part of vm init after memory management
! 318: * is enabled. It is meant to do machine-specific allocations.
! 319: * Here is the resource map for the user page tables inited.
! 320: */
! 321: void
! 322: pmap_init()
! 323: {
! 324: /*
! 325: * Create the extent map used to manage the page table space.
! 326: */
! 327: ptemap = extent_create("ptemap", ptemapstart, ptemapend,
! 328: M_VMPMAP, ptmapstorage, PTMAPSZ, EX_NOCOALESCE);
! 329: if (ptemap == NULL)
! 330: panic("pmap_init");
! 331: }
! 332:
! 333: /*
! 334: * Decrement a reference to a pte page. If all references are gone,
! 335: * free the page.
! 336: */
! 337: void
! 338: pmap_decpteref(pmap, pte)
! 339: struct pmap *pmap;
! 340: pt_entry_t *pte;
! 341: {
! 342: paddr_t paddr;
! 343: int index;
! 344:
! 345: if (pmap == pmap_kernel())
! 346: return;
! 347: index = ((vaddr_t)pte - (vaddr_t)pmap->pm_p0br) >> PGSHIFT;
! 348:
! 349: pte = (pt_entry_t *)trunc_page((vaddr_t)pte);
! 350: #ifdef PMAPDEBUG
! 351: if (startpmapdebug)
! 352: printf("pmap_decpteref: pmap %p pte %p index %d refcnt %d\n",
! 353: pmap, pte, index, pmap->pm_refcnt[index]);
! 354: #endif
! 355:
! 356: #ifdef DEBUG
! 357: if ((index < 0) || (index >= NPTEPGS))
! 358: panic("pmap_decpteref: bad index %d", index);
! 359: #endif
! 360: pmap->pm_refcnt[index]--;
! 361: #ifdef DEBUG
! 362: if (pmap->pm_refcnt[index] >= VAX_NBPG/sizeof(pt_entry_t))
! 363: panic("pmap_decpteref");
! 364: #endif
! 365: if (pmap->pm_refcnt[index] == 0) {
! 366: paddr = (*kvtopte(pte) & PG_FRAME) << VAX_PGSHIFT;
! 367: uvm_pagefree(PHYS_TO_VM_PAGE(paddr));
! 368: bzero(kvtopte(pte), sizeof(pt_entry_t) * LTOHPN);
! 369: }
! 370: }
! 371:
! 372: /*
! 373: * pmap_create() creates a pmap for a new task.
! 374: * If not already allocated, malloc space for one.
! 375: */
! 376: struct pmap *
! 377: pmap_create()
! 378: {
! 379: struct pmap *pmap;
! 380: int bytesiz, res;
! 381:
! 382: pmap = pool_get(&pmap_pmap_pool, PR_WAITOK);
! 383: bzero(pmap, sizeof(struct pmap));
! 384:
! 385: /*
! 386: * Allocate PTEs and stash them away in the pmap.
! 387: * XXX Ok to use kmem_alloc_wait() here?
! 388: */
! 389: bytesiz = USRPTSIZE * sizeof(pt_entry_t);
! 390: res = extent_alloc(ptemap, bytesiz, 4, 0, 0, EX_WAITSPACE|EX_WAITOK,
! 391: (u_long *)&pmap->pm_p0br);
! 392: if (res)
! 393: panic("pmap_create");
! 394: pmap->pm_p0lr = vax_btoc(MAXTSIZ + 40*1024*1024) | AST_PCB;
! 395: (vaddr_t)pmap->pm_p1br = (vaddr_t)pmap->pm_p0br + bytesiz - 0x800000;
! 396: pmap->pm_p1lr = (0x200000 - vax_btoc(MAXSSIZ));
! 397: pmap->pm_stack = USRSTACK;
! 398:
! 399: #ifdef PMAPDEBUG
! 400: if (startpmapdebug)
! 401: printf("pmap_create: pmap %p, "
! 402: "p0br=%p p0lr=0x%lx p1br=%p p1lr=0x%lx\n",
! 403: pmap, pmap->pm_p0br, pmap->pm_p0lr,
! 404: pmap->pm_p1br, pmap->pm_p1lr);
! 405: #endif
! 406:
! 407: pmap->ref_count = 1;
! 408:
! 409: return(pmap);
! 410: }
! 411:
! 412: void
! 413: pmap_unwire(pmap, va)
! 414: pmap_t pmap;
! 415: vaddr_t va;
! 416: {
! 417: int *p, *pte, i;
! 418:
! 419: if (va & KERNBASE) {
! 420: p = (int *)Sysmap;
! 421: i = (va - KERNBASE) >> VAX_PGSHIFT;
! 422: } else {
! 423: if(va < 0x40000000) {
! 424: p = (int *)pmap->pm_p0br;
! 425: i = va >> VAX_PGSHIFT;
! 426: } else {
! 427: p = (int *)pmap->pm_p1br;
! 428: i = (va - 0x40000000) >> VAX_PGSHIFT;
! 429: }
! 430: }
! 431: pte = &p[i];
! 432:
! 433: *pte &= ~PG_W;
! 434: }
! 435:
! 436: /*
! 437: * pmap_destroy(pmap): Remove a reference from the pmap.
! 438: * If this was the last reference, release all its resources.
! 439: */
! 440: void
! 441: pmap_destroy(pmap)
! 442: pmap_t pmap;
! 443: {
! 444: int count;
! 445: #ifdef DEBUG
! 446: vaddr_t saddr, eaddr;
! 447: int i;
! 448: #endif
! 449:
! 450: #ifdef PMAPDEBUG
! 451: if (startpmapdebug)
! 452: printf("pmap_destroy: pmap %p\n",pmap);
! 453: #endif
! 454:
! 455: simple_lock(&pmap->pm_lock);
! 456: count = --pmap->ref_count;
! 457: simple_unlock(&pmap->pm_lock);
! 458:
! 459: if (count != 0)
! 460: return;
! 461:
! 462: if (pmap->pm_p0br != 0) {
! 463: #ifdef DEBUG
! 464: for (i = 0; i < NPTEPGS; i++)
! 465: if (pmap->pm_refcnt[i])
! 466: panic("pmap_release: refcnt %d index %d",
! 467: pmap->pm_refcnt[i], i);
! 468:
! 469: saddr = (vaddr_t)pmap->pm_p0br;
! 470: eaddr = saddr + USRPTSIZE * sizeof(pt_entry_t);
! 471: for (; saddr < eaddr; saddr += NBPG)
! 472: if ((*kvtopte(saddr) & PG_FRAME) != 0)
! 473: panic("pmap_release: page mapped");
! 474: #endif
! 475: extent_free(ptemap, (u_long)pmap->pm_p0br,
! 476: USRPTSIZE * sizeof(pt_entry_t), EX_WAITOK);
! 477: }
! 478:
! 479: pool_put(&pmap_pmap_pool, pmap);
! 480: }
! 481:
! 482: /*
! 483: * Rensa is a help routine to remove a pv_entry from the pv list.
! 484: * Arguments are physical clustering page and page table entry pointer.
! 485: */
! 486: void
! 487: rensa(pte, ptp)
! 488: pt_entry_t pte;
! 489: pt_entry_t *ptp;
! 490: {
! 491: struct vm_page *pg;
! 492: struct pv_entry *pv, *npv, *ppv;
! 493: paddr_t pa;
! 494: int s, *g;
! 495:
! 496: /*
! 497: * Check that we are working on a managed page.
! 498: */
! 499: pa = (pte & PG_FRAME) << VAX_PGSHIFT;
! 500: pg = PHYS_TO_VM_PAGE(pa);
! 501: if (pg == NULL)
! 502: return;
! 503:
! 504: #ifdef PMAPDEBUG
! 505: if (startpmapdebug)
! 506: printf("rensa: pg %p ptp %p\n", pg, ptp);
! 507: #endif
! 508: s = splvm();
! 509: RECURSESTART;
! 510: for (ppv = NULL, pv = pg->mdpage.pv_head; pv != NULL;
! 511: ppv = pv, pv = npv) {
! 512: npv = pv->pv_next;
! 513: if (pv->pv_pte == ptp) {
! 514: g = (int *)pv->pv_pte;
! 515: if ((pg->mdpage.pv_attr & (PG_V|PG_M)) != (PG_V|PG_M))
! 516: pg->mdpage.pv_attr |=
! 517: g[0]|g[1]|g[2]|g[3]|g[4]|g[5]|g[6]|g[7];
! 518: pv->pv_pmap->pm_stats.resident_count--;
! 519: if (npv != NULL) {
! 520: *pv = *npv;
! 521: free_pventry(npv);
! 522: } else {
! 523: if (ppv != NULL)
! 524: ppv->pv_next = pv->pv_next;
! 525: else
! 526: pg->mdpage.pv_head = NULL;
! 527: free_pventry(pv);
! 528: }
! 529: goto leave;
! 530: }
! 531: }
! 532:
! 533: #ifdef DIAGNOSTIC
! 534: panic("rensa(0x%x, %p) page %p: mapping not found", pte, ptp, pg);
! 535: #endif
! 536:
! 537: leave:
! 538: splx(s);
! 539: RECURSEEND;
! 540: }
! 541:
! 542: /*
! 543: * New (real nice!) function that allocates memory in kernel space
! 544: * without tracking it in the MD code.
! 545: */
! 546: void
! 547: pmap_kenter_pa(va, pa, prot)
! 548: vaddr_t va;
! 549: paddr_t pa;
! 550: vm_prot_t prot;
! 551: {
! 552: pt_entry_t *ptp;
! 553:
! 554: ptp = kvtopte(va);
! 555: #ifdef PMAPDEBUG
! 556: if(startpmapdebug)
! 557: printf("pmap_kenter_pa: va: %lx, pa %lx, prot %x ptp %p\n", va, pa, prot, ptp);
! 558: #endif
! 559: ptp[0] = PG_V | ((prot & VM_PROT_WRITE)? PG_KW : PG_KR) |
! 560: PG_PFNUM(pa) | PG_SREF;
! 561: ptp[1] = ptp[0] + 1;
! 562: ptp[2] = ptp[0] + 2;
! 563: ptp[3] = ptp[0] + 3;
! 564: ptp[4] = ptp[0] + 4;
! 565: ptp[5] = ptp[0] + 5;
! 566: ptp[6] = ptp[0] + 6;
! 567: ptp[7] = ptp[0] + 7;
! 568: }
! 569:
! 570: void
! 571: pmap_kremove(va, len)
! 572: vaddr_t va;
! 573: vsize_t len;
! 574: {
! 575: pt_entry_t *pte;
! 576: int i;
! 577:
! 578: #ifdef PMAPDEBUG
! 579: if(startpmapdebug)
! 580: printf("pmap_kremove: va: %lx, len %lx, ptp %p\n", va, len, kvtopte(va));
! 581: #endif
! 582:
! 583: /*
! 584: * Unfortunately we must check if any page may be on the pv list.
! 585: */
! 586: pte = kvtopte(va);
! 587: len >>= PGSHIFT;
! 588:
! 589: for (i = 0; i < len; i++) {
! 590: if ((*pte & PG_FRAME) == 0)
! 591: continue;
! 592: #ifdef DIAGNOSTIC /* DEBUG */
! 593: if ((*pte & PG_SREF) == 0) {
! 594: printf("pmap_kremove(%p, %x): "
! 595: "pte %x@%p does not have SREF set!\n",
! 596: va, len << PGSHIFT, *pte, pte);
! 597: rensa(*pte, pte);
! 598: }
! 599: #endif
! 600: bzero(pte, LTOHPN * sizeof(pt_entry_t));
! 601: pte += LTOHPN;
! 602: }
! 603: mtpr(0, PR_TBIA);
! 604: }
! 605:
! 606: /*
! 607: * pmap_enter() is the main routine that puts in mappings for pages, or
! 608: * upgrades mappings to more "rights".
! 609: */
! 610: int
! 611: pmap_enter(pmap, v, p, prot, flags)
! 612: pmap_t pmap;
! 613: vaddr_t v;
! 614: paddr_t p;
! 615: vm_prot_t prot;
! 616: int flags;
! 617: {
! 618: struct vm_page *pg;
! 619: struct pv_entry *pv;
! 620: int i, s, newpte, oldpte, *patch, index = 0; /* XXX gcc */
! 621: #ifdef PMAPDEBUG
! 622: boolean_t wired = (flags & PMAP_WIRED) != 0;
! 623: #endif
! 624:
! 625: #ifdef PMAPDEBUG
! 626: if (startpmapdebug)
! 627: printf("pmap_enter: pmap %p v %lx p %lx prot %x wired %d flags %x\n",
! 628: pmap, v, p, prot, wired, flags);
! 629: #endif
! 630:
! 631: RECURSESTART;
! 632: /* Find address of correct pte */
! 633: if (v & KERNBASE) {
! 634: patch = (int *)Sysmap;
! 635: i = (v - KERNBASE) >> VAX_PGSHIFT;
! 636: newpte = (p>>VAX_PGSHIFT)|(prot&VM_PROT_WRITE?PG_KW:PG_KR);
! 637: } else {
! 638: if (v < 0x40000000) {
! 639: patch = (int *)pmap->pm_p0br;
! 640: i = (v >> VAX_PGSHIFT);
! 641: if (i >= (pmap->pm_p0lr & ~AST_MASK)) {
! 642: if (flags & PMAP_CANFAIL) {
! 643: RECURSEEND;
! 644: return (EFAULT);
! 645: }
! 646: panic("P0 too small in pmap_enter");
! 647: }
! 648: patch = (int *)pmap->pm_p0br;
! 649: newpte = (p >> VAX_PGSHIFT) |
! 650: (prot & VM_PROT_WRITE ? PG_RW : PG_RO);
! 651: } else {
! 652: patch = (int *)pmap->pm_p1br;
! 653: i = (v - 0x40000000) >> VAX_PGSHIFT;
! 654: if (i < pmap->pm_p1lr) {
! 655: if (flags & PMAP_CANFAIL) {
! 656: RECURSEEND;
! 657: return (EFAULT);
! 658: }
! 659: panic("pmap_enter: must expand P1");
! 660: }
! 661: if (v < pmap->pm_stack)
! 662: pmap->pm_stack = v;
! 663: newpte = (p >> VAX_PGSHIFT) |
! 664: (prot & VM_PROT_WRITE ? PG_RW : PG_RO);
! 665: }
! 666:
! 667: /*
! 668: * Check if a pte page must be mapped in.
! 669: */
! 670: index = ((u_int)&patch[i] - (u_int)pmap->pm_p0br) >> PGSHIFT;
! 671: #ifdef DIAGNOSTIC
! 672: if ((index < 0) || (index >= NPTEPGS))
! 673: panic("pmap_enter: bad index %d", index);
! 674: #endif
! 675: if (pmap->pm_refcnt[index] == 0) {
! 676: vaddr_t ptaddr = trunc_page((vaddr_t)&patch[i]);
! 677: paddr_t phys;
! 678: struct vm_page *pg;
! 679: #ifdef DEBUG
! 680: if ((*kvtopte(&patch[i]) & PG_FRAME) != 0)
! 681: panic("pmap_enter: refcnt == 0");
! 682: #endif
! 683: /*
! 684: * It seems to be legal to sleep here to wait for
! 685: * pages; at least some other ports do so.
! 686: */
! 687: for (;;) {
! 688: pg = uvm_pagealloc(NULL, 0, NULL, 0);
! 689: if (pg != NULL)
! 690: break;
! 691: if (flags & PMAP_CANFAIL) {
! 692: RECURSEEND;
! 693: return (ENOMEM);
! 694: }
! 695:
! 696: panic("pmap_enter: no free pages");
! 697: }
! 698:
! 699: phys = VM_PAGE_TO_PHYS(pg);
! 700: bzero((caddr_t)(phys|KERNBASE), NBPG);
! 701: pmap_kenter_pa(ptaddr, phys,
! 702: VM_PROT_READ|VM_PROT_WRITE);
! 703: pmap_update(pmap_kernel());
! 704: }
! 705: }
! 706:
! 707: /*
! 708: * Do not keep track of anything if mapping IO space.
! 709: */
! 710: pg = PHYS_TO_VM_PAGE(p);
! 711: if (pg == NULL) {
! 712: patch[i] = newpte;
! 713: patch[i+1] = newpte+1;
! 714: patch[i+2] = newpte+2;
! 715: patch[i+3] = newpte+3;
! 716: patch[i+4] = newpte+4;
! 717: patch[i+5] = newpte+5;
! 718: patch[i+6] = newpte+6;
! 719: patch[i+7] = newpte+7;
! 720: if (pmap != pmap_kernel())
! 721: pmap->pm_refcnt[index]++; /* New mapping */
! 722: RECURSEEND;
! 723: return (0);
! 724: }
! 725: if (flags & PMAP_WIRED)
! 726: newpte |= PG_W;
! 727:
! 728: oldpte = patch[i] & ~(PG_V|PG_M);
! 729:
! 730: /* wiring change? */
! 731: if (newpte == (oldpte | PG_W)) {
! 732: patch[i] |= PG_W; /* Just wiring change */
! 733: RECURSEEND;
! 734: return (0);
! 735: }
! 736:
! 737: /* mapping unchanged? just return. */
! 738: if (newpte == oldpte) {
! 739: RECURSEEND;
! 740: return (0);
! 741: }
! 742:
! 743: /* Changing mapping? */
! 744: if ((newpte & PG_FRAME) != (oldpte & PG_FRAME)) {
! 745: /*
! 746: * Mapped before? Remove it then.
! 747: */
! 748: if (oldpte & PG_FRAME) {
! 749: RECURSEEND;
! 750: if ((oldpte & PG_SREF) == 0)
! 751: rensa(oldpte, (pt_entry_t *)&patch[i]);
! 752: RECURSESTART;
! 753: } else if (pmap != pmap_kernel())
! 754: pmap->pm_refcnt[index]++; /* New mapping */
! 755:
! 756: s = splvm();
! 757: pv = get_pventry();
! 758: if (pv == NULL) {
! 759: if (flags & PMAP_CANFAIL) {
! 760: RECURSEEND;
! 761: return (ENOMEM);
! 762: }
! 763: panic("pmap_enter: could not allocate pv_entry");
! 764: }
! 765: pv->pv_pte = (pt_entry_t *)&patch[i];
! 766: pv->pv_pmap = pmap;
! 767: pv->pv_next = pg->mdpage.pv_head;
! 768: pg->mdpage.pv_head = pv;
! 769: splx(s);
! 770: pmap->pm_stats.resident_count++;
! 771: } else {
! 772: /* No mapping change, just flush the TLB; necessary? */
! 773: mtpr(0, PR_TBIA);
! 774: }
! 775:
! 776: if (flags & VM_PROT_READ) {
! 777: pg->mdpage.pv_attr |= PG_V;
! 778: newpte |= PG_V;
! 779: }
! 780: if (flags & VM_PROT_WRITE)
! 781: pg->mdpage.pv_attr |= PG_M;
! 782:
! 783: if (flags & PMAP_WIRED)
! 784: newpte |= PG_V; /* Not allowed to be invalid */
! 785:
! 786: patch[i] = newpte;
! 787: patch[i+1] = newpte+1;
! 788: patch[i+2] = newpte+2;
! 789: patch[i+3] = newpte+3;
! 790: patch[i+4] = newpte+4;
! 791: patch[i+5] = newpte+5;
! 792: patch[i+6] = newpte+6;
! 793: patch[i+7] = newpte+7;
! 794: RECURSEEND;
! 795: #ifdef DEBUG
! 796: if (pmap != pmap_kernel())
! 797: if (pmap->pm_refcnt[index] > VAX_NBPG/sizeof(pt_entry_t))
! 798: panic("pmap_enter: refcnt %d", pmap->pm_refcnt[index]);
! 799: #endif
! 800:
! 801: mtpr(0, PR_TBIA); /* Always; safety belt */
! 802: return (0);
! 803: }
! 804:
! 805: vaddr_t
! 806: pmap_map(virtuell, pstart, pend, prot)
! 807: vaddr_t virtuell;
! 808: paddr_t pstart, pend;
! 809: int prot;
! 810: {
! 811: vaddr_t count;
! 812: int *pentry;
! 813:
! 814: #ifdef PMAPDEBUG
! 815: if(startpmapdebug)
! 816: printf("pmap_map: virt %lx, pstart %lx, pend %lx, Sysmap %p\n",
! 817: virtuell, pstart, pend, Sysmap);
! 818: #endif
! 819:
! 820: pstart=(uint)pstart &0x7fffffff;
! 821: pend=(uint)pend &0x7fffffff;
! 822: virtuell=(uint)virtuell &0x7fffffff;
! 823: pentry = (int *)((((uint)(virtuell)>>VAX_PGSHIFT)*4)+(uint)Sysmap);
! 824: for(count=pstart;count<pend;count+=VAX_NBPG){
! 825: *pentry++ = (count>>VAX_PGSHIFT)|PG_V|
! 826: (prot & VM_PROT_WRITE ? PG_KW : PG_KR);
! 827: }
! 828: return(virtuell+(count-pstart)+KERNBASE);
! 829: }
! 830:
/*
 * Look up the physical address mapped at va in pmap; store it in *pap
 * and return TRUE, or return FALSE if va is not mapped.
 */
boolean_t
pmap_extract(pmap, va, pap)
	pmap_t pmap;
	vaddr_t va;
	paddr_t *pap;
{
	paddr_t pa = 0;
	int	*pte, sva;

#ifdef PMAPDEBUG
	if(startpmapdebug)printf("pmap_extract: pmap %p, va %lx\n",pmap, va);
#endif

	if (va & KERNBASE) {
		pa = kvtophys(va); /* Is 0 if not mapped */
		*pap = pa;
		return (TRUE);
	}

	sva = PG_PFNUM(va);
	if (va < 0x40000000) {
		/* P0: valid indices are below the length register value. */
		if (sva > (pmap->pm_p0lr & ~AST_MASK))
			return (FALSE);
		pte = (int *)pmap->pm_p0br;
	} else {
		/* P1: valid indices are at or above the length register. */
		if (sva < pmap->pm_p1lr)
			return (FALSE);
		pte = (int *)pmap->pm_p1br;
	}
	/* Only read the pte if its pte page is actually mapped in. */
	if ((*kvtopte(&pte[sva]) & PG_FRAME) != 0) {
		*pap = ((pte[sva] & PG_FRAME) << VAX_PGSHIFT);
		return (TRUE);
	}

	return (FALSE);
}
! 867:
! 868: /*
! 869: * Sets protection for a given region to prot. If prot == none then
! 870: * unmap region. pmap_remove is implemented as pmap_protect with
! 871: * protection none.
! 872: */
! 873: void
! 874: pmap_protect(pmap, start, end, prot)
! 875: pmap_t pmap;
! 876: vaddr_t start, end;
! 877: vm_prot_t prot;
! 878: {
! 879: pt_entry_t *pt, *pts, *ptd;
! 880: pt_entry_t pr;
! 881:
! 882: #ifdef PMAPDEBUG
! 883: if(startpmapdebug) printf("pmap_protect: pmap %p, start %lx, end %lx, prot %x\n",
! 884: pmap, start, end,prot);
! 885: #endif
! 886:
! 887: if (pmap == 0)
! 888: return;
! 889:
! 890: RECURSESTART;
! 891: if (start & KERNBASE) { /* System space */
! 892: pt = Sysmap;
! 893: #ifdef DIAGNOSTIC
! 894: if (((end & 0x3fffffff) >> VAX_PGSHIFT) > mfpr(PR_SLR))
! 895: panic("pmap_protect: outside SLR: %lx", end);
! 896: #endif
! 897: start &= ~KERNBASE;
! 898: end &= ~KERNBASE;
! 899: pr = (prot & VM_PROT_WRITE ? PG_KW : PG_KR);
! 900: } else {
! 901: if (start & 0x40000000) { /* P1 space */
! 902: if (end <= pmap->pm_stack) {
! 903: RECURSEEND;
! 904: return;
! 905: }
! 906: if (start < pmap->pm_stack)
! 907: start = pmap->pm_stack;
! 908: pt = pmap->pm_p1br;
! 909: if (((start & 0x3fffffff) >> VAX_PGSHIFT) <
! 910: pmap->pm_p1lr) {
! 911: #ifdef PMAPDEBUG
! 912: panic("pmap_protect: outside P1LR");
! 913: #else
! 914: RECURSEEND;
! 915: return;
! 916: #endif
! 917: }
! 918: start &= 0x3fffffff;
! 919: end = (end == KERNBASE ? end >> 1 : end & 0x3fffffff);
! 920: } else { /* P0 space */
! 921: pt = pmap->pm_p0br;
! 922: if ((end >> VAX_PGSHIFT) >
! 923: (pmap->pm_p0lr & ~AST_MASK)) {
! 924: #ifdef PMAPDEBUG
! 925: panic("pmap_protect: outside P0LR");
! 926: #else
! 927: RECURSEEND;
! 928: return;
! 929: #endif
! 930: }
! 931: }
! 932: pr = (prot & VM_PROT_WRITE ? PG_RW : PG_RO);
! 933: }
! 934: pts = &pt[start >> VAX_PGSHIFT];
! 935: ptd = &pt[end >> VAX_PGSHIFT];
! 936: #ifdef DEBUG
! 937: if (((int)pts - (int)pt) & 7)
! 938: panic("pmap_remove: pts not even");
! 939: if (((int)ptd - (int)pt) & 7)
! 940: panic("pmap_remove: ptd not even");
! 941: #endif
! 942:
! 943: while (pts < ptd) {
! 944: if ((*kvtopte(pts) & PG_FRAME) != 0 && *(int *)pts) {
! 945: if (prot == VM_PROT_NONE) {
! 946: RECURSEEND;
! 947: if ((*(int *)pts & PG_SREF) == 0)
! 948: rensa(*pts, pts);
! 949: RECURSESTART;
! 950: bzero(pts, sizeof(pt_entry_t) * LTOHPN);
! 951: pmap_decpteref(pmap, pts);
! 952: } else {
! 953: pts[0] = (pts[0] & ~PG_PROT) | pr;
! 954: pts[1] = (pts[1] & ~PG_PROT) | pr;
! 955: pts[2] = (pts[2] & ~PG_PROT) | pr;
! 956: pts[3] = (pts[3] & ~PG_PROT) | pr;
! 957: pts[4] = (pts[4] & ~PG_PROT) | pr;
! 958: pts[5] = (pts[5] & ~PG_PROT) | pr;
! 959: pts[6] = (pts[6] & ~PG_PROT) | pr;
! 960: pts[7] = (pts[7] & ~PG_PROT) | pr;
! 961: }
! 962: }
! 963: pts += LTOHPN;
! 964: }
! 965: RECURSEEND;
! 966: mtpr(0,PR_TBIA);
! 967: }
! 968:
int pmap_simulref(int bits, int addr);
/*
 * Called from interrupt vector routines if we get a page invalid fault.
 * Note: the save mask must be or'ed with 0x3f for this function.
 * Returns 0 if normal call, 1 if CVAX bug detected.
 */
int
pmap_simulref(int bits, int addr)
{
	pt_entry_t *pte;
	struct vm_page *pg;
	paddr_t	pa;

#ifdef PMAPDEBUG
	if (startpmapdebug)
		printf("pmap_simulref: bits %x addr %x\n", bits, addr);
#endif
#ifdef DEBUG
	if (bits & 1)
		panic("pte trans len");
#endif
	/* Set address on logical page boundary */
	addr &= ~PGOFSET;
	/* First decode userspace addr */
	if (addr >= 0) {
		/* Sign bit clear: user space. Bit 30 (tested via the
		 * shift-left sign trick) selects P1 over P0. */
		if ((addr << 1) < 0)
			pte = (pt_entry_t *)mfpr(PR_P1BR);
		else
			pte = (pt_entry_t *)mfpr(PR_P0BR);
		pte += PG_PFNUM(addr);
		if (bits & 2) { /* PTE reference */
			pte = (pt_entry_t *)TRUNC_PAGE(pte);
			pte = kvtopte(pte);
			if (pte[0] == 0) /* Check for CVAX bug */
				return 1;
			pa = (paddr_t)pte & ~KERNBASE;
		} else
			pa = (Sysmap[PG_PFNUM(pte)] & PG_FRAME) << VAX_PGSHIFT;
	} else {
		/* Kernel space: the pte is in Sysmap, itself direct-mapped. */
		pte = kvtopte(addr);
		pa = (paddr_t)pte & ~KERNBASE;
	}
	/* Simulate the reference: validate the whole logical page. */
	pte[0] |= PG_V;
	pte[1] |= PG_V;
	pte[2] |= PG_V;
	pte[3] |= PG_V;
	pte[4] |= PG_V;
	pte[5] |= PG_V;
	pte[6] |= PG_V;
	pte[7] |= PG_V;

	pa = trunc_page(pa);
	pg = PHYS_TO_VM_PAGE(pa);
	if (pg != NULL) {
		pg->mdpage.pv_attr |= PG_V; /* Referenced */
		if (bits & 4) /* (will be) modified. XXX page tables */
			pg->mdpage.pv_attr |= PG_M;
	}
	return 0;
}
! 1029:
! 1030: /*
! 1031: * Checks if page is referenced; returns true or false depending on result.
! 1032: */
! 1033: boolean_t
! 1034: pmap_is_referenced(pg)
! 1035: struct vm_page *pg;
! 1036: {
! 1037: #ifdef PMAPDEBUG
! 1038: if (startpmapdebug)
! 1039: printf("pmap_is_referenced: pg %p pv_attr %x\n",
! 1040: pg, pg->mdpage.pv_attr);
! 1041: #endif
! 1042:
! 1043: if (pg->mdpage.pv_attr & PG_V)
! 1044: return 1;
! 1045:
! 1046: return 0;
! 1047: }
! 1048:
! 1049: /*
! 1050: * Clears valid bit in all ptes referenced to this physical page.
! 1051: */
! 1052: boolean_t
! 1053: pmap_clear_reference(pg)
! 1054: struct vm_page *pg;
! 1055: {
! 1056: struct pv_entry *pv;
! 1057: boolean_t ref = FALSE;
! 1058:
! 1059: #ifdef PMAPDEBUG
! 1060: if (startpmapdebug)
! 1061: printf("pmap_clear_reference: pg %p\n", pg);
! 1062: #endif
! 1063:
! 1064: if (pg->mdpage.pv_attr & PG_V)
! 1065: ref = TRUE;
! 1066:
! 1067: pg->mdpage.pv_attr &= ~PG_V;
! 1068:
! 1069: RECURSESTART;
! 1070: for (pv = pg->mdpage.pv_head; pv != NULL; pv = pv->pv_next)
! 1071: if ((pv->pv_pte[0] & PG_W) == 0) {
! 1072: pv->pv_pte[0] &= ~PG_V;
! 1073: pv->pv_pte[1] &= ~PG_V;
! 1074: pv->pv_pte[2] &= ~PG_V;
! 1075: pv->pv_pte[3] &= ~PG_V;
! 1076: pv->pv_pte[4] &= ~PG_V;
! 1077: pv->pv_pte[5] &= ~PG_V;
! 1078: pv->pv_pte[6] &= ~PG_V;
! 1079: pv->pv_pte[7] &= ~PG_V;
! 1080: }
! 1081:
! 1082: RECURSEEND;
! 1083: return ref;
! 1084: }
! 1085:
! 1086: /*
! 1087: * Checks if page is modified; returns true or false depending on result.
! 1088: */
! 1089: boolean_t
! 1090: pmap_is_modified(pg)
! 1091: struct vm_page *pg;
! 1092: {
! 1093: struct pv_entry *pv;
! 1094:
! 1095: #ifdef PMAPDEBUG
! 1096: if (startpmapdebug)
! 1097: printf("pmap_is_modified: pg %p pv_attr %x\n",
! 1098: pg, pg->mdpage.pv_attr);
! 1099: #endif
! 1100:
! 1101: if (pg->mdpage.pv_attr & PG_M)
! 1102: return TRUE;
! 1103:
! 1104: for (pv = pg->mdpage.pv_head; pv != NULL; pv = pv->pv_next)
! 1105: if ((pv->pv_pte[0] | pv->pv_pte[1] | pv->pv_pte[2] |
! 1106: pv->pv_pte[3] | pv->pv_pte[4] | pv->pv_pte[5] |
! 1107: pv->pv_pte[6] | pv->pv_pte[7]) & PG_M)
! 1108: return TRUE;
! 1109:
! 1110: return FALSE;
! 1111: }
! 1112:
! 1113: /*
! 1114: * Clears modify bit in all ptes referenced to this physical page.
! 1115: */
! 1116: boolean_t
! 1117: pmap_clear_modify(pg)
! 1118: struct vm_page *pg;
! 1119: {
! 1120: struct pv_entry *pv;
! 1121: boolean_t rv = FALSE;
! 1122:
! 1123: #ifdef PMAPDEBUG
! 1124: if (startpmapdebug)
! 1125: printf("pmap_clear_modify: pg %p\n", pg);
! 1126: #endif
! 1127: if (pg->mdpage.pv_attr & PG_M)
! 1128: rv = TRUE;
! 1129: pg->mdpage.pv_attr &= ~PG_M;
! 1130:
! 1131: for (pv = pg->mdpage.pv_head; pv != NULL; pv = pv->pv_next)
! 1132: if ((pv->pv_pte[0] | pv->pv_pte[1] | pv->pv_pte[2] |
! 1133: pv->pv_pte[3] | pv->pv_pte[4] | pv->pv_pte[5] |
! 1134: pv->pv_pte[6] | pv->pv_pte[7]) & PG_M) {
! 1135: rv = TRUE;
! 1136:
! 1137: pv->pv_pte[0] &= ~PG_M;
! 1138: pv->pv_pte[1] &= ~PG_M;
! 1139: pv->pv_pte[2] &= ~PG_M;
! 1140: pv->pv_pte[3] &= ~PG_M;
! 1141: pv->pv_pte[4] &= ~PG_M;
! 1142: pv->pv_pte[5] &= ~PG_M;
! 1143: pv->pv_pte[6] &= ~PG_M;
! 1144: pv->pv_pte[7] &= ~PG_M;
! 1145: }
! 1146:
! 1147: return rv;
! 1148: }
! 1149:
/*
 * Lower the permission for all mappings to a given page.
 * Lower permission can only mean setting protection to either read-only
 * or none; where none is unmapping of the page.
 */
void
pmap_page_protect(pg, prot)
	struct vm_page *pg;
	vm_prot_t prot;
{
	pt_entry_t *pt;
	struct pv_entry *pv, *npv;
	int s, *g;

#ifdef PMAPDEBUG
	if (startpmapdebug)
		printf("pmap_page_protect: pg %p, prot %x, ", pg, prot);
#endif

	/* No mappings: nothing to lower. */
	if (pg->mdpage.pv_head == NULL)
		return;

	if (prot == VM_PROT_ALL) /* 'cannot happen' */
		return;

	RECURSESTART;
	if (prot == VM_PROT_NONE) {
		/*
		 * Remove every mapping of the page: detach the whole pv
		 * list up front, then tear each entry down at splvm so
		 * the list cannot be seen half-destroyed.
		 */
		s = splvm();
		npv = pg->mdpage.pv_head;
		pg->mdpage.pv_head = NULL;
		while ((pv = npv) != NULL) {
			npv = pv->pv_next;
			/* View the mapping's PTEs as plain ints for OR/bzero. */
			g = (int *)pv->pv_pte;
			/*
			 * Salvage the referenced/modified bits out of the
			 * PTEs into pv_attr before they are wiped, unless
			 * both are already recorded.
			 * NOTE(review): assumes LTOHPN == 8 to match
			 * g[0]..g[7] — confirm against the bzero below.
			 */
			if ((pg->mdpage.pv_attr & (PG_V|PG_M)) != (PG_V|PG_M))
				pg->mdpage.pv_attr |=
				    g[0]|g[1]|g[2]|g[3]|g[4]|g[5]|g[6]|g[7];
			/* Invalidate all PTEs for this mapping. */
			bzero(g, sizeof(pt_entry_t) * LTOHPN);
			pv->pv_pmap->pm_stats.resident_count--;
			/* Drop the reference on the page table page. */
			pmap_decpteref(pv->pv_pmap, pv->pv_pte);
			free_pventry(pv);
		}
		splx(s);
	} else { /* read-only */
		/*
		 * Downgrade each mapping to read-only, keeping it valid.
		 * PTEs living below ptemapstart map kernel space, so they
		 * get kernel-read protection; the rest get user read-only.
		 */
		for (pv = pg->mdpage.pv_head; pv != NULL; pv = pv->pv_next) {
			pt_entry_t pr;

			pt = pv->pv_pte;
			pr = (vaddr_t)pv->pv_pte < ptemapstart ?
			    PG_KR : PG_RO;

			pt[0] = (pt[0] & ~PG_PROT) | pr;
			pt[1] = (pt[1] & ~PG_PROT) | pr;
			pt[2] = (pt[2] & ~PG_PROT) | pr;
			pt[3] = (pt[3] & ~PG_PROT) | pr;
			pt[4] = (pt[4] & ~PG_PROT) | pr;
			pt[5] = (pt[5] & ~PG_PROT) | pr;
			pt[6] = (pt[6] & ~PG_PROT) | pr;
			pt[7] = (pt[7] & ~PG_PROT) | pr;
		}
	}
	RECURSEEND;
	/* Flush the whole TLB so stale translations are discarded. */
	mtpr(0, PR_TBIA);
}
! 1213:
! 1214: /*
! 1215: * Activate the address space for the specified process.
! 1216: * Note that if the process to activate is the current process, then
! 1217: * the processor internal registers must also be loaded; otherwise
! 1218: * the current process will have wrong pagetables.
! 1219: */
! 1220: void
! 1221: pmap_activate(p)
! 1222: struct proc *p;
! 1223: {
! 1224: pmap_t pmap;
! 1225: struct pcb *pcb;
! 1226:
! 1227: #ifdef PMAPDEBUG
! 1228: if(startpmapdebug) printf("pmap_activate: p %p\n", p);
! 1229: #endif
! 1230:
! 1231: pmap = p->p_vmspace->vm_map.pmap;
! 1232: pcb = &p->p_addr->u_pcb;
! 1233:
! 1234: pcb->P0BR = pmap->pm_p0br;
! 1235: pcb->P0LR = pmap->pm_p0lr;
! 1236: pcb->P1BR = pmap->pm_p1br;
! 1237: pcb->P1LR = pmap->pm_p1lr;
! 1238:
! 1239: if (p == curproc) {
! 1240: mtpr(pmap->pm_p0br, PR_P0BR);
! 1241: mtpr(pmap->pm_p0lr, PR_P0LR);
! 1242: mtpr(pmap->pm_p1br, PR_P1BR);
! 1243: mtpr(pmap->pm_p1lr, PR_P1LR);
! 1244: }
! 1245: mtpr(0, PR_TBIA);
! 1246: }
CVSweb