/*	$OpenBSD: pmap.c,v 1.5 2007/04/13 18:57:49 art Exp $	*/

/*
 * Copyright (c) 2005 Michael Shalayeff
 * All rights reserved.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF MIND, USE, DATA OR PROFITS, WHETHER IN
 * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
 * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#define PMAPDEBUG

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/user.h>
#include <sys/proc.h>
#include <sys/pool.h>
#include <sys/extent.h>

#include <uvm/uvm.h>

#include <machine/iomod.h>

#include <dev/rndvar.h>

#ifdef PMAPDEBUG
#define	DPRINTF(l,s)	do {		\
	if ((pmapdebug & (l)) == (l))	\
		printf s;		\
} while(0)
#define	PDB_FOLLOW	0x00000001
#define	PDB_INIT	0x00000002
#define	PDB_ENTER	0x00000004
#define	PDB_REMOVE	0x00000008
#define	PDB_CREATE	0x00000010
#define	PDB_PTPAGE	0x00000020
#define	PDB_CACHE	0x00000040
#define	PDB_BITS	0x00000080
#define	PDB_COLLECT	0x00000100
#define	PDB_PROTECT	0x00000200
#define	PDB_EXTRACT	0x00000400
#define	PDB_VP		0x00000800
#define	PDB_PV		0x00001000
#define	PDB_PARANOIA	0x00002000
#define	PDB_WIRING	0x00004000
#define	PDB_PMAP	0x00008000
#define	PDB_STEAL	0x00010000
#define	PDB_PHYS	0x00020000
#define	PDB_POOL	0x00040000
int pmapdebug = 0
	| PDB_INIT
/*	| PDB_FOLLOW */
/*	| PDB_VP */
/*	| PDB_PV */
/*	| PDB_ENTER */
/*	| PDB_REMOVE */
/*	| PDB_STEAL */
/*	| PDB_PROTECT */
/*	| PDB_PHYS */
	;
#else
#define	DPRINTF(l,s)	/* */
#endif

paddr_t physical_steal, physical_end;

struct pmap kernel_pmap_store;
struct pool pmap_pmap_pool;
struct pool pmap_pv_pool;
int pmap_pvlowat = 252;
int pmap_initialized;
int pmap_nkpdes = 32;

pt_entry_t hppa_prot[8];
#define	pmap_prot(m,vp)	(hppa_prot[(vp)] | ((m) == pmap_kernel()? 0 : PTE_USER))

pt_entry_t kernel_ptes[] = {
	PTE_EXEC | PTE_ORDER | PTE_PREDICT | PTE_WIRED |
	    TLB_PAGE(0x000000) | PTE_PG4M,
	PTE_WRITE | PTE_ORDER | PTE_DIRTY | PTE_WIRED |
	    TLB_PAGE(0x400000) | PTE_PG4M,
	PTE_WRITE | PTE_ORDER | PTE_DIRTY | PTE_WIRED |
	    TLB_PAGE(0x800000) | PTE_PG4M,
	PTE_WRITE | PTE_ORDER | PTE_DIRTY | PTE_WIRED |
	    TLB_PAGE(0xc00000) | PTE_PG4M
};

#define	pmap_pvh_attrs(a) \
	(((a) & PTE_DIRTY) | ((a) ^ PTE_REFTRAP))

struct vm_page *
pmap_pagealloc(int wait)
{
	struct vm_page *pg;

	if ((pg = uvm_pagealloc(NULL, 0, NULL,
	    UVM_PGA_USERESERVE | UVM_PGA_ZERO)) == NULL)
		printf("pmap_pagealloc fail\n");

	return (pg);
}

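/*
 * Page table layout, as implemented by pmap_pde_get()/pmap_pde_set()
 * below: pm_pdir points to a page of 32-bit directory entries, each
 * holding the page frame number (physical address >> PAGE_SHIFT) of
 * the next level.  The PIE bits of a va select a second-level
 * directory page, except that index zero reuses the second half of
 * the pm_pdir page itself; the PDE bits then select the page table
 * page that contains the 64-bit PTEs.
 */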
volatile pt_entry_t *
pmap_pde_get(volatile u_int32_t *pd, vaddr_t va)
{
	int i;

	DPRINTF(PDB_FOLLOW|PDB_VP,
	    ("pmap_pde_get(%p, 0x%lx)\n", pd, va));

	i = (va & PIE_MASK) >> PIE_SHIFT;
	if (i) {
		pd = (volatile u_int32_t *)((u_int64_t)pd[i] << PAGE_SHIFT);

		if (!pd)
			return (NULL);
	} else
		pd += PAGE_SIZE / sizeof(*pd);

	i = (va & PDE_MASK) >> PDE_SHIFT;
	return (pt_entry_t *)((u_int64_t)pd[i] << PAGE_SHIFT);
}

void
pmap_pde_set(struct pmap *pm, vaddr_t va, paddr_t ptp)
{
	volatile u_int32_t *pd = pm->pm_pdir;
	int i;

	DPRINTF(PDB_FOLLOW|PDB_VP,
	    ("pmap_pde_set(%p, 0x%lx, 0x%lx)\n", pm, va, ptp));

	i = (va & PIE_MASK) >> PIE_SHIFT;
	if (i)
		pd = (volatile u_int32_t *)((u_int64_t)pd[i] << PAGE_SHIFT);
	else
		pd += PAGE_SIZE / sizeof(*pd);

	i = (va & PDE_MASK) >> PDE_SHIFT;
	pd[i] = ptp >> PAGE_SHIFT;
}

pt_entry_t *
pmap_pde_alloc(struct pmap *pm, vaddr_t va, struct vm_page **pdep)
{
	struct vm_page *pg;
	paddr_t pa;

	DPRINTF(PDB_FOLLOW|PDB_VP,
	    ("pmap_pde_alloc(%p, 0x%lx, %p)\n", pm, va, pdep));

	if ((pg = pmap_pagealloc(0)) == NULL)
		return (NULL);

	pa = VM_PAGE_TO_PHYS(pg);

	DPRINTF(PDB_FOLLOW|PDB_VP, ("pmap_pde_alloc: pde %lx\n", pa));

	atomic_clearbits_int(&pg->pg_flags, PG_BUSY);
	pg->wire_count = 1;		/* no mappings yet */
	pmap_pde_set(pm, va, pa);
	pm->pm_stats.resident_count++;	/* count PTP as resident */
	pm->pm_ptphint = pg;
	if (pdep)
		*pdep = pg;
	return ((pt_entry_t *)pa);
}

static __inline struct vm_page *
pmap_pde_ptp(struct pmap *pm, volatile pt_entry_t *pde)
{
	paddr_t pa = (paddr_t)pde;

	DPRINTF(PDB_FOLLOW|PDB_PV, ("pmap_pde_ptp(%p, %p)\n", pm, pde));

	if (pm->pm_ptphint && VM_PAGE_TO_PHYS(pm->pm_ptphint) == pa)
		return (pm->pm_ptphint);

	DPRINTF(PDB_FOLLOW|PDB_PV, ("pmap_pde_ptp: lookup 0x%lx\n", pa));

	return (PHYS_TO_VM_PAGE(pa));
}

static __inline void
pmap_pde_release(struct pmap *pmap, vaddr_t va, struct vm_page *ptp)
{
	DPRINTF(PDB_FOLLOW|PDB_PV,
	    ("pmap_pde_release(%p, 0x%lx, %p)\n", pmap, va, ptp));

	if (pmap != pmap_kernel() && --ptp->wire_count <= 1) {
		DPRINTF(PDB_FOLLOW|PDB_PV,
		    ("pmap_pde_release: disposing ptp %p\n", ptp));
		pmap_pde_set(pmap, va, 0);
		pmap->pm_stats.resident_count--;
		if (pmap->pm_ptphint == ptp)
			pmap->pm_ptphint = NULL;
		ptp->wire_count = 0;
#ifdef DIAGNOSTIC
		if (ptp->pg_flags & PG_BUSY)
			panic("pmap_pde_release: busy page table page");
#endif
		/* purge the PTP contents, not the vm_page structure */
		pdcache(HPPA_SID_KERNEL, VM_PAGE_TO_PHYS(ptp), PAGE_SIZE);
		uvm_pagefree(ptp);
	}
}

static __inline pt_entry_t
pmap_pte_get(volatile pt_entry_t *pde, vaddr_t va)
{
	DPRINTF(PDB_FOLLOW|PDB_VP,
	    ("pmap_pte_get(%p, 0x%lx)\n", pde, va));

	return (pde[(va & PTE_MASK) >> PTE_SHIFT]);
}

static __inline void
pmap_pte_set(volatile pt_entry_t *pde, vaddr_t va, pt_entry_t pte)
{
	DPRINTF(PDB_FOLLOW|PDB_VP,
	    ("pmap_pte_set(%p, 0x%lx, 0x%lx)\n", pde, va, pte));

	pde[(va & PTE_MASK) >> PTE_SHIFT] = pte;
}

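/*
 * hppa caches are virtually indexed and are not kept coherent by
 * hardware, so before a mapping is changed or removed the data cache
 * (and, for executable mappings, the instruction cache) must be
 * flushed and the corresponding TLB entries purged.
 */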
void
pmap_pte_flush(struct pmap *pmap, vaddr_t va, pt_entry_t pte)
{
	if (pte & PTE_EXEC) {
		ficache(pmap->pm_space, va, PAGE_SIZE);
		pitlb(pmap->pm_space, va);
	}
	fdcache(pmap->pm_space, va, PAGE_SIZE);
	pdtlb(pmap->pm_space, va);
}

static __inline pt_entry_t
pmap_vp_find(struct pmap *pm, vaddr_t va)
{
	volatile pt_entry_t *pde;

	if (!(pde = pmap_pde_get(pm->pm_pdir, va)))
		return (0);

	return (pmap_pte_get(pde, va));
}

#ifdef DDB
void
pmap_dump_table(pa_space_t space, vaddr_t sva)
{
	volatile pt_entry_t *pde;
	volatile u_int32_t *pd;
	pt_entry_t pte;
	vaddr_t va, pdemask;

	if (space)
		pd = (u_int32_t *)mfctl(CR_VTOP);
	else
		pd = pmap_kernel()->pm_pdir;

	for (pdemask = 1, va = sva ? sva : 0;
	    va < VM_MAX_ADDRESS; va += PAGE_SIZE) {
		if (pdemask != (va & (PDE_MASK|PIE_MASK))) {
			pdemask = va & (PDE_MASK|PIE_MASK);
			if (!(pde = pmap_pde_get(pd, va))) {
				va += ~PDE_MASK + 1 - PAGE_SIZE;
				continue;
			}
			printf("%x:%8p:\n", space, pde);
		}

		if (!(pte = pmap_pte_get(pde, va)))
			continue;

		printf("0x%08lx-0x%08lx:%b\n",
		    va, PTE_PAGE(pte), PTE_GETBITS(pte), PTE_BITS);
	}
}

void
pmap_dump_pv(paddr_t pa)
{
	struct vm_page *pg;
	struct pv_entry *pve;

	pg = PHYS_TO_VM_PAGE(pa);
	simple_lock(&pg->mdpage.pvh_lock);
	for (pve = pg->mdpage.pvh_list; pve; pve = pve->pv_next)
		printf("%x:%lx\n", pve->pv_pmap->pm_space, pve->pv_va);
	simple_unlock(&pg->mdpage.pvh_lock);
}
#endif

#ifdef PMAPDEBUG
int
pmap_check_alias(struct pv_entry *pve, vaddr_t va, pt_entry_t pte)
{
	int ret;

	/* check for non-equivalent (cache-aliasing) writable mappings */
	for (ret = 0; pve; pve = pve->pv_next) {
		pte |= pmap_vp_find(pve->pv_pmap, pve->pv_va);
		if ((va & HPPA_PGAOFF) != (pve->pv_va & HPPA_PGAOFF) &&
		    (pte & PTE_WRITE)) {
			printf("pmap_check_alias: "
			    "aliased writable mapping 0x%x:0x%lx\n",
			    pve->pv_pmap->pm_space, pve->pv_va);
			ret++;
		}
	}

	return (ret);
}
#endif

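/*
 * pv entries record, per physical page, every (pmap, va) pair the
 * page is mapped at; they are what pmap_page_remove(),
 * pmap_changebit() and pmap_testbit() walk to reach all mappings
 * of a page.
 */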
static __inline struct pv_entry *
pmap_pv_alloc(void)
{
	struct pv_entry *pv;

	DPRINTF(PDB_FOLLOW|PDB_PV, ("pmap_pv_alloc()\n"));

	pv = pool_get(&pmap_pv_pool, PR_NOWAIT);

	DPRINTF(PDB_FOLLOW|PDB_PV, ("pmap_pv_alloc: %p\n", pv));

	return (pv);
}

void
pmap_pv_free(struct pv_entry *pv)
{
	if (pv->pv_ptp)
		pmap_pde_release(pv->pv_pmap, pv->pv_va, pv->pv_ptp);

	pool_put(&pmap_pv_pool, pv);
}

void
pmap_pv_enter(struct vm_page *pg, struct pv_entry *pve, struct pmap *pm,
    vaddr_t va, struct vm_page *pdep)
{
	DPRINTF(PDB_FOLLOW|PDB_PV, ("pmap_pv_enter(%p, %p, %p, 0x%lx, %p)\n",
	    pg, pve, pm, va, pdep));
	pve->pv_pmap = pm;
	pve->pv_va = va;
	pve->pv_ptp = pdep;
	pve->pv_next = pg->mdpage.pvh_list;
	pg->mdpage.pvh_list = pve;
#ifdef PMAPDEBUG
	if (pmap_check_alias(pve, va, 0))
		Debugger();
#endif
}

struct pv_entry *
pmap_pv_remove(struct vm_page *pg, struct pmap *pmap, vaddr_t va)
{
	struct pv_entry **pve, *pv;

	DPRINTF(PDB_FOLLOW|PDB_PV, ("pmap_pv_remove(%p, %p, 0x%lx)\n",
	    pg, pmap, va));

	simple_lock(&pg->mdpage.pvh_lock);	/* lock pv_head */
	for (pv = *(pve = &pg->mdpage.pvh_list);
	    pv; pv = *(pve = &(*pve)->pv_next))
		if (pv->pv_pmap == pmap && pv->pv_va == va) {
			*pve = pv->pv_next;
			break;
		}
	simple_unlock(&pg->mdpage.pvh_lock);	/* unlock, done! */
	return (pv);
}

const pt_entry_t hppa_pgs[] = {
	PTE_PG4K,
	PTE_PG16K,
	PTE_PG64K,
	PTE_PG256K,
	PTE_PG1M,
	PTE_PG4M,
	PTE_PG16M,
	PTE_PG64M
};
#define	nhppa_pgs	(sizeof(hppa_pgs)/sizeof(hppa_pgs[0]))

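/*
 * pmap_maphys() block-maps the physical range [spa, epa) into the
 * kernel page tables, roughly: it picks a superpage size from
 * hppa_pgs[] based on the alignment of the range bounds (via ffs()),
 * fills the covered PTEs with that size, and recurses on whatever
 * part of the range is left over.
 */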
void
pmap_maphys(paddr_t spa, paddr_t epa)
{
	volatile pt_entry_t *pde, *epde, pte;
	paddr_t pa, tpa;
	int s, e, i;

	DPRINTF(PDB_INIT, ("pmap_maphys: mapping 0x%lx - 0x%lx\n", spa, epa));

	s = ffs(spa) - 12;
	e = ffs(epa) - 12;

	if (s < e || (s == e && s / 2 < nhppa_pgs)) {
		i = s / 2;
		if (i > nhppa_pgs)
			i = nhppa_pgs;
		pa = spa;
		spa = tpa = 0x1000 << ((i + 1) * 2);
	} else if (s > e) {
		i = e / 2;
		if (i > nhppa_pgs)
			i = nhppa_pgs;
		epa = pa = epa & (0xfffff000 << ((i + 1) * 2));
		tpa = epa;
	} else {
		i = s / 2;
		if (i > nhppa_pgs)
			i = nhppa_pgs;
		pa = spa;
		spa = tpa = epa;
	}

	printf("pa 0x%lx tpa 0x%lx\n", pa, tpa);
	while (pa < tpa) {
		pte = TLB_PAGE(pa) | hppa_pgs[i - 1] |
		    PTE_WRITE | PTE_ORDER | PTE_DIRTY | PTE_WIRED;
		pde = pmap_pde_get(pmap_kernel()->pm_pdir, pa);
		epde = pde + (PTE_MASK >> PTE_SHIFT) + 1;
		if (pa + (PTE_MASK + (1 << PTE_SHIFT)) > tpa)
			epde = pde + ((tpa & PTE_MASK) >> PTE_SHIFT);
		printf("pde %p epde %p pte 0x%lx\n", pde, epde, pte);
		for (pde += (pa & PTE_MASK) >> PTE_SHIFT; pde < epde;)
			*pde++ = pte;
		pa += PTE_MASK + (1 << PTE_SHIFT);
		pa &= ~(PTE_MASK | PAGE_MASK);
	}

	if (spa < epa)
		pmap_maphys(spa, epa);
}

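/*
 * pmap_bootstrap() runs before UVM is up: it initializes the
 * protection table and the kernel pmap around the page directory
 * already installed in CR_VTOP, steals physical pages for the initial
 * kernel page table pages, block-maps physical memory through
 * pmap_maphys() and hands the remaining pages to UVM.
 */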
void
pmap_bootstrap(vaddr_t vstart)
{
	extern int resvphysmem, __rodata_end, __data_start;
	vaddr_t va, eaddr, addr = round_page(vstart);
	struct pmap *kpm;

	DPRINTF(PDB_FOLLOW|PDB_INIT, ("pmap_bootstrap(0x%lx)\n", vstart));

	uvm_setpagesize();

	hppa_prot[UVM_PROT_NONE] = PTE_ORDER|PTE_ACC_NONE;
	hppa_prot[UVM_PROT_READ] = PTE_ORDER|PTE_READ;
	hppa_prot[UVM_PROT_WRITE] = PTE_ORDER|PTE_WRITE;
	hppa_prot[UVM_PROT_RW] = PTE_ORDER|PTE_READ|PTE_WRITE;
	hppa_prot[UVM_PROT_EXEC] = PTE_ORDER|PTE_EXEC;
	hppa_prot[UVM_PROT_RX] = PTE_ORDER|PTE_READ|PTE_EXEC;
	hppa_prot[UVM_PROT_WX] = PTE_ORDER|PTE_WRITE|PTE_EXEC;
	hppa_prot[UVM_PROT_RWX] = PTE_ORDER|PTE_READ|PTE_WRITE|PTE_EXEC;

	/*
	 * Initialize kernel pmap
	 */
	kpm = &kernel_pmap_store;
	bzero(kpm, sizeof(*kpm));
	simple_lock_init(&kpm->pm_lock);
	kpm->pm_refcount = 1;
	kpm->pm_space = HPPA_SID_KERNEL;
	TAILQ_INIT(&kpm->pm_pglist);
	kpm->pm_pdir = (u_int32_t *)mfctl(CR_VTOP);

	/*
	 * Allocate various tables and structures.
	 */

	if (&__rodata_end < &__data_start) {
		physical_steal = (vaddr_t)&__rodata_end;
		physical_end = (vaddr_t)&__data_start;
		DPRINTF(PDB_INIT, ("physpool: 0x%lx @ 0x%lx\n",
		    physical_end - physical_steal, physical_steal));
	}

	/* map enough PDEs to map initial physmem */
	for (va = 0x1000000, eaddr = ptoa(physmem);
	    va < eaddr; addr += PAGE_SIZE, va += 1 << PDE_SHIFT) {
		bzero((void *)addr, PAGE_SIZE);
		pmap_pde_set(kpm, va, addr);
		kpm->pm_stats.resident_count++;	/* count PTP as resident */
	}

	/* map a little of initial kmem */
	for (va = VM_MIN_KERNEL_ADDRESS + ((pmap_nkpdes - 1) << PDE_SHIFT);
	    va >= VM_MIN_KERNEL_ADDRESS;
	    addr += PAGE_SIZE, va -= 1 << PDE_SHIFT) {
		bzero((void *)addr, PAGE_SIZE);
		pmap_pde_set(kpm, va, addr);
		kpm->pm_stats.resident_count++;	/* count PTP as resident */
	}

	pmap_maphys(0x1000000, ctob(physmem));

	eaddr = physmem - atop(round_page(MSGBUFSIZE));
	resvphysmem = atop(addr);
	DPRINTF(PDB_INIT, ("physmem: 0x%lx - 0x%lx\n", resvphysmem, eaddr));
	uvm_page_physload(0, physmem,
	    resvphysmem, eaddr, VM_FREELIST_DEFAULT);
}

void
pmap_init(void)
{
	DPRINTF(PDB_FOLLOW|PDB_INIT, ("pmap_init()\n"));

	pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl",
	    &pool_allocator_nointr);
	pool_init(&pmap_pv_pool, sizeof(struct pv_entry), 0, 0, 0, "pmappv",
	    NULL);
	pool_setlowat(&pmap_pv_pool, pmap_pvlowat);
	pool_sethiwat(&pmap_pv_pool, pmap_pvlowat * 32);

	pmap_initialized = 1;

	DPRINTF(PDB_FOLLOW|PDB_INIT, ("pmap_init(): done\n"));
}

#ifdef PMAP_STEAL_MEMORY
vaddr_t
pmap_steal_memory(vsize_t size, vaddr_t *vstartp, vaddr_t *vendp)
{
	vaddr_t va;
	int npg;

	DPRINTF(PDB_FOLLOW|PDB_PHYS,
	    ("pmap_steal_memory(0x%lx, %p, %p)\n", size, vstartp, vendp));

	size = round_page(size);
	npg = atop(size);

	if (vm_physmem[0].avail_end - vm_physmem[0].avail_start < npg)
		panic("pmap_steal_memory: no more");

	if (vstartp)
		*vstartp = VM_MIN_KERNEL_ADDRESS;
	if (vendp)
		*vendp = VM_MAX_KERNEL_ADDRESS;

	vm_physmem[0].end -= npg;
	vm_physmem[0].avail_end -= npg;
	/* avail_end was already lowered by npg, so it is the stolen block */
	va = ptoa(vm_physmem[0].avail_end);
	bzero((void *)va, size);

	DPRINTF(PDB_FOLLOW|PDB_PHYS, ("pmap_steal_memory: 0x%lx\n", va));

	return (va);
}
#else
void
pmap_virtual_space(vaddr_t *startp, vaddr_t *endp)
{
	*startp = VM_MIN_KERNEL_ADDRESS;
	*endp = VM_MAX_KERNEL_ADDRESS;
}
#endif /* PMAP_STEAL_MEMORY */

#ifdef PMAP_GROWKERNEL
vaddr_t
pmap_growkernel(vaddr_t kva)
{
	vaddr_t va;

	DPRINTF(PDB_FOLLOW|PDB_PHYS, ("pmap_growkernel(0x%lx)\n", kva));

	va = VM_MIN_KERNEL_ADDRESS + (pmap_nkpdes << PDE_SHIFT);
	DPRINTF(PDB_PHYS, ("pmap_growkernel: was va 0x%lx\n", va));
	if (va < kva) {
		simple_lock(&pmap_kernel()->pm_obj.vmobjlock);

		for ( ; va < kva ; pmap_nkpdes++, va += 1 << PDE_SHIFT)
			if (uvm.page_init_done) {
				if (!pmap_pde_alloc(pmap_kernel(), va, NULL))
					break;
			} else {
				paddr_t pa;

				pa = pmap_steal_memory(PAGE_SIZE, NULL, NULL);
				if (pa == 0)
					panic("pmap_growkernel: out of memory");
				pmap_pde_set(pmap_kernel(), va, pa);
				pmap_kernel()->pm_stats.resident_count++;
			}

		simple_unlock(&pmap_kernel()->pm_obj.vmobjlock);
	}
	DPRINTF(PDB_PHYS|PDB_VP, ("pmap_growkernel: now va 0x%lx\n", va));
	return (va);
}
#endif /* PMAP_GROWKERNEL */

struct pmap *
pmap_create(void)
{
	struct pmap *pmap;
	struct vm_page *pg;
	pa_space_t space;
	paddr_t pa;

	DPRINTF(PDB_FOLLOW|PDB_PMAP, ("pmap_create()\n"));

	pmap = pool_get(&pmap_pmap_pool, PR_WAITOK);

	simple_lock_init(&pmap->pm_lock);
	pmap->pm_refcount = 1;
	pmap->pm_ptphint = NULL;

	TAILQ_INIT(&pmap->pm_pglist);
	if (uvm_pglistalloc(2 * PAGE_SIZE, 0, VM_MIN_KERNEL_ADDRESS,
	    PAGE_SIZE, 2 * PAGE_SIZE, &pmap->pm_pglist, 1, 1))
		panic("pmap_create: no pages");

	pg = TAILQ_FIRST(&pmap->pm_pglist);
	atomic_clearbits_int(&pg->pg_flags, PG_BUSY|PG_CLEAN);
	pmap->pm_pdir = (u_int32_t *)(pa = VM_PAGE_TO_PHYS(pg));
	bzero((void *)pa, PAGE_SIZE);

	/* set the first PIE that's covering low 2g of the address space */
	pg = TAILQ_LAST(&pmap->pm_pglist, pglist);
	atomic_clearbits_int(&pg->pg_flags, PG_BUSY|PG_CLEAN);
	*pmap->pm_pdir = (pa = VM_PAGE_TO_PHYS(pg)) >> PAGE_SHIFT;
	bzero((void *)pa, PAGE_SIZE);

	/* TODO for (space = 1 + (arc4random() & HPPA_SID_MAX);
	    pmap_sdir_get(space); space = (space + 1) % HPPA_SID_MAX); */
	/* XXX until the TODO above is implemented, space is uninitialized */
	pmap->pm_space = space;

	pmap->pm_stats.resident_count = 2;
	pmap->pm_stats.wired_count = 0;

	return (pmap);
}

void
pmap_destroy(struct pmap *pmap)
{
	int refs;

	DPRINTF(PDB_FOLLOW|PDB_PMAP, ("pmap_destroy(%p)\n", pmap));

	simple_lock(&pmap->pm_lock);
	refs = --pmap->pm_refcount;
	simple_unlock(&pmap->pm_lock);

	if (refs > 0)
		return;

	uvm_pglistfree(&pmap->pm_pglist);
	TAILQ_INIT(&pmap->pm_pglist);
	pool_put(&pmap_pmap_pool, pmap);
}

/*
 * Add a reference to the specified pmap.
 */
void
pmap_reference(struct pmap *pmap)
{
	DPRINTF(PDB_FOLLOW|PDB_PMAP, ("pmap_reference(%p)\n", pmap));

	simple_lock(&pmap->pm_lock);
	pmap->pm_refcount++;
	simple_unlock(&pmap->pm_lock);
}

void
pmap_collect(struct pmap *pmap)
{
	DPRINTF(PDB_FOLLOW|PDB_PMAP, ("pmap_collect(%p)\n", pmap));
	/* nothing yet */
}

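/*
 * pmap_enter() handles three cases: remapping an existing va to a new
 * pa (the old pv entry is recycled), re-entering the same page (only
 * the protection changes), and a brand new mapping (a pv entry is
 * allocated and the PTP wire count bumped).  PTE_REFTRAP makes the
 * first reference fault so referenced/modified state can be tracked.
 */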
int
pmap_enter(struct pmap *pmap, vaddr_t va, paddr_t pa, vm_prot_t prot,
    int flags)
{
	volatile pt_entry_t *pde;
	pt_entry_t pte;
	struct vm_page *pg, *ptp = NULL;
	struct pv_entry *pve;
	boolean_t wired = (flags & PMAP_WIRED) != 0;

	DPRINTF(PDB_FOLLOW|PDB_ENTER,
	    ("pmap_enter(%p, 0x%lx, 0x%lx, 0x%x, 0x%x)\n",
	    pmap, va, pa, prot, flags));

	simple_lock(&pmap->pm_lock);

	if (!(pde = pmap_pde_get(pmap->pm_pdir, va)) &&
	    !(pde = pmap_pde_alloc(pmap, va, &ptp))) {
		if (flags & PMAP_CANFAIL) {
			simple_unlock(&pmap->pm_lock);
			return (ENOMEM);
		}

		panic("pmap_enter: cannot allocate pde");
	}

	if (!ptp)
		ptp = pmap_pde_ptp(pmap, pde);

	if ((pte = pmap_pte_get(pde, va))) {
		DPRINTF(PDB_ENTER,
		    ("pmap_enter: remapping 0x%lx -> 0x%lx\n", pte, pa));

		pmap_pte_flush(pmap, va, pte);
		if (wired && !(pte & PTE_WIRED))
			pmap->pm_stats.wired_count++;
		else if (!wired && (pte & PTE_WIRED))
			pmap->pm_stats.wired_count--;

		if (PTE_PAGE(pte) == pa) {
			DPRINTF(PDB_FOLLOW|PDB_ENTER,
			    ("pmap_enter: same page\n"));
			goto enter;
		}

		pg = PHYS_TO_VM_PAGE(PTE_PAGE(pte));
		simple_lock(&pg->mdpage.pvh_lock);
		pve = pmap_pv_remove(pg, pmap, va);
		pg->mdpage.pvh_attrs |= pmap_pvh_attrs(pte);
		simple_unlock(&pg->mdpage.pvh_lock);
	} else {
		DPRINTF(PDB_ENTER,
		    ("pmap_enter: new mapping 0x%lx -> 0x%lx\n", va, pa));
		pte = PTE_REFTRAP;
		pve = NULL;
		pmap->pm_stats.resident_count++;
		if (wired)
			pmap->pm_stats.wired_count++;
		if (ptp)
			ptp->wire_count++;
	}

	if (pmap_initialized && (pg = PHYS_TO_VM_PAGE(pa))) {
		simple_lock(&pg->mdpage.pvh_lock);
		if (!pve && !(pve = pmap_pv_alloc())) {
			if (flags & PMAP_CANFAIL) {
				simple_unlock(&pg->mdpage.pvh_lock);
				simple_unlock(&pmap->pm_lock);
				return (ENOMEM);
			}
			panic("pmap_enter: no pv entries available");
		}
		pmap_pv_enter(pg, pve, pmap, va, ptp);
		simple_unlock(&pg->mdpage.pvh_lock);
	} else if (pve)
		pmap_pv_free(pve);

enter:
	/* preserve old ref & mod */
	pte = TLB_PAGE(pa) | pmap_prot(pmap, prot) |
	    (pte & (PTE_UNCACHABLE|PTE_DIRTY|PTE_REFTRAP));
	if (wired)
		pte |= PTE_WIRED;
	pmap_pte_set(pde, va, pte);

	simple_unlock(&pmap->pm_lock);

	DPRINTF(PDB_FOLLOW|PDB_ENTER, ("pmap_enter: leaving\n"));

	return (0);
}

void
pmap_remove(struct pmap *pmap, vaddr_t sva, vaddr_t eva)
{
	struct pv_entry *pve;
	volatile pt_entry_t *pde;
	pt_entry_t pte;
	struct vm_page *pg;
	vaddr_t pdemask;
	int batch;

	DPRINTF(PDB_FOLLOW|PDB_REMOVE,
	    ("pmap_remove(%p, 0x%lx, 0x%lx)\n", pmap, sva, eva));

	simple_lock(&pmap->pm_lock);

	for (batch = 0, pdemask = 1; sva < eva; sva += PAGE_SIZE) {
		if (pdemask != (sva & PDE_MASK)) {
			pdemask = sva & PDE_MASK;
			if (!(pde = pmap_pde_get(pmap->pm_pdir, sva))) {
				sva += ~PDE_MASK + 1 - PAGE_SIZE;
				continue;
			}
			batch = pdemask == sva && sva + ~PDE_MASK + 1 <= eva;
		}

		if ((pte = pmap_pte_get(pde, sva))) {
			/* TODO measure here the speed tradeoff
			 * for flushing whole PT vs per-page
			 * in case of non-complete pde fill
			 */
			pmap_pte_flush(pmap, sva, pte);
			if (pte & PTE_WIRED)
				pmap->pm_stats.wired_count--;
			pmap->pm_stats.resident_count--;

			/* iff properly accounted pde will be dropped anyway */
			if (!batch)
				pmap_pte_set(pde, sva, 0);

			if (pmap_initialized &&
			    (pg = PHYS_TO_VM_PAGE(PTE_PAGE(pte)))) {
				simple_lock(&pg->mdpage.pvh_lock);
				pg->mdpage.pvh_attrs |= pmap_pvh_attrs(pte);
				if ((pve = pmap_pv_remove(pg, pmap, sva)))
					pmap_pv_free(pve);
				simple_unlock(&pg->mdpage.pvh_lock);
			}
		}
	}

	simple_unlock(&pmap->pm_lock);

	DPRINTF(PDB_FOLLOW|PDB_REMOVE, ("pmap_remove: leaving\n"));
}

void
pmap_write_protect(struct pmap *pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
{
	struct vm_page *pg;
	volatile pt_entry_t *pde;
	pt_entry_t pte, tlbprot;
	vaddr_t pdemask;

	DPRINTF(PDB_FOLLOW|PDB_PMAP,
	    ("pmap_write_protect(%p, %lx, %lx, %x)\n", pmap, sva, eva, prot));

	sva = trunc_page(sva);
	tlbprot = pmap_prot(pmap, prot);

	simple_lock(&pmap->pm_lock);

	for (pdemask = 1; sva < eva; sva += PAGE_SIZE) {
		if (pdemask != (sva & PDE_MASK)) {
			pdemask = sva & PDE_MASK;
			if (!(pde = pmap_pde_get(pmap->pm_pdir, sva))) {
				sva += ~PDE_MASK + 1 - PAGE_SIZE;
				continue;
			}
		}
		if ((pte = pmap_pte_get(pde, sva))) {
			DPRINTF(PDB_PMAP,
			    ("pmap_write_protect: va=0x%lx pte=0x%lx\n",
			    sva, pte));
			/*
			 * Determine if mapping is changing.
			 * If not, nothing to do.
			 */
			if ((pte & PTE_ACC_MASK) == tlbprot)
				continue;

			pg = PHYS_TO_VM_PAGE(PTE_PAGE(pte));
			simple_lock(&pg->mdpage.pvh_lock);
			pg->mdpage.pvh_attrs |= pmap_pvh_attrs(pte);
			simple_unlock(&pg->mdpage.pvh_lock);

			pmap_pte_flush(pmap, sva, pte);
			pte &= ~PTE_ACC_MASK;
			pte |= tlbprot;
			pmap_pte_set(pde, sva, pte);
		}
	}

	simple_unlock(&pmap->pm_lock);
}

void
pmap_page_remove(struct vm_page *pg)
{
	struct pv_entry *pve, *ppve;

	DPRINTF(PDB_FOLLOW|PDB_PV, ("pmap_page_remove(%p)\n", pg));

	if (pg->mdpage.pvh_list == NULL)
		return;

	simple_lock(&pg->mdpage.pvh_lock);
	for (pve = pg->mdpage.pvh_list; pve;
	    pve = (ppve = pve)->pv_next, pmap_pv_free(ppve)) {
		struct pmap *pmap = pve->pv_pmap;
		vaddr_t va = pve->pv_va;
		volatile pt_entry_t *pde;
		pt_entry_t pte;

		simple_lock(&pmap->pm_lock);

		pde = pmap_pde_get(pmap->pm_pdir, va);
		pte = pmap_pte_get(pde, va);
		pg->mdpage.pvh_attrs |= pmap_pvh_attrs(pte);

		pmap_pte_flush(pmap, va, pte);
		if (pte & PTE_WIRED)
			pmap->pm_stats.wired_count--;
		pmap->pm_stats.resident_count--;

		pmap_pte_set(pde, va, 0);
		simple_unlock(&pmap->pm_lock);
	}
	pg->mdpage.pvh_list = NULL;
	simple_unlock(&pg->mdpage.pvh_lock);

	DPRINTF(PDB_FOLLOW|PDB_PV, ("pmap_page_remove: leaving\n"));
}

void
pmap_unwire(struct pmap *pmap, vaddr_t va)
{
	volatile pt_entry_t *pde;
	pt_entry_t pte = 0;

	DPRINTF(PDB_FOLLOW|PDB_PMAP, ("pmap_unwire(%p, 0x%lx)\n", pmap, va));

	simple_lock(&pmap->pm_lock);
	if ((pde = pmap_pde_get(pmap->pm_pdir, va))) {
		pte = pmap_pte_get(pde, va);

		if (pte & PTE_WIRED) {
			pte &= ~PTE_WIRED;
			pmap->pm_stats.wired_count--;
			pmap_pte_set(pde, va, pte);
		}
	}
	simple_unlock(&pmap->pm_lock);

	DPRINTF(PDB_FOLLOW|PDB_PMAP, ("pmap_unwire: leaving\n"));

#ifdef DIAGNOSTIC
	if (!pte)
		panic("pmap_unwire: invalid va 0x%lx", va);
#endif
}

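/*
 * Referenced/modified state is accumulated in pvh_attrs from the
 * PTE_DIRTY/PTE_REFTRAP bits of each mapping: pmap_changebit()
 * updates the bits in every PTE mapping the page, while
 * pmap_testbit() only gathers them until the queried bit is seen.
 */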
boolean_t
pmap_changebit(struct vm_page *pg, pt_entry_t set, pt_entry_t clear)
{
	struct pv_entry *pve;
	pt_entry_t res;

	DPRINTF(PDB_FOLLOW|PDB_BITS,
	    ("pmap_changebit(%p, %lx, %lx)\n", pg, set, clear));

	simple_lock(&pg->mdpage.pvh_lock);
	res = pg->mdpage.pvh_attrs = 0;
	for (pve = pg->mdpage.pvh_list; pve; pve = pve->pv_next) {
		struct pmap *pmap = pve->pv_pmap;
		vaddr_t va = pve->pv_va;
		volatile pt_entry_t *pde;
		pt_entry_t opte, pte;

		simple_lock(&pmap->pm_lock);
		if ((pde = pmap_pde_get(pmap->pm_pdir, va))) {
			opte = pte = pmap_pte_get(pde, va);
#ifdef PMAPDEBUG
			if (!pte) {
				printf("pmap_changebit: zero pte for 0x%lx\n",
				    va);
				simple_unlock(&pmap->pm_lock);
				continue;
			}
#endif
			pte &= ~clear;
			pte |= set;
			pg->mdpage.pvh_attrs |= pmap_pvh_attrs(pte);
			res |= pmap_pvh_attrs(opte);

			if (opte != pte) {
				pmap_pte_flush(pmap, va, opte);
				pmap_pte_set(pde, va, pte);
			}
		}
		simple_unlock(&pmap->pm_lock);
	}
	simple_unlock(&pg->mdpage.pvh_lock);

	return ((res & (clear | set)) != 0);
}

boolean_t
pmap_testbit(struct vm_page *pg, pt_entry_t bit)
{
	struct pv_entry *pve;
	pt_entry_t pte;

	DPRINTF(PDB_FOLLOW|PDB_BITS, ("pmap_testbit(%p, %lx)\n", pg, bit));

	simple_lock(&pg->mdpage.pvh_lock);
	for (pve = pg->mdpage.pvh_list; !(pg->mdpage.pvh_attrs & bit) && pve;
	    pve = pve->pv_next) {
		simple_lock(&pve->pv_pmap->pm_lock);
		pte = pmap_vp_find(pve->pv_pmap, pve->pv_va);
		simple_unlock(&pve->pv_pmap->pm_lock);
		pg->mdpage.pvh_attrs |= pmap_pvh_attrs(pte);
	}
	simple_unlock(&pg->mdpage.pvh_lock);

	return ((pg->mdpage.pvh_attrs & bit) != 0);
}

boolean_t
pmap_extract(struct pmap *pmap, vaddr_t va, paddr_t *pap)
{
	pt_entry_t pte;

	DPRINTF(PDB_FOLLOW|PDB_EXTRACT, ("pmap_extract(%p, %lx)\n", pmap, va));

	simple_lock(&pmap->pm_lock);
	pte = pmap_vp_find(pmap, va);
	simple_unlock(&pmap->pm_lock);

	if (pte) {
		if (pap)
			*pap = PTE_PAGE(pte) | (va & PAGE_MASK);
		return (TRUE);
	}

	return (FALSE);
}

void
pmap_activate(struct proc *p)
{
	struct pmap *pmap = p->p_vmspace->vm_map.pmap;
	struct pcb *pcb = &p->p_addr->u_pcb;

	pcb->pcb_space = pmap->pm_space;
	pcb->pcb_uva = (vaddr_t)p->p_addr;
}

void
pmap_deactivate(struct proc *p)
{

}

static __inline void
pmap_flush_page(struct vm_page *pg, int purge)
{
	struct pv_entry *pve;

	/* purge cache for all possible mappings for the pa */
	simple_lock(&pg->mdpage.pvh_lock);
	for (pve = pg->mdpage.pvh_list; pve; pve = pve->pv_next)
		if (purge)
			pdcache(pve->pv_pmap->pm_space, pve->pv_va, PAGE_SIZE);
		else
			fdcache(pve->pv_pmap->pm_space, pve->pv_va, PAGE_SIZE);
	simple_unlock(&pg->mdpage.pvh_lock);
}

void
pmap_zero_page(struct vm_page *pg)
{
	paddr_t pa = VM_PAGE_TO_PHYS(pg);

	DPRINTF(PDB_FOLLOW|PDB_PHYS, ("pmap_zero_page(%lx)\n", pa));

	pmap_flush_page(pg, 1);
	bzero((void *)pa, PAGE_SIZE);
	fdcache(HPPA_SID_KERNEL, pa, PAGE_SIZE);
}

void
pmap_copy_page(struct vm_page *srcpg, struct vm_page *dstpg)
{
	paddr_t spa = VM_PAGE_TO_PHYS(srcpg);
	paddr_t dpa = VM_PAGE_TO_PHYS(dstpg);

	DPRINTF(PDB_FOLLOW|PDB_PHYS, ("pmap_copy_page(%lx, %lx)\n", spa, dpa));

	pmap_flush_page(srcpg, 0);
	pmap_flush_page(dstpg, 1);
	bcopy((void *)spa, (void *)dpa, PAGE_SIZE);
	pdcache(HPPA_SID_KERNEL, spa, PAGE_SIZE);
	fdcache(HPPA_SID_KERNEL, dpa, PAGE_SIZE);
}

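/*
 * pmap_kenter_pa()/pmap_kremove() manage unmanaged kernel mappings:
 * they are always wired, and addresses in I/O space are entered
 * uncacheable and strongly ordered.  No pv entries are created,
 * which is why pmap_kremove() removes any stray ones defensively.
 */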
void
pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
{
	volatile pt_entry_t *pde;
	pt_entry_t pte, opte;

	DPRINTF(PDB_FOLLOW|PDB_ENTER,
	    ("pmap_kenter_pa(%lx, %lx, %x)\n", va, pa, prot));

	simple_lock(&pmap_kernel()->pm_lock);

	if (!(pde = pmap_pde_get(pmap_kernel()->pm_pdir, va)) &&
	    !(pde = pmap_pde_alloc(pmap_kernel(), va, NULL)))
		panic("pmap_kenter_pa: cannot allocate pde for va=0x%lx", va);
	opte = pmap_pte_get(pde, va);
	pte = TLB_PAGE(pa) | PTE_WIRED | PTE_REFTRAP |
	    pmap_prot(pmap_kernel(), prot);
	if (pa >= 0xf0000000ULL /* TODO (HPPA_IOBEGIN & HPPA_PHYSMAP) */)
		pte |= PTE_UNCACHABLE | PTE_ORDER;
	DPRINTF(PDB_ENTER, ("pmap_kenter_pa: pde %p va %lx pte %lx\n",
	    pde, va, pte));
	pmap_pte_set(pde, va, pte);
	pmap_kernel()->pm_stats.wired_count++;
	pmap_kernel()->pm_stats.resident_count++;
	if (opte)
		pmap_pte_flush(pmap_kernel(), va, opte);

#ifdef PMAPDEBUG
	{
		struct vm_page *pg;

		if (pmap_initialized && (pg = PHYS_TO_VM_PAGE(PTE_PAGE(pte)))) {
			simple_lock(&pg->mdpage.pvh_lock);
			if (pmap_check_alias(pg->mdpage.pvh_list, va, pte))
				Debugger();
			simple_unlock(&pg->mdpage.pvh_lock);
		}
	}
#endif
	simple_unlock(&pmap_kernel()->pm_lock);

	DPRINTF(PDB_FOLLOW|PDB_ENTER, ("pmap_kenter_pa: leaving\n"));
}

void
pmap_kremove(vaddr_t va, vsize_t size)
{
	struct pv_entry *pve;
	vaddr_t eva, pdemask;
	volatile pt_entry_t *pde;
	pt_entry_t pte;
	struct vm_page *pg;

	DPRINTF(PDB_FOLLOW|PDB_REMOVE,
	    ("pmap_kremove(%lx, %lx)\n", va, size));
#ifdef PMAPDEBUG
	if (va < ptoa(physmem)) {
		printf("pmap_kremove(%lx, %lx): unmapping physmem\n", va, size);
		return;
	}
#endif

	simple_lock(&pmap_kernel()->pm_lock);

	for (pdemask = 1, eva = va + size; va < eva; va += PAGE_SIZE) {
		if (pdemask != (va & PDE_MASK)) {
			pdemask = va & PDE_MASK;
			if (!(pde = pmap_pde_get(pmap_kernel()->pm_pdir, va))) {
				va += ~PDE_MASK + 1 - PAGE_SIZE;
				continue;
			}
		}
		if (!(pte = pmap_pte_get(pde, va))) {
#ifdef DEBUG
			printf("pmap_kremove: unmapping unmapped 0x%lx\n", va);
#endif
			continue;
		}

		pmap_pte_flush(pmap_kernel(), va, pte);
		pmap_pte_set(pde, va, 0);
		if (pmap_initialized && (pg = PHYS_TO_VM_PAGE(PTE_PAGE(pte)))) {
			simple_lock(&pg->mdpage.pvh_lock);
			pg->mdpage.pvh_attrs |= pmap_pvh_attrs(pte);
			/* just in case we have enter/kenter mismatch */
			if ((pve = pmap_pv_remove(pg, pmap_kernel(), va)))
				pmap_pv_free(pve);
			simple_unlock(&pg->mdpage.pvh_lock);
		}
	}

	simple_unlock(&pmap_kernel()->pm_lock);

	DPRINTF(PDB_FOLLOW|PDB_REMOVE, ("pmap_kremove: leaving\n"));
}