/*	$OpenBSD: pmap.c,v 1.130 2007/04/13 18:57:49 art Exp $	*/

/*
 * Copyright (c) 1998-2004 Michael Shalayeff
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * References:
 * 1. PA7100LC ERS, Hewlett-Packard, March 30 1999, Public version 1.0
 * 2. PA7300LC ERS, Hewlett-Packard, March 18 1996, Version 1.0
 *
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/user.h>
#include <sys/proc.h>
#include <sys/pool.h>
#include <sys/extent.h>

#include <uvm/uvm.h>

#include <machine/cpufunc.h>
#include <machine/iomod.h>

#include <dev/rndvar.h>

#ifdef PMAPDEBUG
#define	DPRINTF(l,s)	do {		\
	if ((pmapdebug & (l)) == (l))	\
		printf s;		\
} while (0)
#define	PDB_FOLLOW	0x00000001
#define	PDB_INIT	0x00000002
#define	PDB_ENTER	0x00000004
#define	PDB_REMOVE	0x00000008
#define	PDB_CREATE	0x00000010
#define	PDB_PTPAGE	0x00000020
#define	PDB_CACHE	0x00000040
#define	PDB_BITS	0x00000080
#define	PDB_COLLECT	0x00000100
#define	PDB_PROTECT	0x00000200
#define	PDB_EXTRACT	0x00000400
#define	PDB_VP		0x00000800
#define	PDB_PV		0x00001000
#define	PDB_PARANOIA	0x00002000
#define	PDB_WIRING	0x00004000
#define	PDB_PMAP	0x00008000
#define	PDB_STEAL	0x00010000
#define	PDB_PHYS	0x00020000
#define	PDB_POOL	0x00040000
int pmapdebug = 0
/*	| PDB_INIT */
/*	| PDB_FOLLOW */
/*	| PDB_VP */
/*	| PDB_PV */
/*	| PDB_ENTER */
/*	| PDB_REMOVE */
/*	| PDB_STEAL */
/*	| PDB_PROTECT */
/*	| PDB_PHYS */
	;
#else
#define	DPRINTF(l,s)	/* */
#endif

paddr_t physical_steal, physical_end;

int	pmap_hptsize = 16 * PAGE_SIZE;	/* patchable */
vaddr_t	pmap_hpt;

struct pmap	kernel_pmap_store;
int		hppa_sid_max = HPPA_SID_MAX;
struct pool	pmap_pmap_pool;
struct pool	pmap_pv_pool;
int		pmap_pvlowat = 252;
struct simplelock pvalloc_lock;
int		pmap_initialized;

u_int	hppa_prot[8];

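/*
 * Pick the space ID used to translate va: anything in the top quadrant
 * (0xc0000000 and above) always belongs to the kernel space.
 */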
#define	pmap_sid(pmap, va) \
	(((va & 0xc0000000) != 0xc0000000)? pmap->pmap_space : HPPA_SID_KERNEL)

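/*
 * Distill the ref/mod state out of a PTE: TLB_DIRTY carries through
 * as-is, while TLB_REFTRAP is armed while a page is still unreferenced,
 * so the XOR yields a positive "referenced" bit.
 */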
#define	pmap_pvh_attrs(a) \
	(((a) & PTE_PROT(TLB_DIRTY)) | ((a) ^ PTE_PROT(TLB_REFTRAP)))

struct vm_page *
pmap_pagealloc(struct uvm_object *obj, voff_t off)
{
	struct vm_page *pg;

	if ((pg = uvm_pagealloc(obj, off, NULL,
	    UVM_PGA_USERESERVE | UVM_PGA_ZERO)) == NULL)
		printf("pmap_pagealloc fail\n");

	return (pg);
}

#ifdef USE_HPT
/*
 * This hash function is the one used by the hardware TLB walker on the 7100LC.
 */
static __inline struct vp_entry *
pmap_hash(struct pmap *pmap, vaddr_t va)
{
	return (struct vp_entry *)(pmap_hpt +
	    (((va >> 8) ^ (pmap->pm_space << 9)) & (pmap_hptsize - 1)));
}

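/*
 * Build the HPT tag for (space, va): bit 31 marks the entry valid,
 * the low half carries the space ID and the upper bits carry the
 * high-order virtual address bits.
 */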
static __inline u_int32_t
pmap_vtag(struct pmap *pmap, vaddr_t va)
{
	return (0x80000000 | (pmap->pm_space & 0xffff) |
	    ((va >> 1) & 0x7fff0000));
}
#endif

static __inline void
pmap_sdir_set(pa_space_t space, volatile u_int32_t *pd)
{
	volatile u_int32_t *vtop;

	mfctl(CR_VTOP, vtop);
#ifdef PMAPDEBUG
	if (!vtop)
		panic("pmap_sdir_set: zero vtop");
#endif
	vtop[space] = (u_int32_t)pd;
}

static __inline u_int32_t *
pmap_sdir_get(pa_space_t space)
{
	u_int32_t *vtop;

	mfctl(CR_VTOP, vtop);
	return ((u_int32_t *)vtop[space]);
}

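/*
 * Two-level translation: the top ten VA bits index the per-space page
 * directory (each PDE maps 4MB), and the next ten bits index the PTE
 * page it points to (see pmap_pte_get()).
 */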
static __inline volatile pt_entry_t *
pmap_pde_get(volatile u_int32_t *pd, vaddr_t va)
{
	return ((pt_entry_t *)pd[va >> 22]);
}

static __inline void
pmap_pde_set(struct pmap *pm, vaddr_t va, paddr_t ptp)
{
#ifdef PMAPDEBUG
	if (ptp & PGOFSET)
		panic("pmap_pde_set, unaligned ptp 0x%x", ptp);
#endif
	DPRINTF(PDB_FOLLOW|PDB_VP,
	    ("pmap_pde_set(%p, 0x%x, 0x%x)\n", pm, va, ptp));

	pm->pm_pdir[va >> 22] = ptp;
}

static __inline pt_entry_t *
pmap_pde_alloc(struct pmap *pm, vaddr_t va, struct vm_page **pdep)
{
	struct vm_page *pg;
	paddr_t pa;

	DPRINTF(PDB_FOLLOW|PDB_VP,
	    ("pmap_pde_alloc(%p, 0x%x, %p)\n", pm, va, pdep));

	if ((pg = pmap_pagealloc(&pm->pm_obj, va)) == NULL)
		return (NULL);

	pa = VM_PAGE_TO_PHYS(pg);

	DPRINTF(PDB_FOLLOW|PDB_VP, ("pmap_pde_alloc: pde %x\n", pa));

	atomic_clearbits_int(&pg->pg_flags, PG_BUSY);
	pg->wire_count = 1;		/* no mappings yet */
	pmap_pde_set(pm, va, pa);
	pm->pm_stats.resident_count++;	/* count PTP as resident */
	pm->pm_ptphint = pg;
	if (pdep)
		*pdep = pg;
	return ((pt_entry_t *)pa);
}

static __inline struct vm_page *
pmap_pde_ptp(struct pmap *pm, volatile pt_entry_t *pde)
{
	paddr_t pa = (paddr_t)pde;

	DPRINTF(PDB_FOLLOW|PDB_PV, ("pmap_pde_ptp(%p, %p)\n", pm, pde));

	if (pm->pm_ptphint && VM_PAGE_TO_PHYS(pm->pm_ptphint) == pa)
		return (pm->pm_ptphint);

	DPRINTF(PDB_FOLLOW|PDB_PV, ("pmap_pde_ptp: lookup 0x%x\n", pa));

	return (PHYS_TO_VM_PAGE(pa));
}

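/*
 * Drop one mapping's reference on a PTE page.  wire_count was seeded
 * to 1 at allocation time, so once it drains back to 1 no user
 * mappings remain and the page table page itself can be freed.
 */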
static __inline void
pmap_pde_release(struct pmap *pmap, vaddr_t va, struct vm_page *ptp)
{
	DPRINTF(PDB_FOLLOW|PDB_PV,
	    ("pmap_pde_release(%p, 0x%x, %p)\n", pmap, va, ptp));

	if (pmap != pmap_kernel() && --ptp->wire_count <= 1) {
		DPRINTF(PDB_FOLLOW|PDB_PV,
		    ("pmap_pde_release: disposing ptp %p\n", ptp));
		pmap_pde_set(pmap, va, 0);
		pmap->pm_stats.resident_count--;
		if (pmap->pm_ptphint == ptp)
			pmap->pm_ptphint = TAILQ_FIRST(&pmap->pm_obj.memq);
		ptp->wire_count = 0;
#ifdef DIAGNOSTIC
		if (ptp->pg_flags & PG_BUSY)
			panic("pmap_pde_release: busy page table page");
#endif
		uvm_pagefree(ptp);
	}
}

static __inline pt_entry_t
pmap_pte_get(volatile pt_entry_t *pde, vaddr_t va)
{
	return (pde[(va >> 12) & 0x3ff]);
}

static __inline void
pmap_pte_set(volatile pt_entry_t *pde, vaddr_t va, pt_entry_t pte)
{
	DPRINTF(PDB_FOLLOW|PDB_VP, ("pmap_pte_set(%p, 0x%x, 0x%x)\n",
	    pde, va, pte));

#ifdef PMAPDEBUG
	if (!pde)
		panic("pmap_pte_set: zero pde");

	if ((paddr_t)pde & PGOFSET)
		panic("pmap_pte_set, unaligned pde %p", pde);
#endif

	pde[(va >> 12) & 0x3ff] = pte;
}

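/*
 * Flush a mapping out of the caches and TLB: for executable pages the
 * instruction cache and I-TLB go first, then the data cache and D-TLB,
 * and finally any matching entry is evicted from the HPT.
 */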
void
pmap_pte_flush(struct pmap *pmap, vaddr_t va, pt_entry_t pte)
{
	if (pte & PTE_PROT(TLB_EXECUTE)) {
		ficache(pmap->pm_space, va, PAGE_SIZE);
		pitlb(pmap->pm_space, va);
	}
	fdcache(pmap->pm_space, va, PAGE_SIZE);
	pdtlb(pmap->pm_space, va);
#ifdef USE_HPT
	if (pmap_hpt) {
		struct vp_entry *hpt;
		hpt = pmap_hash(pmap, va);
		if (hpt->vp_tag == pmap_vtag(pmap, va))
			hpt->vp_tag = 0xffff;
	}
#endif
}

static __inline pt_entry_t
pmap_vp_find(struct pmap *pm, vaddr_t va)
{
	volatile pt_entry_t *pde;

	if (!(pde = pmap_pde_get(pm->pm_pdir, va)))
		return (0);

	return (pmap_pte_get(pde, va));
}

#ifdef DDB
void
pmap_dump_table(pa_space_t space, vaddr_t sva)
{
	pa_space_t sp;

	for (sp = 0; sp <= hppa_sid_max; sp++) {
		volatile pt_entry_t *pde;
		pt_entry_t pte;
		vaddr_t va, pdemask;
		u_int32_t *pd;

		if (((int)space >= 0 && sp != space) ||
		    !(pd = pmap_sdir_get(sp)))
			continue;

		for (pdemask = 1, va = sva ? sva : 0;
		    va < 0xfffff000; va += PAGE_SIZE) {
			if (pdemask != (va & PDE_MASK)) {
				pdemask = va & PDE_MASK;
				if (!(pde = pmap_pde_get(pd, va))) {
					va += ~PDE_MASK + 1 - PAGE_SIZE;
					continue;
				}
				printf("%x:%8p:\n", sp, pde);
			}

			if (!(pte = pmap_pte_get(pde, va)))
				continue;

			printf("0x%08lx-0x%08x:%b\n", va, pte & ~PAGE_MASK,
			    TLB_PROT(pte & PAGE_MASK), TLB_BITS);
		}
	}
}

void
pmap_dump_pv(paddr_t pa)
{
	struct vm_page *pg;
	struct pv_entry *pve;

	pg = PHYS_TO_VM_PAGE(pa);
	simple_lock(&pg->mdpage.pvh_lock);
	for (pve = pg->mdpage.pvh_list; pve; pve = pve->pv_next)
		printf("%x:%lx\n", pve->pv_pmap->pm_space, pve->pv_va);
	simple_unlock(&pg->mdpage.pvh_lock);
}
#endif

#ifdef PMAPDEBUG
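/*
 * The PA-RISC caches are virtually indexed, so two mappings of the
 * same physical page that disagree in the cache-index bits must not
 * be writable, or the cache can go incoherent; complain if we find
 * such a pair.
 */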
int
pmap_check_alias(struct pv_entry *pve, vaddr_t va, pt_entry_t pte)
{
	int ret;

	/* check for non-equivalent aliased mappings */
	for (ret = 0; pve; pve = pve->pv_next) {
		pte |= pmap_vp_find(pve->pv_pmap, pve->pv_va);
		if ((va & HPPA_PGAOFF) != (pve->pv_va & HPPA_PGAOFF) &&
		    (pte & PTE_PROT(TLB_WRITE))) {
			printf("pmap_check_alias: "
			    "aliased writable mapping 0x%x:0x%x\n",
			    pve->pv_pmap->pm_space, pve->pv_va);
			ret++;
		}
	}

	return (ret);
}
#endif

static __inline struct pv_entry *
pmap_pv_alloc(void)
{
	struct pv_entry *pv;

	DPRINTF(PDB_FOLLOW|PDB_PV, ("pmap_pv_alloc()\n"));

	simple_lock(&pvalloc_lock);

	pv = pool_get(&pmap_pv_pool, PR_NOWAIT);

	simple_unlock(&pvalloc_lock);

	DPRINTF(PDB_FOLLOW|PDB_PV, ("pmap_pv_alloc: %p\n", pv));

	return (pv);
}

static __inline void
pmap_pv_free(struct pv_entry *pv)
{
	simple_lock(&pvalloc_lock);

	if (pv->pv_ptp)
		pmap_pde_release(pv->pv_pmap, pv->pv_va, pv->pv_ptp);

	pool_put(&pmap_pv_pool, pv);

	simple_unlock(&pvalloc_lock);
}

static __inline void
pmap_pv_enter(struct vm_page *pg, struct pv_entry *pve, struct pmap *pm,
    vaddr_t va, struct vm_page *pdep)
{
	DPRINTF(PDB_FOLLOW|PDB_PV, ("pmap_pv_enter(%p, %p, %p, 0x%x, %p)\n",
	    pg, pve, pm, va, pdep));
	pve->pv_pmap = pm;
	pve->pv_va = va;
	pve->pv_ptp = pdep;
	pve->pv_next = pg->mdpage.pvh_list;
	pg->mdpage.pvh_list = pve;
#ifdef PMAPDEBUG
	if (pmap_check_alias(pve, va, 0))
		Debugger();
#endif
}

static __inline struct pv_entry *
pmap_pv_remove(struct vm_page *pg, struct pmap *pmap, vaddr_t va)
{
	struct pv_entry **pve, *pv;

	simple_lock(&pg->mdpage.pvh_lock);	/* lock pv_head */
	for (pv = *(pve = &pg->mdpage.pvh_list);
	    pv; pv = *(pve = &(*pve)->pv_next))
		if (pv->pv_pmap == pmap && pv->pv_va == va) {
			*pve = pv->pv_next;
			break;
		}
	simple_unlock(&pg->mdpage.pvh_lock);	/* unlock, done! */
	return (pv);
}

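/*
 * Bootstrap the pmap module: set up the kernel page directory and the
 * per-space directory table (CR_VTOP), carve out interrupt memory and
 * the optional HPT, block-map the kernel text with a BTLB entry,
 * preallocate PDEs for the physical map and kernel virtual space,
 * hand the remaining RAM to UVM and finally enter the 1:1 mappings
 * for physical memory.
 */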
void
pmap_bootstrap(vaddr_t vstart)
{
	extern int resvphysmem, etext, __rodata_end, __data_start;
	extern u_int *ie_mem;
	extern paddr_t hppa_vtop;
	vaddr_t va, addr = round_page(vstart), eaddr, t;
	vsize_t size;
	struct pmap *kpm;
	int npdes, nkpdes;

	DPRINTF(PDB_FOLLOW|PDB_INIT, ("pmap_bootstrap(0x%x)\n", vstart));

	uvm_setpagesize();

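	/*
	 * Translate the UVM protection codes into PA-RISC TLB
	 * access-rights fields; note there is no write-only mode,
	 * so UVM_PROT_WRITE degrades to read/write.
	 */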
	hppa_prot[UVM_PROT_NONE]  = TLB_AR_NA;
	hppa_prot[UVM_PROT_READ]  = TLB_AR_R;
	hppa_prot[UVM_PROT_WRITE] = TLB_AR_RW;
	hppa_prot[UVM_PROT_RW]    = TLB_AR_RW;
	hppa_prot[UVM_PROT_EXEC]  = TLB_AR_RX;
	hppa_prot[UVM_PROT_RX]    = TLB_AR_RX;
	hppa_prot[UVM_PROT_WX]    = TLB_AR_RWX;
	hppa_prot[UVM_PROT_RWX]   = TLB_AR_RWX;

	/*
	 * Initialize kernel pmap
	 */
	kpm = &kernel_pmap_store;
	bzero(kpm, sizeof(*kpm));
	simple_lock_init(&kpm->pm_lock);
	kpm->pm_obj.pgops = NULL;
	TAILQ_INIT(&kpm->pm_obj.memq);
	kpm->pm_obj.uo_npages = 0;
	kpm->pm_obj.uo_refs = 1;
	kpm->pm_space = HPPA_SID_KERNEL;
	kpm->pm_pid = HPPA_PID_KERNEL;
	kpm->pm_pdir_pg = NULL;
	kpm->pm_pdir = (u_int32_t *)addr;
	bzero((void *)addr, PAGE_SIZE);
	fdcache(HPPA_SID_KERNEL, addr, PAGE_SIZE);
	addr += PAGE_SIZE;

	/*
	 * Allocate various tables and structures.
	 */

	mtctl(addr, CR_VTOP);
	hppa_vtop = addr;
	size = round_page((hppa_sid_max + 1) * 4);
	bzero((void *)addr, size);
	fdcache(HPPA_SID_KERNEL, addr, size);
	DPRINTF(PDB_INIT, ("vtop: 0x%x @ 0x%x\n", size, addr));
	addr += size;
	pmap_sdir_set(HPPA_SID_KERNEL, kpm->pm_pdir);

	ie_mem = (u_int *)addr;
	addr += 0x8000;

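	/*
	 * Set up the hashed page table if the CPU can walk one: the
	 * table must be naturally aligned to its (power-of-two) size,
	 * and every slot starts out invalid (tag 0xffff).
	 */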
#ifdef USE_HPT
	if (pmap_hptsize) {
		struct vp_entry *hptp;
		int i, error;

		/* must be aligned to the size XXX */
		if (addr & (pmap_hptsize - 1))
			addr += pmap_hptsize;
		addr &= ~(pmap_hptsize - 1);

		bzero((void *)addr, pmap_hptsize);
		for (hptp = (struct vp_entry *)addr, i = pmap_hptsize / 16; i--;)
			hptp[i].vp_tag = 0xffff;
		pmap_hpt = addr;
		addr += pmap_hptsize;

		DPRINTF(PDB_INIT, ("hpt_table: 0x%x @ %p\n",
		    pmap_hptsize, addr));

		if ((error = (cpu_hpt_init)(pmap_hpt, pmap_hptsize)) < 0) {
			printf("WARNING: HPT init error %d -- DISABLED\n",
			    error);
			pmap_hpt = 0;
		} else
			DPRINTF(PDB_INIT,
			    ("HPT: installed for %d entries @ 0x%x\n",
			    pmap_hptsize / sizeof(struct vp_entry), addr));
	}
#endif

	/* XXX PCXS needs this inserted into an IBTLB */
	/* and can block-map the whole phys w/ another */
	t = (vaddr_t)&etext;
	if (btlb_insert(HPPA_SID_KERNEL, 0, 0, &t,
	    pmap_sid2pid(HPPA_SID_KERNEL) |
	    pmap_prot(pmap_kernel(), UVM_PROT_RX)) < 0)
		printf("WARNING: cannot block map kernel text\n");

	if (&__rodata_end < &__data_start) {
		physical_steal = (vaddr_t)&__rodata_end;
		physical_end = (vaddr_t)&__data_start;
		DPRINTF(PDB_INIT, ("physpool: 0x%x @ 0x%x\n",
		    physical_end - physical_steal, physical_steal));
	}

	/* kernel virtual is the last gig of the moohicans */
	nkpdes = physmem >> 14;	/* at least 16/gig for kmem */
	if (nkpdes < 4)
		nkpdes = 4;	/* ... but no less than four */
	nkpdes += HPPA_IOLEN / PDE_SIZE; /* ... and io space too */
	npdes = nkpdes + (physmem + btoc(PDE_SIZE) - 1) / btoc(PDE_SIZE);
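	/*
	 * For example, with 1GB of RAM (262144 4K pages) the shift
	 * above yields 262144 >> 14 == 16 kernel PDEs, i.e. 64MB of
	 * kernel virtual space per gigabyte, on top of the PDEs that
	 * cover the I/O range and the 1:1 physical map.
	 */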

	/* map the pdes */
	for (va = 0; npdes--; va += PDE_SIZE, addr += PAGE_SIZE) {

		/* last nkpdes are for the kernel virtual */
		if (npdes == nkpdes - 1)
			va = SYSCALLGATE;
		if (npdes == HPPA_IOLEN / PDE_SIZE - 1)
			va = HPPA_IOBEGIN;
		/* now map the pde for the physmem */
		bzero((void *)addr, PAGE_SIZE);
		DPRINTF(PDB_INIT|PDB_VP, ("pde premap 0x%x 0x%x\n", va, addr));
		pmap_pde_set(kpm, va, addr);
		kpm->pm_stats.resident_count++;	/* count PTP as resident */
	}

	resvphysmem = atop(addr);
	eaddr = physmem - atop(round_page(MSGBUFSIZE));
	DPRINTF(PDB_INIT, ("physmem: 0x%x - 0x%x\n", resvphysmem, eaddr));
	uvm_page_physload(0, eaddr,
	    resvphysmem, eaddr, VM_FREELIST_DEFAULT);

	/* TODO optimize/inline the kenter */
	for (va = 0; va < ptoa(physmem); va += PAGE_SIZE) {
		extern struct user *proc0paddr;
		vm_prot_t prot = UVM_PROT_RW;

		if (va < (vaddr_t)&etext)
			prot = UVM_PROT_RX;
		else if (va < (vaddr_t)&__rodata_end)
			prot = UVM_PROT_READ;
		else if (va == (vaddr_t)proc0paddr + USPACE)
			prot = UVM_PROT_NONE;

		pmap_kenter_pa(va, va, prot);
	}

	DPRINTF(PDB_INIT, ("bootstrap: mapped %p - 0x%x\n", &etext, va));
}

void
pmap_init(void)
{
	DPRINTF(PDB_FOLLOW|PDB_INIT, ("pmap_init()\n"));

	simple_lock_init(&pvalloc_lock);

	pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl",
	    &pool_allocator_nointr);
	pool_init(&pmap_pv_pool, sizeof(struct pv_entry), 0, 0, 0, "pmappv",
	    NULL);
	pool_setlowat(&pmap_pv_pool, pmap_pvlowat);
	pool_sethiwat(&pmap_pv_pool, pmap_pvlowat * 32);

	pmap_initialized = 1;

	/*
	 * Map the SysCall gateway page once for everybody.
	 * NB: we would have to remap any physical memory that
	 * happens to live at the SYSCALLGATE address.
	 */
	{
		volatile pt_entry_t *pde;

		if (!(pde = pmap_pde_get(pmap_kernel()->pm_pdir, SYSCALLGATE)) &&
		    !(pde = pmap_pde_alloc(pmap_kernel(), SYSCALLGATE, NULL)))
			panic("pmap_init: cannot allocate pde");

		pmap_pte_set(pde, SYSCALLGATE, (paddr_t)&gateway_page |
		    PTE_PROT(TLB_GATE_PROT));
	}

	DPRINTF(PDB_FOLLOW|PDB_INIT, ("pmap_init(): done\n"));
}

void
pmap_virtual_space(vaddr_t *startp, vaddr_t *endp)
{
	*startp = SYSCALLGATE + PAGE_SIZE;
	*endp = VM_MAX_KERNEL_ADDRESS;
}

struct pmap *
pmap_create(void)
{
	struct pmap *pmap;
	pa_space_t space;

	DPRINTF(PDB_FOLLOW|PDB_PMAP, ("pmap_create()\n"));

	pmap = pool_get(&pmap_pmap_pool, PR_WAITOK);

	simple_lock_init(&pmap->pm_lock);
	pmap->pm_obj.pgops = NULL;	/* currently not a mappable object */
	TAILQ_INIT(&pmap->pm_obj.memq);
	pmap->pm_obj.uo_npages = 0;
	pmap->pm_obj.uo_refs = 1;

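	/*
	 * Pick an unused space ID: start from a random slot and probe
	 * linearly until a free entry turns up in the space directory.
	 */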
	for (space = 1 + (arc4random() % hppa_sid_max);
	    pmap_sdir_get(space); space = (space + 1) % hppa_sid_max)
		;

	if ((pmap->pm_pdir_pg = pmap_pagealloc(NULL, 0)) == NULL)
		panic("pmap_create: no pages");
	pmap->pm_ptphint = NULL;
	pmap->pm_pdir = (u_int32_t *)VM_PAGE_TO_PHYS(pmap->pm_pdir_pg);
	pmap_sdir_set(space, pmap->pm_pdir);

	pmap->pm_space = space;
	pmap->pm_pid = (space + 1) << 1;

	pmap->pm_stats.resident_count = 1;
	pmap->pm_stats.wired_count = 0;

	return (pmap);
}

void
pmap_destroy(struct pmap *pmap)
{
#ifdef DIAGNOSTIC
	struct vm_page *pg;
#endif
	int refs;

	DPRINTF(PDB_FOLLOW|PDB_PMAP, ("pmap_destroy(%p)\n", pmap));

	simple_lock(&pmap->pm_lock);
	refs = --pmap->pm_obj.uo_refs;
	simple_unlock(&pmap->pm_lock);

	if (refs > 0)
		return;

#ifdef DIAGNOSTIC
	while ((pg = TAILQ_FIRST(&pmap->pm_obj.memq))) {
		pt_entry_t *pde, *epde;
		struct vm_page *sheep;
		struct pv_entry *haggis;

		if (pg == pmap->pm_pdir_pg)
			continue;

#ifdef PMAPDEBUG
		printf("pmap_destroy(%p): stray ptp 0x%lx w/ %d ents:",
		    pmap, VM_PAGE_TO_PHYS(pg), pg->wire_count - 1);
#endif

		pde = (pt_entry_t *)VM_PAGE_TO_PHYS(pg);
		epde = (pt_entry_t *)(VM_PAGE_TO_PHYS(pg) + PAGE_SIZE);
		for (; pde < epde; pde++) {
			if (*pde == 0)
				continue;

			sheep = PHYS_TO_VM_PAGE(PTE_PAGE(*pde));
			for (haggis = sheep->mdpage.pvh_list; haggis != NULL; )
				if (haggis->pv_pmap == pmap) {
#ifdef PMAPDEBUG
					printf(" 0x%x", haggis->pv_va);
#endif
					pmap_remove(pmap, haggis->pv_va,
					    haggis->pv_va + PAGE_SIZE);

					/* exploit the sacred knowledge
					   of lambeous ozzmosis */
					haggis = sheep->mdpage.pvh_list;
				} else
					haggis = haggis->pv_next;
		}
#ifdef PMAPDEBUG
		printf("\n");
#endif
	}
#endif
	pmap_sdir_set(pmap->pm_space, 0);
	uvm_pagefree(pmap->pm_pdir_pg);
	pmap->pm_pdir_pg = NULL;
	pool_put(&pmap_pmap_pool, pmap);
}

/*
 * Add a reference to the specified pmap.
 */
void
pmap_reference(struct pmap *pmap)
{
	DPRINTF(PDB_FOLLOW|PDB_PMAP, ("pmap_reference(%p)\n", pmap));

	simple_lock(&pmap->pm_lock);
	pmap->pm_obj.uo_refs++;
	simple_unlock(&pmap->pm_lock);
}

void
pmap_collect(struct pmap *pmap)
{
	DPRINTF(PDB_FOLLOW|PDB_PMAP, ("pmap_collect(%p)\n", pmap));
	/* nothing yet */
}

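/*
 * Enter (or update) the mapping of va to pa with the given protection.
 * May be called on an existing mapping, in which case the old PTE is
 * flushed and its ref/mod state is folded into the page attributes;
 * returns ENOMEM instead of panicking when PMAP_CANFAIL is set.
 */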
int
pmap_enter(struct pmap *pmap, vaddr_t va, paddr_t pa, vm_prot_t prot,
    int flags)
{
	volatile pt_entry_t *pde;
	pt_entry_t pte;
	struct vm_page *pg, *ptp = NULL;
	struct pv_entry *pve;
	boolean_t wired = (flags & PMAP_WIRED) != 0;

	DPRINTF(PDB_FOLLOW|PDB_ENTER,
	    ("pmap_enter(%p, 0x%x, 0x%x, 0x%x, 0x%x)\n",
	    pmap, va, pa, prot, flags));

	simple_lock(&pmap->pm_lock);

	if (!(pde = pmap_pde_get(pmap->pm_pdir, va)) &&
	    !(pde = pmap_pde_alloc(pmap, va, &ptp))) {
		if (flags & PMAP_CANFAIL) {
			simple_unlock(&pmap->pm_lock);
			return (ENOMEM);
		}

		panic("pmap_enter: cannot allocate pde");
	}

	if (!ptp)
		ptp = pmap_pde_ptp(pmap, pde);

	if ((pte = pmap_pte_get(pde, va))) {

		DPRINTF(PDB_ENTER,
		    ("pmap_enter: remapping 0x%x -> 0x%x\n", pte, pa));

		pmap_pte_flush(pmap, va, pte);
		if (wired && !(pte & PTE_PROT(TLB_WIRED)))
			pmap->pm_stats.wired_count++;
		else if (!wired && (pte & PTE_PROT(TLB_WIRED)))
			pmap->pm_stats.wired_count--;

		if (PTE_PAGE(pte) == pa) {
			DPRINTF(PDB_FOLLOW|PDB_ENTER,
			    ("pmap_enter: same page\n"));
			goto enter;
		}

		pg = PHYS_TO_VM_PAGE(PTE_PAGE(pte));
		simple_lock(&pg->mdpage.pvh_lock);
		pve = pmap_pv_remove(pg, pmap, va);
		pg->mdpage.pvh_attrs |= pmap_pvh_attrs(pte);
	} else {
		DPRINTF(PDB_ENTER,
		    ("pmap_enter: new mapping 0x%x -> 0x%x\n", va, pa));
		pte = PTE_PROT(TLB_REFTRAP);
		pve = NULL;
		pmap->pm_stats.resident_count++;
		if (wired)
			pmap->pm_stats.wired_count++;
		if (ptp)
			ptp->wire_count++;
		simple_lock(&pg->mdpage.pvh_lock);
	}

	if (pmap_initialized && (pg = PHYS_TO_VM_PAGE(PTE_PAGE(pa)))) {
		if (!pve && !(pve = pmap_pv_alloc())) {
			if (flags & PMAP_CANFAIL) {
				simple_unlock(&pg->mdpage.pvh_lock);
				simple_unlock(&pmap->pm_lock);
				return (ENOMEM);
			}
			panic("pmap_enter: no pv entries available");
		}
		pmap_pv_enter(pg, pve, pmap, va, ptp);
	} else if (pve)
		pmap_pv_free(pve);
	simple_unlock(&pg->mdpage.pvh_lock);

enter:
	/* preserve old ref & mod */
	pte = pa | PTE_PROT(pmap_prot(pmap, prot)) |
	    (pte & PTE_PROT(TLB_UNCACHABLE|TLB_DIRTY|TLB_REFTRAP));
	if (wired)
		pte |= PTE_PROT(TLB_WIRED);
	pmap_pte_set(pde, va, pte);

	simple_unlock(&pmap->pm_lock);

	DPRINTF(PDB_FOLLOW|PDB_ENTER, ("pmap_enter: leaving\n"));

	return (0);
}

void
pmap_remove(struct pmap *pmap, vaddr_t sva, vaddr_t eva)
{
	struct pv_entry *pve;
	volatile pt_entry_t *pde;
	pt_entry_t pte;
	struct vm_page *pg;
	vaddr_t pdemask;
	int batch;

	DPRINTF(PDB_FOLLOW|PDB_REMOVE,
	    ("pmap_remove(%p, 0x%x, 0x%x)\n", pmap, sva, eva));

	simple_lock(&pmap->pm_lock);

	for (batch = 0, pdemask = 1; sva < eva; sva += PAGE_SIZE) {
		if (pdemask != (sva & PDE_MASK)) {
			pdemask = sva & PDE_MASK;
			if (!(pde = pmap_pde_get(pmap->pm_pdir, sva))) {
				sva += ~PDE_MASK + 1 - PAGE_SIZE;
				continue;
			}
			batch = pdemask == sva && sva + ~PDE_MASK + 1 <= eva;
		}

		if ((pte = pmap_pte_get(pde, sva))) {

			/* TODO measure here the speed tradeoff
			 * for flushing whole 4M vs per-page
			 * in case of non-complete pde fill
			 */
			pmap_pte_flush(pmap, sva, pte);
			if (pte & PTE_PROT(TLB_WIRED))
				pmap->pm_stats.wired_count--;
			pmap->pm_stats.resident_count--;

			/*
			 * If the removal covers the whole 4MB PDE range
			 * ("batch"), the PDE page will be dropped once
			 * its wire count drains, so skip clearing the
			 * individual PTEs.
			 */
			if (!batch)
				pmap_pte_set(pde, sva, 0);

			if (pmap_initialized &&
			    (pg = PHYS_TO_VM_PAGE(PTE_PAGE(pte)))) {

				simple_lock(&pg->mdpage.pvh_lock);
				pg->mdpage.pvh_attrs |= pmap_pvh_attrs(pte);
				if ((pve = pmap_pv_remove(pg, pmap, sva)))
					pmap_pv_free(pve);
				simple_unlock(&pg->mdpage.pvh_lock);
			}
		}
	}

	simple_unlock(&pmap->pm_lock);

	DPRINTF(PDB_FOLLOW|PDB_REMOVE, ("pmap_remove: leaving\n"));
}

void
pmap_write_protect(struct pmap *pmap, vaddr_t sva, vaddr_t eva,
    vm_prot_t prot)
{
	struct vm_page *pg;
	volatile pt_entry_t *pde;
	pt_entry_t pte;
	u_int tlbprot, pdemask;

	DPRINTF(PDB_FOLLOW|PDB_PMAP,
	    ("pmap_write_protect(%p, %x, %x, %x)\n", pmap, sva, eva, prot));

	sva = trunc_page(sva);
	tlbprot = PTE_PROT(pmap_prot(pmap, prot));

	simple_lock(&pmap->pm_lock);

	for (pdemask = 1; sva < eva; sva += PAGE_SIZE) {
		if (pdemask != (sva & PDE_MASK)) {
			pdemask = sva & PDE_MASK;
			if (!(pde = pmap_pde_get(pmap->pm_pdir, sva))) {
				sva += ~PDE_MASK + 1 - PAGE_SIZE;
				continue;
			}
		}
		if ((pte = pmap_pte_get(pde, sva))) {

			DPRINTF(PDB_PMAP,
			    ("pmap_write_protect: va=0x%x pte=0x%x\n",
			    sva, pte));
			/*
			 * Determine if mapping is changing.
			 * If not, nothing to do.
			 */
			if ((pte & PTE_PROT(TLB_AR_MASK)) == tlbprot)
				continue;

			pg = PHYS_TO_VM_PAGE(PTE_PAGE(pte));
			simple_lock(&pg->mdpage.pvh_lock);
			pg->mdpage.pvh_attrs |= pmap_pvh_attrs(pte);
			simple_unlock(&pg->mdpage.pvh_lock);

			pmap_pte_flush(pmap, sva, pte);
			pte &= ~PTE_PROT(TLB_AR_MASK);
			pte |= tlbprot;
			pmap_pte_set(pde, sva, pte);
		}
	}

	simple_unlock(&pmap->pm_lock);
}

void
pmap_page_remove(struct vm_page *pg)
{
	struct pv_entry *pve, *ppve;

	DPRINTF(PDB_FOLLOW|PDB_PV, ("pmap_page_remove(%p)\n", pg));

	if (pg->mdpage.pvh_list == NULL)
		return;

	simple_lock(&pg->mdpage.pvh_lock);
	for (pve = pg->mdpage.pvh_list; pve;
	    pve = (ppve = pve)->pv_next, pmap_pv_free(ppve)) {
		struct pmap *pmap = pve->pv_pmap;
		vaddr_t va = pve->pv_va;
		volatile pt_entry_t *pde;
		pt_entry_t pte;

		simple_lock(&pmap->pm_lock);

		pde = pmap_pde_get(pmap->pm_pdir, va);
		pte = pmap_pte_get(pde, va);
		pg->mdpage.pvh_attrs |= pmap_pvh_attrs(pte);

		pmap_pte_flush(pmap, va, pte);
		if (pte & PTE_PROT(TLB_WIRED))
			pmap->pm_stats.wired_count--;
		pmap->pm_stats.resident_count--;

		pmap_pte_set(pde, va, 0);
		simple_unlock(&pmap->pm_lock);
	}
	pg->mdpage.pvh_list = NULL;
	simple_unlock(&pg->mdpage.pvh_lock);

	DPRINTF(PDB_FOLLOW|PDB_PV, ("pmap_page_remove: leaving\n"));
}

void
pmap_unwire(struct pmap *pmap, vaddr_t va)
{
	volatile pt_entry_t *pde;
	pt_entry_t pte = 0;

	DPRINTF(PDB_FOLLOW|PDB_PMAP, ("pmap_unwire(%p, 0x%x)\n", pmap, va));

	simple_lock(&pmap->pm_lock);
	if ((pde = pmap_pde_get(pmap->pm_pdir, va))) {
		pte = pmap_pte_get(pde, va);

		if (pte & PTE_PROT(TLB_WIRED)) {
			pte &= ~PTE_PROT(TLB_WIRED);
			pmap->pm_stats.wired_count--;
			pmap_pte_set(pde, va, pte);
		}
	}
	simple_unlock(&pmap->pm_lock);

	DPRINTF(PDB_FOLLOW|PDB_PMAP, ("pmap_unwire: leaving\n"));

#ifdef DIAGNOSTIC
	if (!pte)
		panic("pmap_unwire: invalid va 0x%lx", va);
#endif
}

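/*
 * Walk every mapping of the page, clearing/setting the given PTE bits
 * and accumulating the page's referenced/modified attributes; returns
 * whether any of the affected bits were previously set.
 */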
boolean_t
pmap_changebit(struct vm_page *pg, u_int set, u_int clear)
{
	struct pv_entry *pve;
	pt_entry_t res;

	DPRINTF(PDB_FOLLOW|PDB_BITS,
	    ("pmap_changebit(%p, %x, %x)\n", pg, set, clear));

	simple_lock(&pg->mdpage.pvh_lock);
	res = pg->mdpage.pvh_attrs = 0;
	for (pve = pg->mdpage.pvh_list; pve; pve = pve->pv_next) {
		struct pmap *pmap = pve->pv_pmap;
		vaddr_t va = pve->pv_va;
		volatile pt_entry_t *pde;
		pt_entry_t opte, pte;

		simple_lock(&pmap->pm_lock);
		if ((pde = pmap_pde_get(pmap->pm_pdir, va))) {
			opte = pte = pmap_pte_get(pde, va);
#ifdef PMAPDEBUG
			if (!pte) {
				printf("pmap_changebit: zero pte for 0x%x\n",
				    va);
				continue;
			}
#endif
			pte &= ~clear;
			pte |= set;
			pg->mdpage.pvh_attrs |= pmap_pvh_attrs(pte);
			res |= pmap_pvh_attrs(opte);

			if (opte != pte) {
				pmap_pte_flush(pmap, va, opte);
				pmap_pte_set(pde, va, pte);
			}
		}
		simple_unlock(&pmap->pm_lock);
	}
	simple_unlock(&pg->mdpage.pvh_lock);

	return ((res & (clear | set)) != 0);
}

boolean_t
pmap_testbit(struct vm_page *pg, u_int bit)
{
	struct pv_entry *pve;
	pt_entry_t pte;

	DPRINTF(PDB_FOLLOW|PDB_BITS, ("pmap_testbit(%p, %x)\n", pg, bit));

	simple_lock(&pg->mdpage.pvh_lock);
	for (pve = pg->mdpage.pvh_list; !(pg->mdpage.pvh_attrs & bit) && pve;
	    pve = pve->pv_next) {
		simple_lock(&pve->pv_pmap->pm_lock);
		pte = pmap_vp_find(pve->pv_pmap, pve->pv_va);
		simple_unlock(&pve->pv_pmap->pm_lock);
		pg->mdpage.pvh_attrs |= pmap_pvh_attrs(pte);
	}
	simple_unlock(&pg->mdpage.pvh_lock);

	return ((pg->mdpage.pvh_attrs & bit) != 0);
}

boolean_t
pmap_extract(struct pmap *pmap, vaddr_t va, paddr_t *pap)
{
	pt_entry_t pte;

	DPRINTF(PDB_FOLLOW|PDB_EXTRACT, ("pmap_extract(%p, %x)\n", pmap, va));

	simple_lock(&pmap->pm_lock);
	pte = pmap_vp_find(pmap, va);
	simple_unlock(&pmap->pm_lock);

	if (pte) {
		if (pap)
			*pap = (pte & ~PGOFSET) | (va & PGOFSET);
		return (TRUE);
	}

	return (FALSE);
}

void
pmap_activate(struct proc *p)
{
	struct pmap *pmap = p->p_vmspace->vm_map.pmap;
	struct pcb *pcb = &p->p_addr->u_pcb;

	pcb->pcb_space = pmap->pm_space;
	pcb->pcb_uva = (vaddr_t)p->p_addr;
	fdcache(HPPA_SID_KERNEL, (vaddr_t)pcb, PAGE_SIZE);
}

void
pmap_deactivate(struct proc *p)
{
}

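/*
 * Flush (or, for pages about to be overwritten, merely purge) every
 * mapping of the physical page from the virtually-indexed cache before
 * the page is accessed through the 1:1 kernel mapping below.
 */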
static __inline void
pmap_flush_page(struct vm_page *pg, int purge)
{
	struct pv_entry *pve;

	/* purge cache for all possible mappings for the pa */
	simple_lock(&pg->mdpage.pvh_lock);
	for (pve = pg->mdpage.pvh_list; pve; pve = pve->pv_next)
		if (purge)
			pdcache(pve->pv_pmap->pm_space, pve->pv_va, PAGE_SIZE);
		else
			fdcache(pve->pv_pmap->pm_space, pve->pv_va, PAGE_SIZE);
	simple_unlock(&pg->mdpage.pvh_lock);
}

void
pmap_zero_page(struct vm_page *pg)
{
	paddr_t pa = VM_PAGE_TO_PHYS(pg);

	DPRINTF(PDB_FOLLOW|PDB_PHYS, ("pmap_zero_page(%x)\n", pa));

	pmap_flush_page(pg, 1);
	bzero((void *)pa, PAGE_SIZE);
	fdcache(HPPA_SID_KERNEL, pa, PAGE_SIZE);
}

void
pmap_copy_page(struct vm_page *srcpg, struct vm_page *dstpg)
{
	paddr_t spa = VM_PAGE_TO_PHYS(srcpg);
	paddr_t dpa = VM_PAGE_TO_PHYS(dstpg);

	DPRINTF(PDB_FOLLOW|PDB_PHYS, ("pmap_copy_page(%x, %x)\n", spa, dpa));

	pmap_flush_page(srcpg, 0);
	pmap_flush_page(dstpg, 1);
	bcopy((void *)spa, (void *)dpa, PAGE_SIZE);
	pdcache(HPPA_SID_KERNEL, spa, PAGE_SIZE);
	fdcache(HPPA_SID_KERNEL, dpa, PAGE_SIZE);
}

void
pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
{
	volatile pt_entry_t *pde;
	pt_entry_t pte, opte;

	DPRINTF(PDB_FOLLOW|PDB_ENTER,
	    ("pmap_kenter_pa(%x, %x, %x)\n", va, pa, prot));

	simple_lock(&pmap_kernel()->pm_lock);

	if (!(pde = pmap_pde_get(pmap_kernel()->pm_pdir, va)) &&
	    !(pde = pmap_pde_alloc(pmap_kernel(), va, NULL)))
		panic("pmap_kenter_pa: cannot allocate pde for va=0x%lx", va);
	opte = pmap_pte_get(pde, va);
	pte = pa | PTE_PROT(TLB_WIRED | TLB_REFTRAP |
	    pmap_prot(pmap_kernel(), prot));
	if (pa >= HPPA_IOBEGIN)
		pte |= PTE_PROT(TLB_UNCACHABLE);
	pmap_pte_set(pde, va, pte);
	pmap_kernel()->pm_stats.wired_count++;
	pmap_kernel()->pm_stats.resident_count++;
	if (opte)
		pmap_pte_flush(pmap_kernel(), va, opte);

#ifdef PMAPDEBUG
	{
		struct vm_page *pg;

		if (pmap_initialized && (pg = PHYS_TO_VM_PAGE(PTE_PAGE(pte)))) {

			simple_lock(&pg->mdpage.pvh_lock);
			if (pmap_check_alias(pg->mdpage.pvh_list, va, pte))
				Debugger();
			simple_unlock(&pg->mdpage.pvh_lock);
		}
	}
#endif
	simple_unlock(&pmap_kernel()->pm_lock);

	DPRINTF(PDB_FOLLOW|PDB_ENTER, ("pmap_kenter_pa: leaving\n"));
}

void
pmap_kremove(vaddr_t va, vsize_t size)
{
	struct pv_entry *pve;
	vaddr_t eva, pdemask;
	volatile pt_entry_t *pde;
	pt_entry_t pte;
	struct vm_page *pg;

	DPRINTF(PDB_FOLLOW|PDB_REMOVE,
	    ("pmap_kremove(%x, %x)\n", va, size));
#ifdef PMAPDEBUG
	if (va < ptoa(physmem)) {
		printf("pmap_kremove(%x, %x): unmapping physmem\n", va, size);
		return;
	}
#endif

	simple_lock(&pmap_kernel()->pm_lock);

	for (pdemask = 1, eva = va + size; va < eva; va += PAGE_SIZE) {
		if (pdemask != (va & PDE_MASK)) {
			pdemask = va & PDE_MASK;
			if (!(pde = pmap_pde_get(pmap_kernel()->pm_pdir, va))) {
				va += ~PDE_MASK + 1 - PAGE_SIZE;
				continue;
			}
		}
		if (!(pte = pmap_pte_get(pde, va))) {
#ifdef DEBUG
			printf("pmap_kremove: unmapping unmapped 0x%x\n", va);
#endif
			continue;
		}

		pmap_pte_flush(pmap_kernel(), va, pte);
		pmap_pte_set(pde, va, 0);
		if (pmap_initialized && (pg = PHYS_TO_VM_PAGE(PTE_PAGE(pte)))) {

			simple_lock(&pg->mdpage.pvh_lock);
			pg->mdpage.pvh_attrs |= pmap_pvh_attrs(pte);
			/* just in case we have an enter/kenter mismatch */
			if ((pve = pmap_pv_remove(pg, pmap_kernel(), va)))
				pmap_pv_free(pve);
			simple_unlock(&pg->mdpage.pvh_lock);
		}
	}

	simple_unlock(&pmap_kernel()->pm_lock);

	DPRINTF(PDB_FOLLOW|PDB_REMOVE, ("pmap_kremove: leaving\n"));
}