/*	$OpenBSD: pmap.c,v 1.7 2007/06/21 04:41:21 miod Exp $	*/
/*	$NetBSD: pmap.c,v 1.55 2006/08/07 23:19:36 tsutsui Exp $	*/

/*-
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by UCHIYAMA Yasushi.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/pool.h>
#include <sys/msgbuf.h>

#include <uvm/uvm.h>

#include <sh/mmu.h>
#include <sh/cache.h>

#ifdef DEBUG
#define	STATIC
#else
#define	STATIC	static
#endif

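/*
 * Two-level page table layout (assuming the usual sh values of 4KB
 * pages and 32-bit PTEs): pm_ptp is a one-page array of pointers to
 * page table pages.  Each page table page holds __PMAP_PTP_PG_N PTEs
 * and thus maps 1 << __PMAP_PTP_SHIFT (4MB) of virtual space;
 * __PMAP_PTP_INDEX() selects the page table page for a VA and
 * __PMAP_PTP_OFSET() selects the PTE within it.
 */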
#define	__PMAP_PTP_SHIFT	22
#define	__PMAP_PTP_TRUNC(va)						\
	(((va) + (1 << __PMAP_PTP_SHIFT) - 1) & ~((1 << __PMAP_PTP_SHIFT) - 1))
#define	__PMAP_PTP_PG_N		(PAGE_SIZE / sizeof(pt_entry_t))
#define	__PMAP_PTP_INDEX(va)	(((va) >> __PMAP_PTP_SHIFT) & (__PMAP_PTP_N - 1))
#define	__PMAP_PTP_OFSET(va)	(((va) >> PGSHIFT) & (__PMAP_PTP_PG_N - 1))

struct pmap __pmap_kernel;
STATIC vaddr_t __pmap_kve;	/* last allocated kernel virtual address */
paddr_t avail_start;		/* PA of first available physical page */
paddr_t avail_end;		/* PA of last available physical page */

/* pmap pool */
STATIC struct pool __pmap_pmap_pool;

/* pv_entry ops. */
struct pv_entry {
	struct pmap *pv_pmap;
	vaddr_t pv_va;
	vm_prot_t pv_prot;
	SLIST_ENTRY(pv_entry) pv_link;
};
#define	__pmap_pv_alloc()	pool_get(&__pmap_pv_pool, PR_NOWAIT)
#define	__pmap_pv_free(pv)	pool_put(&__pmap_pv_pool, (pv))
STATIC void __pmap_pv_enter(pmap_t, struct vm_page *, vaddr_t, vm_prot_t);
STATIC void __pmap_pv_remove(pmap_t, struct vm_page *, vaddr_t);
STATIC void *__pmap_pv_page_alloc(struct pool *, int);
STATIC void __pmap_pv_page_free(struct pool *, void *);
STATIC struct pool __pmap_pv_pool;
STATIC struct pool_allocator pmap_pv_page_allocator = {
	__pmap_pv_page_alloc, __pmap_pv_page_free, 0,
};

/* ASID ops. */
STATIC int __pmap_asid_alloc(void);
STATIC void __pmap_asid_free(int);
STATIC struct {
	uint32_t map[8];
	int hint;	/* hint for next allocation */
} __pmap_asid;
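/*
 * The bitmap above covers the 256 ASIDs of the SH-3/SH-4 MMU
 * (8 x 32 bits).  ASID 0 is reserved for the kernel and is never
 * handed out; "hint" records where the next search should start.
 */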

/* page table entry ops. */
STATIC pt_entry_t *__pmap_pte_alloc(pmap_t, vaddr_t);

/* pmap_enter util */
STATIC boolean_t __pmap_map_change(pmap_t, vaddr_t, paddr_t, vm_prot_t,
    pt_entry_t);

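/*
 * void pmap_bootstrap(void):
 *	Bootstrap the pmap module before UVM is up: steal the message
 *	buffer, record the managed physical memory range, allocate the
 *	kernel page table page directory and turn the MMU on.
 */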
void
pmap_bootstrap()
{
	/* Steal msgbuf area */
	initmsgbuf((caddr_t)uvm_pageboot_alloc(MSGBUFSIZE), MSGBUFSIZE);

	avail_start = ptoa(vm_physmem[0].start);
	avail_end = ptoa(vm_physmem[vm_nphysseg - 1].end);
	__pmap_kve = VM_MIN_KERNEL_ADDRESS;

	pmap_kernel()->pm_refcnt = 1;
	pmap_kernel()->pm_ptp = (pt_entry_t **)uvm_pageboot_alloc(PAGE_SIZE);
	memset(pmap_kernel()->pm_ptp, 0, PAGE_SIZE);

	/* Enable MMU */
	sh_mmu_start();
	/* Mask all interrupts */
	_cpu_intr_suspend();
	/* Enable exception for P3 access */
	_cpu_exception_resume(0);
}

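/*
 * vaddr_t pmap_steal_memory(vsize_t size, vaddr_t *vstart, vaddr_t *vend):
 *	Early (pre-uvm_page_init) allocator.  Steal whole pages from the
 *	top of the first physical segment that is large enough, zero
 *	them, and return their P1 (cached, direct-mapped) address.
 */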
vaddr_t
pmap_steal_memory(vsize_t size, vaddr_t *vstart, vaddr_t *vend)
{
	struct vm_physseg *bank;
	int i, j, npage;
	paddr_t pa;
	vaddr_t va;

	KDASSERT(!uvm.page_init_done);

	size = round_page(size);
	npage = atop(size);

	for (i = 0, bank = &vm_physmem[i]; i < vm_nphysseg; i++, bank++)
		if (npage <= bank->avail_end - bank->avail_start)
			break;
	KDASSERT(i != vm_nphysseg);

	/* Steal pages */
	bank->avail_end -= npage;
	bank->end -= npage;
	pa = ptoa(bank->avail_end);

	/* GC memory bank */
	if (bank->avail_start == bank->end) {
		/* Remove this segment from the list. */
		vm_nphysseg--;
		KDASSERT(vm_nphysseg > 0);
		for (j = i; j < vm_nphysseg; j++)
			vm_physmem[j] = vm_physmem[j + 1];
	}

	va = SH3_PHYS_TO_P1SEG(pa);
	memset((caddr_t)va, 0, size);

	if (vstart)
		*vstart = VM_MIN_KERNEL_ADDRESS;
	if (vend)
		*vend = VM_MAX_KERNEL_ADDRESS;

	return (va);
}

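/*
 * vaddr_t pmap_growkernel(vaddr_t maxkvaddr):
 *	Make sure kernel page table pages exist for every kernel virtual
 *	address up to maxkvaddr.  Returns the new end of allocated
 *	kernel virtual address space.
 */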
vaddr_t
pmap_growkernel(vaddr_t maxkvaddr)
{
	int i, n;

	if (maxkvaddr <= __pmap_kve)
		return (__pmap_kve);

	i = __PMAP_PTP_INDEX(__pmap_kve - VM_MIN_KERNEL_ADDRESS);
	__pmap_kve = __PMAP_PTP_TRUNC(maxkvaddr);
	n = __PMAP_PTP_INDEX(__pmap_kve - VM_MIN_KERNEL_ADDRESS);

	/* Allocate page table pages */
	for (; i < n; i++) {
		if (__pmap_kernel.pm_ptp[i] != NULL)
			continue;

		if (uvm.page_init_done) {
			struct vm_page *pg = uvm_pagealloc(NULL, 0, NULL,
			    UVM_PGA_USERESERVE | UVM_PGA_ZERO);
			if (pg == NULL)
				goto error;
			__pmap_kernel.pm_ptp[i] = (pt_entry_t *)
			    SH3_PHYS_TO_P1SEG(VM_PAGE_TO_PHYS(pg));
		} else {
			pt_entry_t *ptp = (pt_entry_t *)
			    uvm_pageboot_alloc(PAGE_SIZE);
			if (ptp == NULL)
				goto error;
			__pmap_kernel.pm_ptp[i] = ptp;
			memset(ptp, 0, PAGE_SIZE);
		}
	}

	return (__pmap_kve);
error:
	panic("pmap_growkernel: out of memory.");
	/* NOTREACHED */
}

void
pmap_init()
{
	/* Initialize pmap module */
	pool_init(&__pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl",
	    &pool_allocator_nointr);
	pool_init(&__pmap_pv_pool, sizeof(struct pv_entry), 0, 0, 0, "pvpl",
	    &pmap_pv_page_allocator);
	pool_setlowat(&__pmap_pv_pool, 16);
}

pmap_t
pmap_create()
{
	pmap_t pmap;

	pmap = pool_get(&__pmap_pmap_pool, PR_WAITOK);
	memset(pmap, 0, sizeof(struct pmap));
	pmap->pm_asid = -1;
	pmap->pm_refcnt = 1;
	/* Allocate page table page holder (512 slots) */
	pmap->pm_ptp = (pt_entry_t **)
	    SH3_PHYS_TO_P1SEG(VM_PAGE_TO_PHYS(
		uvm_pagealloc(NULL, 0, NULL,
		    UVM_PGA_USERESERVE | UVM_PGA_ZERO)));

	return (pmap);
}

void
pmap_destroy(pmap_t pmap)
{
	int i;

	if (--pmap->pm_refcnt > 0)
		return;

	/* Deallocate all page table pages */
	for (i = 0; i < __PMAP_PTP_N; i++) {
		vaddr_t va = (vaddr_t)pmap->pm_ptp[i];
		if (va == 0)
			continue;
#ifdef DEBUG	/* Check no mapping exists. */
		{
			int j;
			pt_entry_t *pte = (pt_entry_t *)va;
			for (j = 0; j < __PMAP_PTP_PG_N; j++, pte++)
				KDASSERT(*pte == 0);
		}
#endif /* DEBUG */
		/* Purge cache entry for next use of this page. */
		if (SH_HAS_VIRTUAL_ALIAS)
			sh_dcache_inv_range(va, PAGE_SIZE);
		/* Free page table */
		uvm_pagefree(PHYS_TO_VM_PAGE(SH3_P1SEG_TO_PHYS(va)));
	}
	/* Deallocate page table page holder */
	if (SH_HAS_VIRTUAL_ALIAS)
		sh_dcache_inv_range((vaddr_t)pmap->pm_ptp, PAGE_SIZE);
	uvm_pagefree(PHYS_TO_VM_PAGE(SH3_P1SEG_TO_PHYS((vaddr_t)pmap->pm_ptp)));

	/* Free ASID */
	__pmap_asid_free(pmap->pm_asid);

	pool_put(&__pmap_pmap_pool, pmap);
}

void
pmap_reference(pmap_t pmap)
{
	pmap->pm_refcnt++;
}

void
pmap_activate(struct proc *p)
{
	pmap_t pmap = p->p_vmspace->vm_map.pmap;

	if (pmap->pm_asid == -1)
		pmap->pm_asid = __pmap_asid_alloc();

	KDASSERT(pmap->pm_asid >= 0 && pmap->pm_asid < 256);
	sh_tlb_set_asid(pmap->pm_asid);
}

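/*
 * int pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot,
 *     int flags):
 *	Create a mapping from va to pa with at most the protection in
 *	"prot".  For managed (memory) pages the valid (PG_V) and dirty
 *	(PG_D) bits are pre-set only for the accesses already reported
 *	in "flags"; otherwise they are left clear so that the first
 *	reference or write faults and __pmap_pte_load() can emulate the
 *	referenced/modified bits.  Unmanaged (bus-space) pages are
 *	always entered uncached.
 */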
int
pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
{
	struct vm_page *pg;
	struct vm_page_md *pvh;
	pt_entry_t entry, *pte;
	boolean_t kva = (pmap == pmap_kernel());

	/* "flags" never exceeds "prot" */
	KDASSERT(prot != 0 && ((flags & VM_PROT_ALL) & ~prot) == 0);

	pg = PHYS_TO_VM_PAGE(pa);
	entry = (pa & PG_PPN) | PG_4K;
	if (flags & PMAP_WIRED)
		entry |= _PG_WIRED;

	if (pg != NULL) {	/* memory-space */
		pvh = &pg->mdpage;
		entry |= PG_C;	/* always cached */

		/* Modified/reference tracking */
		if (flags & VM_PROT_WRITE) {
			entry |= PG_V | PG_D;
			pvh->pvh_flags |= PVH_MODIFIED | PVH_REFERENCED;
		} else if (flags & VM_PROT_ALL) {
			entry |= PG_V;
			pvh->pvh_flags |= PVH_REFERENCED;
		}

		/* Protection */
		if ((prot & VM_PROT_WRITE) && (pvh->pvh_flags & PVH_MODIFIED)) {
			if (kva)
				entry |= PG_PR_KRW | PG_SH;
			else
				entry |= PG_PR_URW;
		} else {
			/* RO, COW page */
			if (kva)
				entry |= PG_PR_KRO | PG_SH;
			else
				entry |= PG_PR_URO;
		}

		/* Check for existing mapping */
		if (__pmap_map_change(pmap, va, pa, prot, entry))
			return (0);

		/* Add to physical-virtual map list of this page */
		__pmap_pv_enter(pmap, pg, va, prot);

	} else {	/* bus-space (always uncached map) */
		if (kva) {
			entry |= PG_V | PG_SH |
			    ((prot & VM_PROT_WRITE) ?
			    (PG_PR_KRW | PG_D) : PG_PR_KRO);
		} else {
			entry |= PG_V |
			    ((prot & VM_PROT_WRITE) ?
			    (PG_PR_URW | PG_D) : PG_PR_URO);
		}
	}

	/* Register to page table */
	if (kva)
		pte = __pmap_kpte_lookup(va);
	else {
		pte = __pmap_pte_alloc(pmap, va);
		if (pte == NULL) {
			if (flags & PMAP_CANFAIL)
				return ENOMEM;
			panic("pmap_enter: cannot allocate pte");
		}
	}

	*pte = entry;

	if (pmap->pm_asid != -1)
		sh_tlb_update(pmap->pm_asid, va, entry);

	if (!SH_HAS_UNIFIED_CACHE &&
	    (prot == (VM_PROT_READ | VM_PROT_EXECUTE)))
		sh_icache_sync_range_index(va, PAGE_SIZE);

	if (entry & _PG_WIRED)
		pmap->pm_stats.wired_count++;
	pmap->pm_stats.resident_count++;

	return (0);
}

/*
 * boolean_t __pmap_map_change(pmap_t pmap, vaddr_t va, paddr_t pa,
 *     vm_prot_t prot, pt_entry_t entry):
 *	Handle the situation where pmap_enter() is called for a
 *	virtual address at which a mapping already exists.
 */
boolean_t
__pmap_map_change(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot,
    pt_entry_t entry)
{
	pt_entry_t *pte, oentry;
	vaddr_t eva = va + PAGE_SIZE;

	if ((pte = __pmap_pte_lookup(pmap, va)) == NULL ||
	    ((oentry = *pte) == 0))
		return (FALSE);		/* no mapping exists. */

	if (pa != (oentry & PG_PPN)) {
		/* The VA is already mapped to a different physical page. */
		pmap_remove(pmap, va, eva);
		return (FALSE);
	}

	/* Pre-existing mapping */

	/* Protection change. */
	if ((oentry & PG_PR_MASK) != (entry & PG_PR_MASK))
		pmap_protect(pmap, va, eva, prot);

	/* Wired change */
	if (oentry & _PG_WIRED) {
		if (!(entry & _PG_WIRED)) {
			/* wired -> unwired */
			*pte = entry;
			/* "wired" is a software-only bit; no TLB update needed. */
			pmap->pm_stats.wired_count--;
		}
	} else if (entry & _PG_WIRED) {
		/*
		 * unwired -> wired.  Remove the old mapping and let
		 * pmap_enter() install the new one so "flags" is reflected.
		 */
		pmap_remove(pmap, va, eva);
		return (FALSE);
	}

	return (TRUE);	/* mapping was changed. */
}

/*
 * void __pmap_pv_enter(pmap_t pmap, struct vm_page *pg, vaddr_t va,
 *     vm_prot_t prot):
 *	Insert a physical-to-virtual mapping into the vm_page.
 *	Assumes any pre-existing mapping has already been removed.
 */
void
__pmap_pv_enter(pmap_t pmap, struct vm_page *pg, vaddr_t va, vm_prot_t prot)
{
	struct vm_page_md *pvh;
	struct pv_entry *pv;
	int s;
	int have_writeable = 0;

	s = splvm();
	if (SH_HAS_VIRTUAL_ALIAS) {
		/*
		 * If this page is or will be mapped writable anywhere,
		 * remove all existing mappings of it to avoid virtual
		 * cache aliases.
		 */
		pvh = &pg->mdpage;
		if (prot & VM_PROT_WRITE)
			have_writeable = 1;
		else {
			SLIST_FOREACH(pv, &pvh->pvh_head, pv_link) {
				if (pv->pv_prot & VM_PROT_WRITE) {
					have_writeable = 1;
					break;
				}
			}
		}
		if (have_writeable != 0) {
			while ((pv = SLIST_FIRST(&pvh->pvh_head)) != NULL)
				pmap_remove(pv->pv_pmap, pv->pv_va,
				    pv->pv_va + PAGE_SIZE);
		}
	}

	/* Register pv map */
	pvh = &pg->mdpage;
	pv = __pmap_pv_alloc();
	pv->pv_pmap = pmap;
	pv->pv_va = va;
	pv->pv_prot = prot;

	SLIST_INSERT_HEAD(&pvh->pvh_head, pv, pv_link);
	splx(s);
}

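/*
 * void pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva):
 *	Remove all mappings in the range [sva, eva) from the pmap,
 *	dropping their pv entries and invalidating the matching TLB
 *	entries when the pmap has a valid ASID.
 */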
void
pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva)
{
	struct vm_page *pg;
	pt_entry_t *pte, entry;
	vaddr_t va;

	KDASSERT((sva & PGOFSET) == 0);

	for (va = sva; va < eva; va += PAGE_SIZE) {
		if ((pte = __pmap_pte_lookup(pmap, va)) == NULL ||
		    (entry = *pte) == 0)
			continue;

		if ((pg = PHYS_TO_VM_PAGE(entry & PG_PPN)) != NULL)
			__pmap_pv_remove(pmap, pg, va);

		if (entry & _PG_WIRED)
			pmap->pm_stats.wired_count--;
		pmap->pm_stats.resident_count--;
		*pte = 0;

		/*
		 * If pmap->pm_asid == -1 (invalid ASID), this pmap has
		 * no TLB entries: they were flushed when its ASID was
		 * taken away, and a new ASID is set up by pmap_activate().
		 */
		if (pmap->pm_asid != -1)
			sh_tlb_invalidate_addr(pmap->pm_asid, va);
	}
}

/*
 * void __pmap_pv_remove(pmap_t pmap, struct vm_page *pg, vaddr_t vaddr):
 *	Remove the physical-to-virtual mapping from the vm_page.
 */
void
__pmap_pv_remove(pmap_t pmap, struct vm_page *pg, vaddr_t vaddr)
{
	struct vm_page_md *pvh;
	struct pv_entry *pv;
	int s;

	s = splvm();
	pvh = &pg->mdpage;
	SLIST_FOREACH(pv, &pvh->pvh_head, pv_link) {
		if (pv->pv_pmap == pmap && pv->pv_va == vaddr) {
			if (SH_HAS_VIRTUAL_ALIAS ||
			    (SH_HAS_WRITEBACK_CACHE &&
			    (pg->mdpage.pvh_flags & PVH_MODIFIED))) {
				/*
				 * Always use index ops so we don't have
				 * to worry about which address space the
				 * VA belongs to.
				 */
				sh_dcache_wbinv_range_index
				    (pv->pv_va, PAGE_SIZE);
			}

			SLIST_REMOVE(&pvh->pvh_head, pv, pv_entry, pv_link);
			__pmap_pv_free(pv);
			break;
		}
	}
#ifdef DEBUG
	/* Check that no duplicate mapping remains. */
	SLIST_FOREACH(pv, &pvh->pvh_head, pv_link)
		KDASSERT(!(pv->pv_pmap == pmap && pv->pv_va == vaddr));
#endif
	splx(s);
}

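/*
 * void pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot):
 *	Enter an unmanaged kernel mapping: shared (PG_SH), under the
 *	kernel ASID 0, cached only when pa is managed memory, and not
 *	tracked by pv entries.
 */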
void
pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
{
	pt_entry_t *pte, entry;

	KDASSERT((va & PGOFSET) == 0);
	KDASSERT(va >= VM_MIN_KERNEL_ADDRESS && va < VM_MAX_KERNEL_ADDRESS);

	entry = (pa & PG_PPN) | PG_V | PG_SH | PG_4K;
	if (prot & VM_PROT_WRITE)
		entry |= (PG_PR_KRW | PG_D);
	else
		entry |= PG_PR_KRO;

	if (PHYS_TO_VM_PAGE(pa))
		entry |= PG_C;

	pte = __pmap_kpte_lookup(va);

	KDASSERT(*pte == 0);
	*pte = entry;

	sh_tlb_update(0, va, entry);
}

void
pmap_kremove(vaddr_t va, vsize_t len)
{
	pt_entry_t *pte;
	vaddr_t eva = va + len;

	KDASSERT((va & PGOFSET) == 0);
	KDASSERT((len & PGOFSET) == 0);
	KDASSERT(va >= VM_MIN_KERNEL_ADDRESS && eva <= VM_MAX_KERNEL_ADDRESS);

	for (; va < eva; va += PAGE_SIZE) {
		pte = __pmap_kpte_lookup(va);
		KDASSERT(pte != NULL);
		if (*pte == 0)
			continue;

		if (SH_HAS_VIRTUAL_ALIAS && PHYS_TO_VM_PAGE(*pte & PG_PPN))
			sh_dcache_wbinv_range(va, PAGE_SIZE);
		*pte = 0;

		sh_tlb_invalidate_addr(0, va);
	}
}

boolean_t
pmap_extract(pmap_t pmap, vaddr_t va, paddr_t *pap)
{
	pt_entry_t *pte;

	/* handle P1 and P2 specially: the PA is the VA minus the segment bits */
	if (pmap == pmap_kernel() && (va >> 30) == 2) {
		if (pap != NULL)
			*pap = va & SH3_PHYS_MASK;
		return (TRUE);
	}

	pte = __pmap_pte_lookup(pmap, va);
	if (pte == NULL || *pte == 0)
		return (FALSE);

	if (pap != NULL)
		*pap = (*pte & PG_PPN) | (va & PGOFSET);

	return (TRUE);
}

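/*
 * void pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot):
 *	Lower the protection of the mappings in [sva, eva).  Removing
 *	all access is handled by pmap_remove().  For each managed page
 *	touched, every other mapping of that page is removed so that
 *	only the mapping whose protection changed stays on the page's
 *	pv list.
 */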
void
pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
{
	boolean_t kernel = pmap == pmap_kernel();
	pt_entry_t *pte, entry;
	vaddr_t va;
	paddr_t pa;
	struct vm_page *pg;
	struct vm_page_md *pvh;
	struct pv_entry *pv, *head;

	sva = trunc_page(sva);

	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
		pmap_remove(pmap, sva, eva);
		return;
	}

	for (va = sva; va < eva; va += PAGE_SIZE) {

		if (((pte = __pmap_pte_lookup(pmap, va)) == NULL) ||
		    (entry = *pte) == 0)
			continue;

		if (SH_HAS_VIRTUAL_ALIAS && (entry & PG_D)) {
			if (!SH_HAS_UNIFIED_CACHE && (prot & VM_PROT_EXECUTE))
				sh_icache_sync_range_index(va, PAGE_SIZE);
			else
				sh_dcache_wbinv_range_index(va, PAGE_SIZE);
		}

		entry &= ~PG_PR_MASK;
		switch (prot) {
		default:
			panic("pmap_protect: invalid protection mode %x", prot);
			/* NOTREACHED */
		case VM_PROT_READ:
			/* FALLTHROUGH */
		case VM_PROT_READ | VM_PROT_EXECUTE:
			entry |= kernel ? PG_PR_KRO : PG_PR_URO;
			break;
		case VM_PROT_READ | VM_PROT_WRITE:
			/* FALLTHROUGH */
		case VM_PROT_ALL:
			entry |= kernel ? PG_PR_KRW : PG_PR_URW;
			break;
		}
		*pte = entry;

		if (pmap->pm_asid != -1)
			sh_tlb_update(pmap->pm_asid, va, entry);

		pa = entry & PG_PPN;
		pg = PHYS_TO_VM_PAGE(pa);
		if (pg == NULL)
			continue;
		pvh = &pg->mdpage;

		while ((pv = SLIST_FIRST(&pvh->pvh_head)) != NULL) {
			if (pv->pv_pmap == pmap && pv->pv_va == va) {
				break;
			}
			pmap_remove(pv->pv_pmap, pv->pv_va,
			    pv->pv_va + PAGE_SIZE);
		}
		/* the matching pv is first in the list */
		SLIST_FOREACH(pv, &pvh->pvh_head, pv_link) {
			if (pv->pv_pmap == pmap && pv->pv_va == va) {
				pv->pv_prot = prot;
				break;
			}
		}
		/* remove the rest of the elements */
		head = SLIST_FIRST(&pvh->pvh_head);
		if (head != NULL)
			while ((pv = SLIST_NEXT(head, pv_link)) != NULL)
				pmap_remove(pv->pv_pmap, pv->pv_va,
				    pv->pv_va + PAGE_SIZE);
	}
}

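/*
 * void pmap_page_protect(struct vm_page *pg, vm_prot_t prot):
 *	Lower the protection of every mapping of the page: a no-op for
 *	read/write, a downgrade to read-only via pmap_protect() for
 *	read or read/execute, and removal of all mappings otherwise.
 */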
void
pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
{
	struct vm_page_md *pvh = &pg->mdpage;
	struct pv_entry *pv;
	struct pmap *pmap;
	vaddr_t va;
	int s;

	switch (prot) {
	case VM_PROT_READ | VM_PROT_WRITE:
		/* FALLTHROUGH */
	case VM_PROT_ALL:
		break;

	case VM_PROT_READ:
		/* FALLTHROUGH */
	case VM_PROT_READ | VM_PROT_EXECUTE:
		s = splvm();
		SLIST_FOREACH(pv, &pvh->pvh_head, pv_link) {
			pmap = pv->pv_pmap;
			va = pv->pv_va;

			KDASSERT(pmap);
			pmap_protect(pmap, va, va + PAGE_SIZE, prot);
		}
		splx(s);
		break;

	default:
		/* Remove all */
		s = splvm();
		while ((pv = SLIST_FIRST(&pvh->pvh_head)) != NULL) {
			va = pv->pv_va;
			pmap_remove(pv->pv_pmap, va, va + PAGE_SIZE);
		}
		splx(s);
	}
}

void
pmap_unwire(pmap_t pmap, vaddr_t va)
{
	pt_entry_t *pte, entry;

	if ((pte = __pmap_pte_lookup(pmap, va)) == NULL ||
	    (entry = *pte) == 0 ||
	    (entry & _PG_WIRED) == 0)
		return;

	*pte = entry & ~_PG_WIRED;
	pmap->pm_stats.wired_count--;
}

void
pmap_proc_iflush(struct proc *p, vaddr_t va, size_t len)
{
	if (!SH_HAS_UNIFIED_CACHE)
		sh_icache_sync_range_index(va, len);
}

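/*
 * pmap_zero_page() and pmap_copy_page() operate on physical pages
 * through the unmapped segments: via P2 (uncached) after writing back
 * the data cache when virtual aliases are possible, via P1 (cached)
 * otherwise.
 */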
void
pmap_zero_page(vm_page_t pg)
{
	paddr_t phys = VM_PAGE_TO_PHYS(pg);

	if (SH_HAS_VIRTUAL_ALIAS) {	/* don't pollute cache */
		/* sync cache since we access via P2. */
		sh_dcache_wbinv_all();
		memset((void *)SH3_PHYS_TO_P2SEG(phys), 0, PAGE_SIZE);
	} else {
		memset((void *)SH3_PHYS_TO_P1SEG(phys), 0, PAGE_SIZE);
	}
}

void
pmap_copy_page(vm_page_t srcpg, vm_page_t dstpg)
{
	paddr_t src, dst;

	src = VM_PAGE_TO_PHYS(srcpg);
	dst = VM_PAGE_TO_PHYS(dstpg);

	if (SH_HAS_VIRTUAL_ALIAS) {	/* don't pollute cache */
		/* sync cache since we access via P2. */
		sh_dcache_wbinv_all();
		memcpy((void *)SH3_PHYS_TO_P2SEG(dst),
		    (void *)SH3_PHYS_TO_P2SEG(src), PAGE_SIZE);
	} else {
		memcpy((void *)SH3_PHYS_TO_P1SEG(dst),
		    (void *)SH3_PHYS_TO_P1SEG(src), PAGE_SIZE);
	}
}

boolean_t
pmap_is_referenced(struct vm_page *pg)
{
	return ((pg->mdpage.pvh_flags & PVH_REFERENCED) ? TRUE : FALSE);
}

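/*
 * boolean_t pmap_clear_reference(struct vm_page *pg):
 *	Clear the referenced attribute of the page.  PG_V is cleared in
 *	every mapping of the page and the TLB entries are flushed, so
 *	the next access faults and __pmap_pte_load() records a fresh
 *	reference.
 */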
boolean_t
pmap_clear_reference(struct vm_page *pg)
{
	struct vm_page_md *pvh = &pg->mdpage;
	struct pv_entry *pv;
	pt_entry_t *pte;
	pmap_t pmap;
	vaddr_t va;
	int s;

	if ((pg->mdpage.pvh_flags & PVH_REFERENCED) == 0)
		return (FALSE);

	pg->mdpage.pvh_flags &= ~PVH_REFERENCED;

	s = splvm();
	/* Restart reference bit emulation */
	SLIST_FOREACH(pv, &pvh->pvh_head, pv_link) {
		pmap = pv->pv_pmap;
		va = pv->pv_va;

		if ((pte = __pmap_pte_lookup(pmap, va)) == NULL)
			continue;
		if ((*pte & PG_V) == 0)
			continue;
		*pte &= ~PG_V;

		if (pmap->pm_asid != -1)
			sh_tlb_invalidate_addr(pmap->pm_asid, va);
	}
	splx(s);

	return (TRUE);
}

boolean_t
pmap_is_modified(struct vm_page *pg)
{
	return ((pg->mdpage.pvh_flags & PVH_MODIFIED) ? TRUE : FALSE);
}

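/*
 * boolean_t pmap_clear_modify(struct vm_page *pg):
 *	Clear the modified attribute of the page.  Dirty cache lines are
 *	written back first, then PG_D is cleared in every mapping and
 *	its TLB entry invalidated, so the next write faults and
 *	__pmap_pte_load() marks the page modified again.
 */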
boolean_t
pmap_clear_modify(struct vm_page *pg)
{
	struct vm_page_md *pvh = &pg->mdpage;
	struct pv_entry *pv;
	struct pmap *pmap;
	pt_entry_t *pte, entry;
	boolean_t modified;
	vaddr_t va;
	int s;

	modified = pvh->pvh_flags & PVH_MODIFIED;
	if (!modified)
		return (FALSE);

	pvh->pvh_flags &= ~PVH_MODIFIED;

	s = splvm();
	if (SLIST_EMPTY(&pvh->pvh_head)) {	/* no map on this page */
		splx(s);
		return (TRUE);
	}

	/* Write back the cache; TLB entries are invalidated below. */
	if (!SH_HAS_VIRTUAL_ALIAS && SH_HAS_WRITEBACK_CACHE)
		sh_dcache_wbinv_all();

	SLIST_FOREACH(pv, &pvh->pvh_head, pv_link) {
		pmap = pv->pv_pmap;
		va = pv->pv_va;
		if ((pte = __pmap_pte_lookup(pmap, va)) == NULL)
			continue;
		entry = *pte;
		if ((entry & PG_D) == 0)
			continue;

		if (SH_HAS_VIRTUAL_ALIAS)
			sh_dcache_wbinv_range_index(va, PAGE_SIZE);

		*pte = entry & ~PG_D;
		if (pmap->pm_asid != -1)
			sh_tlb_invalidate_addr(pmap->pm_asid, va);
	}
	splx(s);

	return (TRUE);
}

#ifdef SH4
/*
 * pmap_prefer(vaddr_t foff, vaddr_t *vap)
 *
 * Find first virtual address >= *vap that doesn't cause
 * a virtual cache alias against vaddr_t foff.
 */
void
pmap_prefer(vaddr_t foff, vaddr_t *vap)
{
	vaddr_t va;

	if (SH_HAS_VIRTUAL_ALIAS) {
		va = *vap;

		*vap = va + ((foff - va) & sh_cache_prefer_mask);
	}
}
#endif /* SH4 */

/*
 * pv_entry pool allocator:
 *	void *__pmap_pv_page_alloc(struct pool *pool, int flags):
 *	void __pmap_pv_page_free(struct pool *pool, void *v):
 */
void *
__pmap_pv_page_alloc(struct pool *pool, int flags)
{
	struct vm_page *pg;

	pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE);
	if (pg == NULL)
		return (NULL);

	return ((void *)SH3_PHYS_TO_P1SEG(VM_PAGE_TO_PHYS(pg)));
}

void
__pmap_pv_page_free(struct pool *pool, void *v)
{
	vaddr_t va = (vaddr_t)v;

	/* Invalidate cache for next use of this page */
	if (SH_HAS_VIRTUAL_ALIAS)
		sh_icache_sync_range_index(va, PAGE_SIZE);
	uvm_pagefree(PHYS_TO_VM_PAGE(SH3_P1SEG_TO_PHYS(va)));
}

/*
 * pt_entry_t *__pmap_pte_alloc(pmap_t pmap, vaddr_t va):
 *	Look up the page table entry; if the page table page does not
 *	exist yet, allocate it.  Page tables are accessed via P1.
 */
pt_entry_t *
__pmap_pte_alloc(pmap_t pmap, vaddr_t va)
{
	struct vm_page *pg;
	pt_entry_t *ptp, *pte;

	if ((pte = __pmap_pte_lookup(pmap, va)) != NULL)
		return (pte);

	/* Allocate page table (not managed page) */
	pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE | UVM_PGA_ZERO);
	if (pg == NULL)
		return NULL;

	ptp = (pt_entry_t *)SH3_PHYS_TO_P1SEG(VM_PAGE_TO_PHYS(pg));
	pmap->pm_ptp[__PMAP_PTP_INDEX(va)] = ptp;

	return (ptp + __PMAP_PTP_OFSET(va));
}

/*
 * pt_entry_t *__pmap_pte_lookup(pmap_t pmap, vaddr_t va):
 *	Look up the page table entry; returns NULL if the page table
 *	page is not allocated.
 */
pt_entry_t *
__pmap_pte_lookup(pmap_t pmap, vaddr_t va)
{
	pt_entry_t *ptp;

	if (pmap == pmap_kernel())
		return (__pmap_kpte_lookup(va));

	/* Lookup page table page */
	ptp = pmap->pm_ptp[__PMAP_PTP_INDEX(va)];
	if (ptp == NULL)
		return (NULL);

	return (ptp + __PMAP_PTP_OFSET(va));
}

/*
 * pt_entry_t *__pmap_kpte_lookup(vaddr_t va):
 *	kernel virtual only version of __pmap_pte_lookup().
 */
pt_entry_t *
__pmap_kpte_lookup(vaddr_t va)
{
	pt_entry_t *ptp;

	ptp =
	    __pmap_kernel.pm_ptp[__PMAP_PTP_INDEX(va - VM_MIN_KERNEL_ADDRESS)];
	return (ptp ? ptp + __PMAP_PTP_OFSET(va) : NULL);
}

/*
 * boolean_t __pmap_pte_load(pmap_t pmap, vaddr_t va, int flags):
 *	Look up the page table entry and, if one exists, load it into
 *	the TLB.  "flags" specifies whether to emulate the referenced
 *	and/or modified bits.
 */
boolean_t
__pmap_pte_load(pmap_t pmap, vaddr_t va, int flags)
{
	struct vm_page *pg;
	pt_entry_t *pte;
	pt_entry_t entry;

	KDASSERT((((int)va < 0) && (pmap == pmap_kernel())) ||
	    (((int)va >= 0) && (pmap != pmap_kernel())));

	/* Lookup page table entry */
	if (((pte = __pmap_pte_lookup(pmap, va)) == NULL) ||
	    ((entry = *pte) == 0))
		return (FALSE);

	KDASSERT(va != 0);

	/* Emulate reference/modified tracking for managed page. */
	if (flags != 0 && (pg = PHYS_TO_VM_PAGE(entry & PG_PPN)) != NULL) {
		if (flags & PVH_REFERENCED) {
			pg->mdpage.pvh_flags |= PVH_REFERENCED;
			entry |= PG_V;
		}
		if (flags & PVH_MODIFIED) {
			pg->mdpage.pvh_flags |= PVH_MODIFIED;
			entry |= PG_D;
		}
		*pte = entry;
	}

	/* When pmap has valid ASID, register to TLB */
	if (pmap->pm_asid != -1)
		sh_tlb_update(pmap->pm_asid, va, entry);

	return (TRUE);
}

/*
 * int __pmap_asid_alloc(void):
 *	Allocate a new ASID.  If all ASIDs are in use, steal one from
 *	another process.
 */
int
__pmap_asid_alloc()
{
	struct proc *p;
	int i, j, k, n, map, asid;

	/* Search free ASID */
	i = __pmap_asid.hint >> 5;
	n = i + 8;
	for (; i < n; i++) {
		k = i & 0x7;
		map = __pmap_asid.map[k];
		for (j = 0; j < 32; j++) {
			if ((map & (1 << j)) == 0 && (k + j) != 0) {
				__pmap_asid.map[k] |= (1 << j);
				__pmap_asid.hint = (k << 5) + j;
				return (__pmap_asid.hint);
			}
		}
	}

	/* Steal ASID */
	LIST_FOREACH(p, &allproc, p_list) {
		if ((asid = p->p_vmspace->vm_map.pmap->pm_asid) > 0) {
			pmap_t pmap = p->p_vmspace->vm_map.pmap;
			pmap->pm_asid = -1;
			__pmap_asid.hint = asid;
			/* Invalidate all TLB entries for the stolen ASID */
			sh_tlb_invalidate_asid(asid);

			return (__pmap_asid.hint);
		}
	}

	panic("No ASID allocated.");
	/* NOTREACHED */
}

/*
 * void __pmap_asid_free(int asid):
 *	Return an unused ASID to the pool and remove all TLB entries
 *	for that ASID.
 */
void
__pmap_asid_free(int asid)
{
	int i;

	if (asid < 1)	/* Don't invalidate kernel ASID 0 */
		return;

	sh_tlb_invalidate_asid(asid);

	i = asid >> 5;
	__pmap_asid.map[i] &= ~(1 << (asid - (i << 5)));
}