/*	$OpenBSD: pmap.c,v 1.145 2007/06/06 17:15:12 deraadt Exp $	*/
/*	$NetBSD: pmap.c,v 1.118 1998/05/19 19:00:18 thorpej Exp $ */

/*
 * Copyright (c) 1996
 *	The President and Fellows of Harvard College. All rights reserved.
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California. All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by Harvard University.
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Aaron Brown and
 *	Harvard University.
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap.c	8.4 (Berkeley) 2/5/94
 *
 */

/*
 * SPARC physical map management code.
 * Does not function on multiprocessors (yet).
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/malloc.h>
#include <sys/exec.h>
#include <sys/core.h>
#include <sys/kcore.h>
#include <sys/lock.h>

#include <uvm/uvm.h>
#include <sys/pool.h>

#include <machine/autoconf.h>
#include <machine/bsd_openprom.h>
#include <machine/oldmon.h>
#include <machine/cpu.h>
#include <machine/ctlreg.h>
#include <machine/kcore.h>

#include <sparc/sparc/asm.h>
#include <sparc/sparc/cache.h>
#include <sparc/sparc/vaddrs.h>
#include <sparc/sparc/cpuvar.h>

#ifdef DEBUG
#define PTE_BITS "\20\40V\37W\36S\35NC\33IO\32U\31M"
#define PTE_BITS4M "\20\10C\7M\6R\5ACC3\4ACC2\3ACC1\2TYP2\1TYP1"
#endif

/*
 * The SPARCstation offers us the following challenges:
 *
 * 1. A virtual address cache. This is, strictly speaking, not
 *    part of the architecture, but the code below assumes one.
 *    This is a write-through cache on the 4c and a write-back cache
 *    on others.
 *
 * 2. (4/4c only) An MMU that acts like a cache. There is not enough
 *    space in the MMU to map everything all the time. Instead, we need
 *    to load MMU with the `working set' of translations for each
 *    process. The sun4m does not act like a cache; tables are maintained
 *    in physical memory.
 *
 * 3. Segmented virtual and physical spaces. The upper 12 bits of
 *    a virtual address (the virtual segment) index a segment table,
 *    giving a physical segment. The physical segment selects a
 *    `Page Map Entry Group' (PMEG) and the virtual page number---the
 *    next 5 or 6 bits of the virtual address---select the particular
 *    `Page Map Entry' for the page. We call the latter a PTE and
 *    call each Page Map Entry Group a pmeg (for want of a better name).
 *    Note that the sun4m has an unsegmented 36-bit physical space.
 *
 *    Since there are no valid bits in the segment table, the only way
 *    to have an invalid segment is to make one full pmeg of invalid PTEs.
 *    We use the last one (since the ROM does as well) (sun4/4c only).
 *
 * 4. Discontiguous physical pages. The Mach VM expects physical pages
 *    to be in one sequential lump.
 *
 * 5. The MMU is always on: it is not possible to disable it. This is
 *    mainly a startup hassle.
 */

struct pmap_stats {
	int	ps_alias_uncache;	/* # of uncaches due to bad aliases */
	int	ps_alias_recache;	/* # of recaches due to bad aliases */
	int	ps_unlink_pvfirst;	/* # of pv_unlinks on head */
	int	ps_unlink_pvsearch;	/* # of pv_unlink searches */
	int	ps_changeprots;		/* # of calls to changeprot */
	int	ps_useless_changeprots;	/* # of changeprots for wiring */
	int	ps_enter_firstpv;	/* pv heads entered */
	int	ps_enter_secondpv;	/* pv nonheads entered */
	int	ps_useless_changewire;	/* useless wiring changes */
	int	ps_npg_prot_all;	/* # of active pages protected */
	int	ps_npg_prot_actual;	/* # pages actually affected */
	int	ps_npmeg_free;		/* # of free pmegs */
	int	ps_npmeg_locked;	/* # of pmegs on locked list */
	int	ps_npmeg_lru;		/* # of pmegs on lru list */
} pmap_stats;

#ifdef DEBUG
#define	PDB_CREATE	0x0001
#define	PDB_DESTROY	0x0002
#define	PDB_REMOVE	0x0004
#define	PDB_CHANGEPROT	0x0008
#define	PDB_ENTER	0x0010
#define	PDB_FOLLOW	0x0020

#define	PDB_MMU_ALLOC	0x0100
#define	PDB_MMU_STEAL	0x0200
#define	PDB_CTX_ALLOC	0x0400
#define	PDB_CTX_STEAL	0x0800
#define	PDB_MMUREG_ALLOC	0x1000
#define	PDB_MMUREG_STEAL	0x2000
#define	PDB_CACHESTUFF	0x4000
#define	PDB_SWITCHMAP	0x8000
#define	PDB_SANITYCHK	0x10000
int	pmapdebug = 0;
#endif

/*
 * Internal helpers.
 */
static __inline struct pvlist *pvhead(int);

#if defined(SUN4M)
static u_int	VA2PA(caddr_t);
#endif

/*
 * Given a page number, return the head of its pvlist.
 */
static __inline struct pvlist *
pvhead(int pnum)
{
	int bank, off;

	bank = vm_physseg_find(pnum, &off);
	if (bank == -1)
		return NULL;

	return &vm_physmem[bank].pgs[off].mdpage.pv_head;
}

struct pool pvpool;

#if defined(SUN4M)
/*
 * Memory pools and back-end supplier for SRMMU page tables.
 * Share a pool between the level 2 and level 3 page tables,
 * since these are equal in size.
 */
static struct pool L1_pool;
static struct pool L23_pool;
void	*pgt_page_alloc(struct pool *, int);
void	pgt_page_free(struct pool *, void *);

struct pool_allocator pgt_allocator = {
	pgt_page_alloc, pgt_page_free, 0,
};

void	pcache_flush(caddr_t, caddr_t, int);
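/*
 * Write back and invalidate `n' bytes of page table memory starting at
 * (va, pa), one four-byte word at a time, via the CPU's
 * pcache_flush_line hook.
 */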
void
pcache_flush(va, pa, n)
	caddr_t	va, pa;
	int	n;
{
	void (*f)(int,int) = cpuinfo.pcache_flush_line;

	while ((n -= 4) >= 0)
		(*f)((u_int)va+n, (u_int)pa+n);
}

/*
 * Page table pool back-end.
 */
void *
pgt_page_alloc(struct pool *pp, int flags)
{
	caddr_t p;

	p = (caddr_t)uvm_km_kmemalloc(kernel_map, uvm.kernel_object,
	    PAGE_SIZE, UVM_KMF_NOWAIT);
	if (p != NULL && ((cpuinfo.flags & CPUFLG_CACHEPAGETABLES) == 0)) {
		pcache_flush(p, (caddr_t)VA2PA(p), PAGE_SIZE);
		kvm_uncache(p, 1);
	}
	return (p);
}

void
pgt_page_free(struct pool *pp, void *v)
{
	uvm_km_free(kernel_map, (vaddr_t)v, PAGE_SIZE);
}
#endif /* SUN4M */

/*
 * Each virtual segment within each pmap is either valid or invalid.
 * It is valid if pm_npte[VA_VSEG(va)] is not 0. This does not mean
 * it is in the MMU, however; that is true iff pm_segmap[VA_VSEG(va)]
 * does not point to the invalid PMEG.
 *
 * In the older SPARC architectures (pre-4m), page tables are cached in the
 * MMU. The following discussion applies to these architectures:
 *
 * If a virtual segment is valid and loaded, the correct PTEs appear
 * in the MMU only. If it is valid and unloaded, the correct PTEs appear
 * in the pm_pte[VA_VSEG(va)] only. However, some effort is made to keep
 * the software copies consistent enough with the MMU so that libkvm can
 * do user address translations. In particular, pv_changepte() and
 * pmap_enu() maintain consistency, while less critical changes are
 * not maintained. pm_pte[VA_VSEG(va)] always points to space for those
 * PTEs, unless this is the kernel pmap, in which case pm_pte[x] is not
 * used (sigh).
 *
 * Each PMEG in the MMU is either free or contains PTEs corresponding to
 * some pmap and virtual segment. If it contains some PTEs, it also contains
 * reference and modify bits that belong in the pv_table. If we need
 * to steal a PMEG from some process (if we need one and none are free)
 * we must copy the ref and mod bits, and update pm_segmap in the other
 * pmap to show that its virtual segment is no longer in the MMU.
 *
 * There are 128 PMEGs in a small Sun-4, of which only a few dozen are
 * tied down permanently, leaving `about' 100 to be spread among
 * running processes. These are managed as an LRU cache. Before
 * calling the VM paging code for a user page fault, the fault handler
 * calls mmu_load(pmap, va) to try to get a set of PTEs put into the
 * MMU. mmu_load will check the validity of the segment and tell whether
 * it did something.
 *
 * Since I hate the name PMEG I call this data structure an `mmu entry'.
 * Each mmuentry is on exactly one of three `usage' lists: free, LRU,
 * or locked. The LRU list is for user processes; the locked list is
 * for kernel entries; both are doubly linked queues headed by `mmuhd's.
 * The free list is a simple list, headed by a free list pointer.
 *
 * In the sun4m architecture using the SPARC Reference MMU (SRMMU), three
 * levels of page tables are maintained in physical memory. We use the same
 * structures as with the 3-level old-style MMU (pm_regmap, pm_segmap,
 * rg_segmap, sg_pte, etc) to maintain kernel-edible page tables; we also
 * build a parallel set of physical tables that can be used by the MMU.
 * (XXX: This seems redundant, but is it necessary for the unified kernel?)
 *
 * If a virtual segment is valid, its entries will be in both parallel lists.
 * If it is not valid, then its entry in the kernel tables will be zero, and
 * its entry in the MMU tables will either be nonexistent or zero as well.
 *
 * The Reference MMU generally uses a Translation Look-aside Buffer (TLB)
 * to cache the result of recently executed page table walks. When
 * manipulating page tables, we need to ensure consistency of the
 * in-memory and TLB copies of the page table entries. This is handled
 * by flushing (and invalidating) a TLB entry when appropriate before
 * altering an in-memory page table entry.
 */
struct mmuentry {
	TAILQ_ENTRY(mmuentry)	me_list;	/* usage list link */
	TAILQ_ENTRY(mmuentry)	me_pmchain;	/* pmap owner link */
	struct	pmap *me_pmap;		/* pmap, if in use */
	u_short	me_vreg;		/* associated virtual region/segment */
	u_short	me_vseg;		/* associated virtual region/segment */
	u_short	me_cookie;		/* hardware SMEG/PMEG number */
};
struct mmuentry *mmusegments;	/* allocated in pmap_bootstrap */
struct mmuentry *mmuregions;	/* allocated in pmap_bootstrap */

struct mmuhd segm_freelist, segm_lru, segm_locked;
struct mmuhd region_freelist, region_lru, region_locked;

int	seginval;	/* [4/4c] the invalid segment number */
int	reginval;	/* [4/3mmu] the invalid region number */

/*
 * (sun4/4c)
 * A context is simply a small number that dictates which set of 4096
 * segment map entries the MMU uses. The Sun 4c has eight such sets.
 * These are allotted in an `almost MRU' fashion.
 * (sun4m)
 * A context is simply a small number that indexes the context table, the
 * root-level page table mapping 4G areas. Each entry in this table points
 * to a 1st-level region table. A SPARC reference MMU will usually use 16
 * such contexts, but some offer as many as 64k contexts; the theoretical
 * maximum is 2^32 - 1, but this would create overlarge context tables.
 *
 * Each context is either free or attached to a pmap.
 *
 * Since the virtual address cache is tagged by context, when we steal
 * a context we have to flush (that part of) the cache.
 */
union ctxinfo {
	union	ctxinfo *c_nextfree;	/* free list (if free) */
	struct	pmap *c_pmap;		/* pmap (if busy) */
};

#define ncontext	(cpuinfo.mmu_ncontext)
#define ctx_kick	(cpuinfo.ctx_kick)
#define ctx_kickdir	(cpuinfo.ctx_kickdir)
#define ctx_freelist	(cpuinfo.ctx_freelist)

#if 0
union	ctxinfo *ctxinfo;	/* allocated in pmap_bootstrap */

union	ctxinfo *ctx_freelist;	/* context free list */
int	ctx_kick;		/* allocation rover when none free */
int	ctx_kickdir;		/* ctx_kick roves both directions */

char	*ctxbusyvector;		/* [4m] tells what contexts are busy (XXX)*/
#endif

caddr_t	vpage[2];		/* two reserved MD virtual pages */

smeg_t	tregion;		/* [4/3mmu] Region for temporary mappings */

struct pmap	kernel_pmap_store;		/* the kernel's pmap */
struct regmap	kernel_regmap_store[NKREG];	/* the kernel's regmap */
struct segmap	kernel_segmap_store[NKREG*NSEGRG];/* the kernel's segmaps */

#if defined(SUN4M)
u_int	*kernel_regtable_store;	/* 1k of storage to map the kernel */
u_int	*kernel_segtable_store;	/* 2k of storage to map the kernel */
u_int	*kernel_pagtable_store;	/* 128k of storage to map the kernel */
#endif

vaddr_t	virtual_avail;		/* first free virtual page number */
vaddr_t	virtual_end;		/* last free virtual page number */
paddr_t	phys_avail;		/* first free physical page
				   XXX - pmap_pa_exists needs this */
vaddr_t	pagetables_start, pagetables_end;

/*
 * XXX - these have to be global for dumpsys()
 */
#define	MA_SIZE	32		/* size of memory descriptor arrays */
struct	memarr pmemarr[MA_SIZE];/* physical memory regions */
int	npmemarr;		/* number of entries in pmemarr */

static void pmap_page_upload(paddr_t);
void	pmap_pinit(pmap_t);
void	pmap_release(pmap_t);

#if defined(SUN4) || defined(SUN4C)
int	mmu_has_hole;
#endif

vaddr_t	prom_vstart;		/* For /dev/kmem */
vaddr_t	prom_vend;

#if defined(SUN4)
/*
 * [sun4]: segfixmask: on some systems (4/110) "getsegmap()" returns a
 * partly invalid value. getsegmap returns a 16 bit value on the sun4,
 * but only the first 8 or so bits are valid (the rest are *supposed* to
 * be zero). On the 4/110 the bits that are supposed to be zero are
 * all one instead. e.g. KERNBASE is usually mapped by pmeg number zero.
 * On a 4/300 getsegmap(KERNBASE) == 0x0000, but
 * on a 4/100 getsegmap(KERNBASE) == 0xff00
 *
 * This confuses mmu_reservemon() and causes it to not reserve the PROM's
 * pmegs. Then the PROM's pmegs get used during autoconfig and everything
 * falls apart! (not very fun to debug, BTW.)
 *
 * solution: mask the invalid bits in the getsegmap macro.
 */

static u_long segfixmask = 0xffffffff; /* all bits valid to start */
#else
#define	segfixmask 0xffffffff	/* It's in getsegmap's scope */
#endif

/*
 * pseudo-functions for mnemonic value
 */
#define	getcontext4()		lduba(AC_CONTEXT, ASI_CONTROL)
#define	getcontext4m()		lda(SRMMU_CXR, ASI_SRMMU)
#define	getcontext()		(CPU_ISSUN4M \
					? getcontext4m() \
					: getcontext4() )

#define	setcontext4(c)		stba(AC_CONTEXT, ASI_CONTROL, c)
#define	setcontext4m(c)		sta(SRMMU_CXR, ASI_SRMMU, c)
#define	setcontext(c)		(CPU_ISSUN4M \
					? setcontext4m(c) \
					: setcontext4(c) )

#define	getsegmap(va)		(CPU_ISSUN4C \
					? lduba(va, ASI_SEGMAP) \
					: (lduha(va, ASI_SEGMAP) & segfixmask))
#define	setsegmap(va, pmeg)	(CPU_ISSUN4C \
					? stba(va, ASI_SEGMAP, pmeg) \
					: stha(va, ASI_SEGMAP, pmeg))

/* 3-level sun4 MMU only: */
#define	getregmap(va)		((unsigned)lduha((va)+2, ASI_REGMAP) >> 8)
#define	setregmap(va, smeg)	stha((va)+2, ASI_REGMAP, (smeg << 8))

#if defined(SUN4M)
#define	getpte4m(va)		lda((va & 0xFFFFF000) | ASI_SRMMUFP_L3, \
				    ASI_SRMMUFP)
u_int	*getptep4m(struct pmap *, vaddr_t);
static __inline void	setpgt4m(int *, int);
void	setpte4m(vaddr_t va, int pte);
#endif

#if defined(SUN4) || defined(SUN4C)
#define	getpte4(va)		lda(va, ASI_PTE)
#define	setpte4(va, pte)	sta(va, ASI_PTE, pte)
#endif

/* Function pointer messiness for supporting multiple sparc architectures
 * within a single kernel: notice that there are two versions of many of the
 * functions within this file/module, one for the sun4/sun4c and the other
 * for the sun4m. For performance reasons (since things like pte bits don't
 * map nicely between the two architectures), there are separate functions
 * rather than unified functions which test the cputyp variable. If only
 * one architecture is being used, then the non-suffixed function calls
 * are macro-translated into the appropriate xxx4_4c or xxx4m call. If
 * multiple architectures are defined, the calls translate to (*xxx_p),
 * i.e. they indirect through function pointers initialized as appropriate
 * to the run-time architecture in pmap_bootstrap. See also pmap.h.
 */

#if defined(SUN4M)
static void mmu_setup4m_L1(int, struct pmap *);
static void mmu_setup4m_L2(int, struct regmap *);
static void mmu_setup4m_L3(int, struct segmap *);
void	mmu_reservemon4m(struct pmap *);

void	pmap_rmk4m(struct pmap *, vaddr_t, vaddr_t, int, int);
void	pmap_rmu4m(struct pmap *, vaddr_t, vaddr_t, int, int);
int	pmap_enk4m(struct pmap *, vaddr_t, vm_prot_t,
		   int, struct pvlist *, int);
int	pmap_enu4m(struct pmap *, vaddr_t, vm_prot_t,
		   int, struct pvlist *, int);
void	pv_changepte4m(struct pvlist *, int, int);
int	pv_syncflags4m(struct pvlist *);
int	pv_link4m(struct pvlist *, struct pmap *, vaddr_t, int);
void	pv_unlink4m(struct pvlist *, struct pmap *, vaddr_t);
#endif

#if defined(SUN4) || defined(SUN4C)
void	mmu_reservemon4_4c(int *, int *);
void	pmap_rmk4_4c(struct pmap *, vaddr_t, vaddr_t, int, int);
void	pmap_rmu4_4c(struct pmap *, vaddr_t, vaddr_t, int, int);
int	pmap_enk4_4c(struct pmap *, vaddr_t, vm_prot_t,
		     int, struct pvlist *, int);
int	pmap_enu4_4c(struct pmap *, vaddr_t, vm_prot_t,
		     int, struct pvlist *, int);
void	pv_changepte4_4c(struct pvlist *, int, int);
int	pv_syncflags4_4c(struct pvlist *);
int	pv_link4_4c(struct pvlist *, struct pmap *, vaddr_t, int);
void	pv_unlink4_4c(struct pvlist *, struct pmap *, vaddr_t);
#endif

#if !defined(SUN4M) && (defined(SUN4) || defined(SUN4C))
#define	pmap_rmk	pmap_rmk4_4c
#define	pmap_rmu	pmap_rmu4_4c

#elif defined(SUN4M) && !(defined(SUN4) || defined(SUN4C))
#define	pmap_rmk	pmap_rmk4m
#define	pmap_rmu	pmap_rmu4m

#else /* must use function pointers */

/* function pointer declarations */
/* from pmap.h: */
boolean_t	(*pmap_clear_modify_p)(struct vm_page *);
boolean_t	(*pmap_clear_reference_p)(struct vm_page *);
int		(*pmap_enter_p)(pmap_t, vaddr_t, paddr_t, vm_prot_t, int);
boolean_t	(*pmap_extract_p)(pmap_t, vaddr_t, paddr_t *);
boolean_t	(*pmap_is_modified_p)(struct vm_page *);
boolean_t	(*pmap_is_referenced_p)(struct vm_page *);
void		(*pmap_kenter_pa_p)(vaddr_t, paddr_t, vm_prot_t);
void		(*pmap_page_protect_p)(struct vm_page *, vm_prot_t);
void		(*pmap_protect_p)(pmap_t, vaddr_t, vaddr_t, vm_prot_t);
void		(*pmap_copy_page_p)(struct vm_page *, struct vm_page *);
void		(*pmap_zero_page_p)(struct vm_page *);
void		(*pmap_changeprot_p)(pmap_t, vaddr_t, vm_prot_t, int);
/* local: */
void		(*pmap_rmk_p)(struct pmap *, vaddr_t, vaddr_t, int, int);
void		(*pmap_rmu_p)(struct pmap *, vaddr_t, vaddr_t, int, int);

#define	pmap_rmk	(*pmap_rmk_p)
#define	pmap_rmu	(*pmap_rmu_p)

#endif

/* --------------------------------------------------------------*/

/*
 * Next we have some Sun4m-specific routines which have no 4/4c
 * counterparts, or which are 4/4c macros.
 */

#if defined(SUN4M)

/*
 * Macros which implement SRMMU TLB flushing/invalidation
 */

#define tlb_flush_page(va)	\
	sta(((vaddr_t)(va) & ~0xfff) | ASI_SRMMUFP_L3, ASI_SRMMUFP, 0)
#define tlb_flush_segment(vr, vs) \
	sta(((vr)<<RGSHIFT) | ((vs)<<SGSHIFT) | ASI_SRMMUFP_L2, ASI_SRMMUFP, 0)
#define tlb_flush_context()	sta(ASI_SRMMUFP_L1, ASI_SRMMUFP, 0)
#define tlb_flush_all()		sta(ASI_SRMMUFP_LN, ASI_SRMMUFP, 0)
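
/*
 * A store through ASI_SRMMUFP flushes the matching TLB entries (a load
 * probes the page tables instead); e.g. tlb_flush_page(va) drops the
 * level-3 (4KB page) entry covering va, and tlb_flush_context() drops
 * every entry belonging to the current context.
 */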

/*
 * VA2PA(addr) -- converts a virtual address to a physical address using
 * the MMU's currently-installed page tables. As a side effect, the address
 * translation used may cause the associated pte to be encached. The correct
 * context for VA must be set before this is called.
 *
 * This routine should work with any level of mapping, as it is used
 * during bootup to interact with the ROM's initial L1 mapping of the kernel.
 */
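/*
 * Each probe level below covers a correspondingly larger mapping:
 * L3 = 4KB pages (offset mask 0xfff), L2 = 256KB (0x3ffff),
 * L1 = 16MB (0xffffff), L0 = 4GB; hence the widening offset masks
 * in the returns below.
 */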
static u_int
VA2PA(addr)
	caddr_t addr;
{
	u_int pte;

	/* we'll use that handy SRMMU flush/probe! %%%: make consts below! */
	/* Try each level in turn until we find a valid pte. Otherwise panic */

	pte = lda(((u_int)addr & ~0xfff) | ASI_SRMMUFP_L3, ASI_SRMMUFP);
	/* Unlock fault status; required on Hypersparc modules */
	(void)lda(SRMMU_SFSR, ASI_SRMMU);
	if ((pte & SRMMU_TETYPE) == SRMMU_TEPTE)
		return (((pte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT) |
			((u_int)addr & 0xfff));

	/* A `TLB Flush Entire' is required before any L0, L1 or L2 probe */
	tlb_flush_all();

	pte = lda(((u_int)addr & ~0xfff) | ASI_SRMMUFP_L2, ASI_SRMMUFP);
	if ((pte & SRMMU_TETYPE) == SRMMU_TEPTE)
		return (((pte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT) |
			((u_int)addr & 0x3ffff));
	pte = lda(((u_int)addr & ~0xfff) | ASI_SRMMUFP_L1, ASI_SRMMUFP);
	if ((pte & SRMMU_TETYPE) == SRMMU_TEPTE)
		return (((pte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT) |
			((u_int)addr & 0xffffff));
	pte = lda(((u_int)addr & ~0xfff) | ASI_SRMMUFP_L0, ASI_SRMMUFP);
	if ((pte & SRMMU_TETYPE) == SRMMU_TEPTE)
		return (((pte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT) |
			((u_int)addr & 0xffffffff));

	panic("VA2PA: Asked to translate unmapped VA %p", addr);
}

/*
 * Get the pointer to the pte for the given (pmap, va).
 *
 * Assumes level 3 mapping (for now).
 */
u_int *
getptep4m(pm, va)
	struct pmap *pm;
	vaddr_t va;
{
	struct regmap *rm;
	struct segmap *sm;
	int vr, vs;
	vr = VA_VREG(va);
	vs = VA_VSEG(va);

	rm = &pm->pm_regmap[vr];
#ifdef notyet
	if ((rm->rg_seg_ptps[vs] & SRMMU_TETYPE) == SRMMU_TEPTE)
		return &rm->rg_seg_ptps[vs];
#endif
	if (rm->rg_segmap == NULL)
		return NULL;

	sm = &rm->rg_segmap[vs];

	if (sm->sg_pte == NULL)
		return NULL;

	return &sm->sg_pte[VA_SUN4M_VPG(va)];
}

/*
 * Set the pte at "ptep" to "pte".
 */
static __inline void
setpgt4m(ptep, pte)
	int *ptep;
	int pte;
{
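	/*
	 * Note (assumption): a swap, rather than a plain store, keeps the
	 * PTE update a single indivisible memory transaction even while
	 * the MMU's table walker may be updating R/M bits in the entry.
	 */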
	swap(ptep, pte);
}

/*
 * Set the page table entry for va to pte. Only legal for kernel mappings.
 */
void
setpte4m(va, pte)
	vaddr_t va;
	int pte;
{
	int *ptep;

	ptep = getptep4m(pmap_kernel(), va);
	tlb_flush_page(va);
	setpgt4m(ptep, pte);
}

/*
 * Translation table for kernel vs. PTE protection bits.
 */
u_int protection_codes[2][8];
#define pte_prot4m(pm, prot) (protection_codes[pm == pmap_kernel()?0:1][prot])

static void
sparc_protection_init4m(void)
{
	u_int prot, *kp, *up;

	kp = protection_codes[0];
	up = protection_codes[1];

	for (prot = 0; prot < 8; prot++) {
		switch (prot) {
		case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE:
			kp[prot] = PPROT_N_RWX;
			up[prot] = PPROT_RWX_RWX;
			break;
		case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_NONE:
			kp[prot] = PPROT_N_RWX;
			up[prot] = PPROT_RW_RW;
			break;
		case VM_PROT_READ | VM_PROT_NONE | VM_PROT_EXECUTE:
			kp[prot] = PPROT_N_RX;
			up[prot] = PPROT_RX_RX;
			break;
		case VM_PROT_READ | VM_PROT_NONE | VM_PROT_NONE:
			kp[prot] = PPROT_N_RX;
			up[prot] = PPROT_R_R;
			break;
		case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE:
			kp[prot] = PPROT_N_RWX;
			up[prot] = PPROT_RWX_RWX;
			break;
		case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE:
			kp[prot] = PPROT_N_RWX;
			up[prot] = PPROT_RW_RW;
			break;
		case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_EXECUTE:
			kp[prot] = PPROT_N_RX;
			up[prot] = PPROT_X_X;
			break;
		case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE:
			kp[prot] = PPROT_N_RX;
			up[prot] = PPROT_R_R;
			break;
		}
	}
}
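
/*
 * Example: pte_prot4m(pmap_kernel(), VM_PROT_READ) yields PPROT_N_RX,
 * while the same protection for a user pmap yields PPROT_R_R.
 */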

#endif /* 4m only */

/*----------------------------------------------------------------*/

/*
 * The following three macros are to be used in sun4/sun4c code only.
 */
#if defined(SUN4_MMU3L)
#define CTX_USABLE(pm,rp) (					\
	((pm)->pm_ctx != NULL &&				\
	 (!HASSUN4_MMU3L || (rp)->rg_smeg != reginval))		\
)
#else
#define CTX_USABLE(pm,rp)	((pm)->pm_ctx != NULL )
#endif

#define GAP_WIDEN(pm,vr) do if (CPU_ISSUN4OR4C) {	\
	if (vr + 1 == pm->pm_gap_start)			\
		pm->pm_gap_start = vr;			\
	if (vr == pm->pm_gap_end)			\
		pm->pm_gap_end = vr + 1;		\
} while (0)

#define GAP_SHRINK(pm,vr) do if (CPU_ISSUN4OR4C) {			\
	int x;								\
	x = pm->pm_gap_start + (pm->pm_gap_end - pm->pm_gap_start) / 2;\
	if (vr > x) {							\
		if (vr < pm->pm_gap_end)				\
			pm->pm_gap_end = vr;				\
	} else {							\
		if (vr >= pm->pm_gap_start && x != pm->pm_gap_start)	\
			pm->pm_gap_start = vr + 1;			\
	}								\
} while (0)
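
/*
 * A pmap's `gap' (pm_gap_start .. pm_gap_end) brackets a range of
 * virtual regions containing no valid mappings; ctx_alloc() uses it
 * to avoid loading segment maps for regions known to be empty.
 */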


static void sortm(struct memarr *, int);
void	ctx_alloc(struct pmap *);
void	ctx_free(struct pmap *);
void	pg_flushcache(struct vm_page *);
#ifdef DEBUG
void	pm_check(char *, struct pmap *);
void	pm_check_k(char *, struct pmap *);
void	pm_check_u(char *, struct pmap *);
#endif

/*
 * Sort a memory array by address.
 */
static void
sortm(mp, n)
	struct memarr *mp;
	int n;
{
	struct memarr *mpj;
	int i, j;
	paddr_t addr;
	psize_t len;

	/* Insertion sort. This is O(n^2), but so what? */
	for (i = 1; i < n; i++) {
		/* save i'th entry */
		addr = mp[i].addr;
		len = mp[i].len;
		/* find j such that i'th entry goes before j'th */
		for (j = 0, mpj = mp; j < i; j++, mpj++)
			if (addr < mpj->addr)
				break;
		/* slide up any additional entries */
		ovbcopy(mpj, mpj + 1, (i - j) * sizeof(*mp));
		mpj->addr = addr;
		mpj->len = len;
	}
}

/*
 * For our convenience, vm_page.c implements:
 *	vm_bootstrap_steal_memory()
 * using the functions:
 *	pmap_virtual_space(), pmap_free_pages(), pmap_next_page(),
 * which are much simpler to implement.
 */

/*
 * How much virtual space does this kernel have?
 * (After mapping kernel text, data, etc.)
 */
void
pmap_virtual_space(v_start, v_end)
	vaddr_t *v_start;
	vaddr_t *v_end;
{
	*v_start = virtual_avail;
	*v_end = virtual_end;
}

/*
 * Helper routine that hands off available physical pages to the VM system.
 */
static void
pmap_page_upload(first_pa)
	paddr_t first_pa;
{
	int	n = 0;
	paddr_t	start, end;

	phys_avail = first_pa;

	npmemarr = makememarr(pmemarr, MA_SIZE, MEMARR_AVAILPHYS);
	sortm(pmemarr, npmemarr);

	if (pmemarr[0].addr != 0)
		panic("pmap_page_upload: no memory?");

	/*
	 * Compute physmem
	 */
	physmem = 0;
	for (n = 0; n < npmemarr; n++)
		physmem += btoc(pmemarr[n].len);

	for (n = 0; n < npmemarr; n++) {
		start = (first_pa > pmemarr[n].addr) ? first_pa :
			pmemarr[n].addr;
		end = pmemarr[n].addr + pmemarr[n].len;
		if (start >= end)
			continue;

		uvm_page_physload(atop(start), atop(end),
		    atop(start), atop(end), VM_FREELIST_DEFAULT);
	}
}

int
pmap_pa_exists(pa)
	paddr_t pa;
{
	return (pa < phys_avail || (pvhead(atop(pa)) != NULL));
}

/* update pv_flags given a valid pte */
#define	MR4_4C(pte)	(((pte) >> PG_M_SHIFT) & (PV_MOD | PV_REF))
#define	MR4M(pte)	(((pte) >> PG_M_SHIFT4M) & (PV_MOD4M | PV_REF4M))

/*----------------------------------------------------------------*/

/*
 * Agree with the monitor ROM as to how many MMU entries are
 * to be reserved, and map all of its segments into all contexts.
 *
 * Unfortunately, while the Version 0 PROM had a nice linked list of
 * taken virtual memory, the Version 2 PROM provides instead a convoluted
 * description of *free* virtual memory. Rather than invert this, we
 * resort to two magic constants from the PROM vector description file.
 */
#if defined(SUN4) || defined(SUN4C)
void
mmu_reservemon4_4c(nrp, nsp)
	int *nrp, *nsp;
{
	u_int va = 0, eva = 0;
	int mmuseg, i, nr, ns, vr, lastvr;
#if defined(SUN4_MMU3L)
	int mmureg;
#endif
	struct regmap *rp;

#if defined(SUN4)
	if (CPU_ISSUN4) {
		prom_vstart = va = OLDMON_STARTVADDR;
		prom_vend = eva = OLDMON_ENDVADDR;
	}
#endif
#if defined(SUN4C)
	if (CPU_ISSUN4C) {
		prom_vstart = va = OPENPROM_STARTVADDR;
		prom_vend = eva = OPENPROM_ENDVADDR;
	}
#endif
	ns = *nsp;
	nr = *nrp;
	lastvr = 0;
	while (va < eva) {
		vr = VA_VREG(va);
		rp = &pmap_kernel()->pm_regmap[vr];

#if defined(SUN4_MMU3L)
		if (HASSUN4_MMU3L && vr != lastvr) {
			lastvr = vr;
			mmureg = getregmap(va);
			if (mmureg < nr)
				rp->rg_smeg = nr = mmureg;
			/*
			 * On 3-level MMU machines, we distribute regions,
			 * rather than segments, amongst the contexts.
			 */
			for (i = ncontext; --i > 0;)
				(*promvec->pv_setctxt)(i, (caddr_t)va, mmureg);
		}
#endif
		mmuseg = getsegmap(va);
		if (mmuseg < ns)
			ns = mmuseg;

		if (!HASSUN4_MMU3L)
			for (i = ncontext; --i > 0;)
				(*promvec->pv_setctxt)(i, (caddr_t)va, mmuseg);

		if (mmuseg == seginval) {
			va += NBPSG;
			continue;
		}
		/*
		 * Another PROM segment. Enter into region map.
		 * Assume the entire segment is valid.
		 */
		rp->rg_nsegmap += 1;
		rp->rg_segmap[VA_VSEG(va)].sg_pmeg = mmuseg;
		rp->rg_segmap[VA_VSEG(va)].sg_npte = NPTESG;

		/* PROM maps its memory user-accessible: fix it. */
		for (i = NPTESG; --i >= 0; va += NBPG)
			setpte4(va, getpte4(va) | PG_S);
	}
	*nsp = ns;
	*nrp = nr;
	return;
}
#endif

#if defined(SUN4M) /* Sun4M versions of above */

/*
 * Take the monitor's initial page table layout, convert it to 3rd-level pte's
 * (it starts out as an L1 mapping), and install it along with a set of kernel
 * mapping tables as the kernel's initial page table setup. Also create and
 * enable a context table. I suppose we also want to block user-mode access
 * to the new kernel/ROM mappings.
 */

/*
 * mmu_reservemon4m(): Copies the existing (ROM) page tables to kernel space,
 * converting any L1/L2 PTEs to L3 PTEs. Does *not* copy the L1 entry mapping
 * the kernel at KERNBASE since we don't want to map 16M of physical
 * memory for the kernel. Thus the kernel must be installed later!
 * Also installs ROM mappings into the kernel pmap.
 * NOTE: This also revokes all user-mode access to the mapped regions.
 */
void
mmu_reservemon4m(kpmap)
	struct pmap *kpmap;
{
	unsigned int rom_ctxtbl;
	int te;
	unsigned int mmupcrsave;

	/*
	 * XXX: although the Sun4M can handle 36 bits of physical
	 * address space, we assume that all these page tables, etc
	 * are in the lower 4G (32-bits) of address space, i.e. out of I/O
	 * space. Eventually this should be changed to support the 36 bit
	 * physical addressing, in case some crazed ROM designer decides to
	 * stick the pagetables up there. In that case, we should use MMU
	 * transparent mode (i.e. ASI 0x20 to 0x2f) to access
	 * physical memory.
	 */

	rom_ctxtbl = (lda(SRMMU_CXTPTR,ASI_SRMMU) << SRMMU_PPNPASHIFT);

	/* We're going to have to use MMU passthrough. If we're on a
	 * Viking MicroSparc without an mbus, we need to turn off traps
	 * and set the AC bit at 0x8000 in the MMU's control register. Ugh.
	 * XXX: Once we've done this, can we still access kernel vm?
	 */
	if (cpuinfo.cpu_vers == 4 && cpuinfo.mxcc) {
		sta(SRMMU_PCR, ASI_SRMMU,	/* set MMU AC bit */
		    ((mmupcrsave = lda(SRMMU_PCR,ASI_SRMMU)) | VIKING_PCR_AC));
	}

	te = lda(rom_ctxtbl, ASI_BYPASS);	/* i.e. context 0 */
	switch (te & SRMMU_TETYPE) {
	case SRMMU_TEINVALID:
		cpuinfo.ctx_tbl[0] = SRMMU_TEINVALID;
		panic("mmu_reservemon4m: no existing L0 mapping! "
986: "(How are we running?");
		break;
	case SRMMU_TEPTE:
#ifdef DEBUG
		printf("mmu_reservemon4m: trying to remap 4G segment!\n");
#endif
		panic("mmu_reservemon4m: can't handle ROM 4G page size");
		/* XXX: Should make this work, however stupid it is */
		break;
	case SRMMU_TEPTD:
		mmu_setup4m_L1(te, kpmap);
		break;
	default:
		panic("mmu_reservemon4m: unknown pagetable entry type");
	}

	if (cpuinfo.cpu_vers == 4 && cpuinfo.mxcc) {
		sta(SRMMU_PCR, ASI_SRMMU, mmupcrsave);
	}
}

void
mmu_setup4m_L1(regtblptd, kpmap)
	int regtblptd;		/* PTD for region table to be remapped */
	struct pmap *kpmap;
{
	unsigned int regtblrover;
	int i;
	unsigned int te;
	struct regmap *rp;
	int j, k;

	/*
	 * Here we scan the region table to copy any entries which appear.
	 * We are only concerned with regions in kernel space and above
	 * (i.e. regions VA_VREG(VM_MIN_KERNEL_ADDRESS)+1 to 0xff). We ignore
	 * the first region (at VA_VREG(VM_MIN_KERNEL_ADDRESS)), since that
	 * is the 16MB L1 mapping that the ROM used to map the kernel in
	 * initially. Later, we will rebuild a new L3 mapping for the kernel
	 * and install it before switching to the new pagetables.
	 */
	regtblrover =
		((regtblptd & ~SRMMU_TETYPE) << SRMMU_PPNPASHIFT) +
		(VA_VREG(VM_MIN_KERNEL_ADDRESS)+1) * sizeof(long); /* kernel only */

	for (i = VA_VREG(VM_MIN_KERNEL_ADDRESS) + 1; i < SRMMU_L1SIZE;
	     i++, regtblrover += sizeof(long)) {

		/* The region we're dealing with */
		rp = &kpmap->pm_regmap[i];

		te = lda(regtblrover, ASI_BYPASS);
		switch(te & SRMMU_TETYPE) {
		case SRMMU_TEINVALID:
			break;

		case SRMMU_TEPTE:
#ifdef DEBUG
			printf("mmu_setup4m_L1: "
			    "converting region 0x%x from L1->L3\n", i);
#endif
			/*
			 * This region entry covers 64MB of memory -- or
			 * (NSEGRG * NPTESG) pages -- which we must convert
			 * into a 3-level description.
			 */

			for (j = 0; j < SRMMU_L2SIZE; j++) {
				struct segmap *sp = &rp->rg_segmap[j];

				for (k = 0; k < SRMMU_L3SIZE; k++) {
					sp->sg_npte++;
					setpgt4m(&sp->sg_pte[k],
					    (te & SRMMU_L1PPNMASK) |
					    (j << SRMMU_L2PPNSHFT) |
					    (k << SRMMU_L3PPNSHFT) |
					    (te & SRMMU_PGBITSMSK) |
					    ((te & SRMMU_PROT_MASK) |
					     PPROT_U2S_OMASK) |
					    SRMMU_TEPTE);
				}
			}
			break;

		case SRMMU_TEPTD:
			mmu_setup4m_L2(te, rp);
			break;

		default:
			panic("mmu_setup4m_L1: unknown pagetable entry type");
		}
	}
}

void
mmu_setup4m_L2(segtblptd, rp)
	int segtblptd;
	struct regmap *rp;
{
	unsigned int segtblrover;
	int i, k;
	unsigned int te;
	struct segmap *sp;

	segtblrover = (segtblptd & ~SRMMU_TETYPE) << SRMMU_PPNPASHIFT;
	for (i = 0; i < SRMMU_L2SIZE; i++, segtblrover += sizeof(long)) {

		sp = &rp->rg_segmap[i];

		te = lda(segtblrover, ASI_BYPASS);
		switch(te & SRMMU_TETYPE) {
		case SRMMU_TEINVALID:
			break;

		case SRMMU_TEPTE:
#ifdef DEBUG
			printf("mmu_setup4m_L2: converting L2 entry at segment 0x%x to L3\n", i);
#endif
			/*
			 * This segment entry covers 256KB of memory -- or
			 * (NPTESG) pages -- which we must convert
			 * into a 3-level description.
			 */
			for (k = 0; k < SRMMU_L3SIZE; k++) {
				sp->sg_npte++;
				setpgt4m(&sp->sg_pte[k],
				    (te & SRMMU_L1PPNMASK) |
				    (te & SRMMU_L2PPNMASK) |
				    (k << SRMMU_L3PPNSHFT) |
				    (te & SRMMU_PGBITSMSK) |
				    ((te & SRMMU_PROT_MASK) |
				     PPROT_U2S_OMASK) |
				    SRMMU_TEPTE);
			}
			break;

		case SRMMU_TEPTD:
			mmu_setup4m_L3(te, sp);
			break;

		default:
			panic("mmu_setup4m_L2: unknown pagetable entry type");
		}
	}
}

void
mmu_setup4m_L3(pagtblptd, sp)
	int pagtblptd;
	struct segmap *sp;
{
	unsigned int pagtblrover;
	int i;
	unsigned int te;

	pagtblrover = (pagtblptd & ~SRMMU_TETYPE) << SRMMU_PPNPASHIFT;
	for (i = 0; i < SRMMU_L3SIZE; i++, pagtblrover += sizeof(long)) {
		te = lda(pagtblrover, ASI_BYPASS);
		switch(te & SRMMU_TETYPE) {
		case SRMMU_TEINVALID:
			break;
		case SRMMU_TEPTE:
			sp->sg_npte++;
			setpgt4m(&sp->sg_pte[i], te | PPROT_U2S_OMASK);
			break;
		case SRMMU_TEPTD:
			panic("mmu_setup4m_L3: PTD found in L3 page table");
		default:
			panic("mmu_setup4m_L3: unknown pagetable entry type");
		}
	}
}
#endif /* defined SUN4M */

/*----------------------------------------------------------------*/

/*
 * MMU management.
 */
struct mmuentry *me_alloc(struct mmuhd *, struct pmap *, int, int);
void	me_free(struct pmap *, u_int);
struct mmuentry	*region_alloc(struct mmuhd *, struct pmap *, int);
void	region_free(struct pmap *, u_int);

/*
 * Change contexts. We need the old context number as well as the new
 * one. If the context is changing, we must write all user windows
 * first, lest an interrupt cause them to be written to the (other)
 * user whose context we set here.
 */
#define	CHANGE_CONTEXTS(old, new) \
	if ((old) != (new)) { \
		write_user_windows(); \
		setcontext(new); \
	}

#if defined(SUN4) || defined(SUN4C) /* This is old sun MMU stuff */
/*
 * Allocate an MMU entry (i.e., a PMEG).
 * If necessary, steal one from someone else.
 * Put it on the tail of the given queue
 * (which is either the LRU list or the locked list).
 * The locked list is not actually ordered, but this is easiest.
 * Also put it on the given (new) pmap's chain,
 * enter its pmeg number into that pmap's segmap,
 * and store the pmeg's new virtual segment number (me->me_vseg).
 *
 * This routine is large and complicated, but it must be fast
 * since it implements the dynamic allocation of MMU entries.
 */
struct mmuentry *
me_alloc(mh, newpm, newvreg, newvseg)
	struct mmuhd *mh;
	struct pmap *newpm;
	int newvreg, newvseg;
{
	struct mmuentry *me;
	struct pmap *pm;
	int i, va, *pte, tpte;
	int ctx;
	struct regmap *rp;
	struct segmap *sp;

	/* try free list first */
	if (!TAILQ_EMPTY(&segm_freelist)) {
		me = TAILQ_FIRST(&segm_freelist);
		TAILQ_REMOVE(&segm_freelist, me, me_list);
#ifdef DEBUG
		if (me->me_pmap != NULL)
			panic("me_alloc: freelist entry has pmap");
		if (pmapdebug & PDB_MMU_ALLOC)
			printf("me_alloc: got pmeg %d\n", me->me_cookie);
#endif
		TAILQ_INSERT_TAIL(mh, me, me_list);

		/* onto the pmap chain; pmap is already locked, if needed */
		TAILQ_INSERT_TAIL(&newpm->pm_seglist, me, me_pmchain);
#ifdef DIAGNOSTIC
		pmap_stats.ps_npmeg_free--;
		if (mh == &segm_locked)
			pmap_stats.ps_npmeg_locked++;
		else
			pmap_stats.ps_npmeg_lru++;
#endif

		/* into pmap segment table, with backpointers */
		newpm->pm_regmap[newvreg].rg_segmap[newvseg].sg_pmeg = me->me_cookie;
		me->me_pmap = newpm;
		me->me_vseg = newvseg;
		me->me_vreg = newvreg;

		return (me);
	}

	/* no luck, take head of LRU list */
	if ((me = TAILQ_FIRST(&segm_lru)) == NULL)
		panic("me_alloc: all pmegs gone");

	pm = me->me_pmap;
	if (pm == NULL)
		panic("me_alloc: LRU entry has no pmap");
	if (pm == pmap_kernel())
		panic("me_alloc: stealing from kernel");
#ifdef DEBUG
	if (pmapdebug & (PDB_MMU_ALLOC | PDB_MMU_STEAL))
		printf("me_alloc: stealing pmeg 0x%x from pmap %p\n",
		    me->me_cookie, pm);
#endif
	/*
	 * Remove from LRU list, and insert at end of new list
	 * (probably the LRU list again, but so what?).
	 */
	TAILQ_REMOVE(&segm_lru, me, me_list);
	TAILQ_INSERT_TAIL(mh, me, me_list);

#ifdef DIAGNOSTIC
	if (mh == &segm_locked) {
		pmap_stats.ps_npmeg_lru--;
		pmap_stats.ps_npmeg_locked++;
	}
#endif

	rp = &pm->pm_regmap[me->me_vreg];
	if (rp->rg_segmap == NULL)
		panic("me_alloc: LRU entry's pmap has no segments");
	sp = &rp->rg_segmap[me->me_vseg];
	pte = sp->sg_pte;
	if (pte == NULL)
		panic("me_alloc: LRU entry's pmap has no ptes");

	/*
	 * The PMEG must be mapped into some context so that we can
	 * read its PTEs. Use its current context if it has one;
	 * if not, and since context 0 is reserved for the kernel,
	 * the simplest method is to switch to 0 and map the PMEG
	 * to virtual address 0---which, being a user space address,
	 * is by definition not in use.
	 *
	 * XXX for ncpus>1 must use per-cpu VA?
	 * XXX do not have to flush cache immediately
	 */
	ctx = getcontext4();
	if (CTX_USABLE(pm,rp)) {
		CHANGE_CONTEXTS(ctx, pm->pm_ctxnum);
		cache_flush_segment(me->me_vreg, me->me_vseg);
		va = VSTOVA(me->me_vreg,me->me_vseg);
	} else {
		CHANGE_CONTEXTS(ctx, 0);
		if (HASSUN4_MMU3L)
			setregmap(0, tregion);
		setsegmap(0, me->me_cookie);
		/*
		 * No cache flush needed: it happened earlier when
		 * the old context was taken.
		 */
		va = 0;
	}

	/*
	 * Record reference and modify bits for each page,
	 * and copy PTEs into kernel memory so that they can
	 * be reloaded later.
	 */
	i = NPTESG;
	do {
		tpte = getpte4(va);
		if ((tpte & (PG_V | PG_TYPE)) == (PG_V | PG_OBMEM)) {
			struct pvlist *pv;

			pv = pvhead(tpte & PG_PFNUM);
			if (pv)
				pv->pv_flags |= MR4_4C(tpte);
		}
		*pte++ = tpte & ~(PG_U|PG_M);
		va += NBPG;
	} while (--i > 0);

	/* update segment tables */
	simple_lock(&pm->pm_lock); /* what if other cpu takes mmuentry ?? */
	if (CTX_USABLE(pm,rp))
		setsegmap(VSTOVA(me->me_vreg,me->me_vseg), seginval);
	sp->sg_pmeg = seginval;

	/* off old pmap chain */
	TAILQ_REMOVE(&pm->pm_seglist, me, me_pmchain);
	simple_unlock(&pm->pm_lock);
	setcontext4(ctx);	/* done with old context */

	/* onto new pmap chain; new pmap is already locked, if needed */
	TAILQ_INSERT_TAIL(&newpm->pm_seglist, me, me_pmchain);

	/* into new segment table, with backpointers */
	newpm->pm_regmap[newvreg].rg_segmap[newvseg].sg_pmeg = me->me_cookie;
	me->me_pmap = newpm;
	me->me_vseg = newvseg;
	me->me_vreg = newvreg;

	return (me);
}

/*
 * Free an MMU entry.
 *
 * Assumes the corresponding pmap is already locked.
 * Does NOT flush cache, but does record ref and mod bits.
 * The rest of each PTE is discarded.
 * CALLER MUST SET CONTEXT to pm->pm_ctxnum (if pmap has
 * a context) or to 0 (if not). Caller must also update
 * pm->pm_segmap and (possibly) the hardware.
 */
void
me_free(pm, pmeg)
	struct pmap *pm;
	u_int pmeg;
{
	struct mmuentry *me = &mmusegments[pmeg];
	int i, va, tpte;
	int vr;
	struct regmap *rp;

	vr = me->me_vreg;

#ifdef DEBUG
	if (pmapdebug & PDB_MMU_ALLOC)
		printf("me_free: freeing pmeg %d from pmap %p\n",
		    me->me_cookie, pm);
	if (me->me_cookie != pmeg)
		panic("me_free: wrong mmuentry");
	if (pm != me->me_pmap)
		panic("me_free: pm != me_pmap");
#endif

	rp = &pm->pm_regmap[vr];

	/* just like me_alloc, but no cache flush, and context already set */
	if (CTX_USABLE(pm,rp)) {
		va = VSTOVA(vr,me->me_vseg);
	} else {
#ifdef DEBUG
		if (getcontext4() != 0) panic("me_free: ctx != 0");
#endif
		if (HASSUN4_MMU3L)
			setregmap(0, tregion);
		setsegmap(0, me->me_cookie);
		va = 0;
	}
	i = NPTESG;
	do {
		tpte = getpte4(va);
		if ((tpte & (PG_V | PG_TYPE)) == (PG_V | PG_OBMEM)) {
			struct pvlist *pv;

			pv = pvhead(tpte & PG_PFNUM);
			if (pv)
				pv->pv_flags |= MR4_4C(tpte);
		}
		va += NBPG;
	} while (--i > 0);

	/* take mmu entry off pmap chain */
	TAILQ_REMOVE(&pm->pm_seglist, me, me_pmchain);
	/* ... and remove from segment map */
	if (rp->rg_segmap == NULL)
		panic("me_free: no segments in pmap");
	rp->rg_segmap[me->me_vseg].sg_pmeg = seginval;

	/* off LRU or lock chain */
	if (pm == pmap_kernel()) {
		TAILQ_REMOVE(&segm_locked, me, me_list);
#ifdef DIAGNOSTIC
		pmap_stats.ps_npmeg_locked--;
#endif
	} else {
		TAILQ_REMOVE(&segm_lru, me, me_list);
#ifdef DIAGNOSTIC
		pmap_stats.ps_npmeg_lru--;
#endif
	}

	/* no associated pmap; on free list */
	me->me_pmap = NULL;
	TAILQ_INSERT_TAIL(&segm_freelist, me, me_list);
#ifdef DIAGNOSTIC
	pmap_stats.ps_npmeg_free++;
#endif
}

#if defined(SUN4_MMU3L)

/* XXX - Merge with segm_alloc/segm_free ? */

struct mmuentry *
region_alloc(mh, newpm, newvr)
	struct mmuhd *mh;
	struct pmap *newpm;
	int newvr;
{
	struct mmuentry *me;
	struct pmap *pm;
	int ctx;
	struct regmap *rp;

	/* try free list first */
	if (!TAILQ_EMPTY(&region_freelist)) {
		me = TAILQ_FIRST(&region_freelist);
		TAILQ_REMOVE(&region_freelist, me, me_list);
#ifdef DEBUG
		if (me->me_pmap != NULL)
			panic("region_alloc: freelist entry has pmap");
		if (pmapdebug & PDB_MMUREG_ALLOC)
			printf("region_alloc: got smeg 0x%x\n", me->me_cookie);
#endif
		TAILQ_INSERT_TAIL(mh, me, me_list);

		/* onto the pmap chain; pmap is already locked, if needed */
		TAILQ_INSERT_TAIL(&newpm->pm_reglist, me, me_pmchain);

		/* into pmap segment table, with backpointers */
		newpm->pm_regmap[newvr].rg_smeg = me->me_cookie;
		me->me_pmap = newpm;
		me->me_vreg = newvr;

		return (me);
	}

	/* no luck, take head of LRU list */
	if ((me = TAILQ_FIRST(&region_lru)) == NULL)
		panic("region_alloc: all smegs gone");

	pm = me->me_pmap;
	if (pm == NULL)
		panic("region_alloc: LRU entry has no pmap");
	if (pm == pmap_kernel())
		panic("region_alloc: stealing from kernel");
#ifdef DEBUG
	if (pmapdebug & (PDB_MMUREG_ALLOC | PDB_MMUREG_STEAL))
		printf("region_alloc: stealing smeg 0x%x from pmap %p\n",
		    me->me_cookie, pm);
#endif
	/*
	 * Remove from LRU list, and insert at end of new list
	 * (probably the LRU list again, but so what?).
	 */
	TAILQ_REMOVE(&region_lru, me, me_list);
	TAILQ_INSERT_TAIL(mh, me, me_list);

	rp = &pm->pm_regmap[me->me_vreg];
	ctx = getcontext4();
	if (pm->pm_ctx) {
		CHANGE_CONTEXTS(ctx, pm->pm_ctxnum);
		cache_flush_region(me->me_vreg);
	}

	/* update region tables */
	simple_lock(&pm->pm_lock); /* what if other cpu takes mmuentry ?? */
	if (pm->pm_ctx)
		setregmap(VRTOVA(me->me_vreg), reginval);
	rp->rg_smeg = reginval;

	/* off old pmap chain */
	TAILQ_REMOVE(&pm->pm_reglist, me, me_pmchain);
	simple_unlock(&pm->pm_lock);
	setcontext4(ctx);	/* done with old context */

	/* onto new pmap chain; new pmap is already locked, if needed */
	TAILQ_INSERT_TAIL(&newpm->pm_reglist, me, me_pmchain);

	/* into new segment table, with backpointers */
	newpm->pm_regmap[newvr].rg_smeg = me->me_cookie;
	me->me_pmap = newpm;
	me->me_vreg = newvr;

	return (me);
}

/*
 * Free an MMU entry.
 *
 * Assumes the corresponding pmap is already locked.
 * Does NOT flush cache. ???
 * CALLER MUST SET CONTEXT to pm->pm_ctxnum (if pmap has
 * a context) or to 0 (if not). Caller must also update
 * pm->pm_regmap and (possibly) the hardware.
 */
void
region_free(pm, smeg)
	struct pmap *pm;
	u_int smeg;
{
	struct mmuentry *me = &mmuregions[smeg];

#ifdef DEBUG
	if (pmapdebug & PDB_MMUREG_ALLOC)
		printf("region_free: freeing smeg 0x%x from pmap %p\n",
		    me->me_cookie, pm);
	if (me->me_cookie != smeg)
		panic("region_free: wrong mmuentry");
	if (pm != me->me_pmap)
		panic("region_free: pm != me_pmap");
#endif

	if (pm->pm_ctx)
		cache_flush_region(me->me_vreg);

	/* take mmu entry off pmap chain */
	TAILQ_REMOVE(&pm->pm_reglist, me, me_pmchain);
	/* ... and remove from segment map */
	pm->pm_regmap[smeg].rg_smeg = reginval;

	/* off LRU or lock chain */
	if (pm == pmap_kernel()) {
		TAILQ_REMOVE(&region_locked, me, me_list);
	} else {
		TAILQ_REMOVE(&region_lru, me, me_list);
	}

	/* no associated pmap; on free list */
	me->me_pmap = NULL;
	TAILQ_INSERT_TAIL(&region_freelist, me, me_list);
}
#endif

/*
 * `Page in' (load or inspect) an MMU entry; called on page faults.
 * Returns 1 if we reloaded the segment, -1 if the segment was
 * already loaded and the page was marked valid (in which case the
 * fault must be a bus error or something), or 0 (segment loaded but
 * PTE not valid, or segment not loaded at all).
 */
int
mmu_pagein(pm, va, prot)
	struct pmap *pm;
	vaddr_t va;
	int prot;
{
	int *pte;
	int vr, vs, pmeg, i, s, bits;
	struct regmap *rp;
	struct segmap *sp;

	if (prot != VM_PROT_NONE)
		bits = PG_V | ((prot & VM_PROT_WRITE) ? PG_W : 0);
	else
		bits = 0;
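	/*
	 * `bits' now holds the PTE bits that must already be set for a
	 * fault on a loaded segment to be considered `hard' (see below).
	 */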
1590:
1591: vr = VA_VREG(va);
1592: vs = VA_VSEG(va);
1593: rp = &pm->pm_regmap[vr];
1594: #ifdef DEBUG
1595: if (pm == pmap_kernel())
1596: printf("mmu_pagein: kernel wants map at va 0x%x, vr %d, vs %d\n", va, vr, vs);
1597: #endif
1598:
1599: /* return 0 if we have no PMEGs to load */
1600: if (rp->rg_segmap == NULL)
1601: return (0);
1602: #if defined(SUN4_MMU3L)
1603: if (HASSUN4_MMU3L && rp->rg_smeg == reginval) {
1604: smeg_t smeg;
1605: unsigned int tva = VA_ROUNDDOWNTOREG(va);
1606: struct segmap *sp = rp->rg_segmap;
1607:
1608: s = splvm(); /* paranoid */
1609: smeg = region_alloc(®ion_lru, pm, vr)->me_cookie;
1610: setregmap(tva, smeg);
1611: i = NSEGRG;
1612: do {
1613: setsegmap(tva, sp++->sg_pmeg);
1614: tva += NBPSG;
1615: } while (--i > 0);
1616: splx(s);
1617: }
1618: #endif
1619: sp = &rp->rg_segmap[vs];
1620:
1621: /* return 0 if we have no PTEs to load */
1622: if ((pte = sp->sg_pte) == NULL)
1623: return (0);
1624:
1625: /* return -1 if the fault is `hard', 0 if not */
1626: if (sp->sg_pmeg != seginval)
1627: return (bits && (getpte4(va) & bits) == bits ? -1 : 0);
1628:
1629: /* reload segment: write PTEs into a new LRU entry */
1630: va = VA_ROUNDDOWNTOSEG(va);
1631: s = splvm(); /* paranoid */
1632: pmeg = me_alloc(&segm_lru, pm, vr, vs)->me_cookie;
1633: setsegmap(va, pmeg);
1634: i = NPTESG;
1635: do {
1636: setpte4(va, *pte++);
1637: va += NBPG;
1638: } while (--i > 0);
1639: splx(s);
1640: return (1);
1641: }
1642: #endif /* defined SUN4 or SUN4C */
1643:
1644: /*
1645: * Allocate a context. If necessary, steal one from someone else.
1646: * Changes hardware context number and loads segment map.
1647: *
1648: * This routine is only ever called from locore.s just after it has
1649: * saved away the previous process, so there are no active user windows.
1650: */
1651: void
1652: ctx_alloc(pm)
1653: struct pmap *pm;
1654: {
1655: union ctxinfo *c;
1656: int s, cnum, i, doflush;
1657: struct regmap *rp;
1658: int gap_start, gap_end;
1659: unsigned long va;
1660:
1661: #ifdef DEBUG
1662: if (pm->pm_ctx)
1663: panic("ctx_alloc pm_ctx");
1664: if (pmapdebug & PDB_CTX_ALLOC)
1665: printf("ctx_alloc(%p)\n", pm);
1666: #endif
1667: if (CPU_ISSUN4OR4C) {
1668: gap_start = pm->pm_gap_start;
1669: gap_end = pm->pm_gap_end;
1670: }
1671:
1672: s = splvm();
1673: if ((c = ctx_freelist) != NULL) {
1674: ctx_freelist = c->c_nextfree;
1675: cnum = c - cpuinfo.ctxinfo;
1676: doflush = 0;
1677: } else {
1678: if ((ctx_kick += ctx_kickdir) >= ncontext) {
1679: ctx_kick = ncontext - 1;
1680: ctx_kickdir = -1;
1681: } else if (ctx_kick < 1) {
1682: ctx_kick = 1;
1683: ctx_kickdir = 1;
1684: }
1685: c = &cpuinfo.ctxinfo[cnum = ctx_kick];
1686: #ifdef DEBUG
1687: if (c->c_pmap == NULL)
1688: panic("ctx_alloc cu_pmap");
1689: if (pmapdebug & (PDB_CTX_ALLOC | PDB_CTX_STEAL))
1690: printf("ctx_alloc: steal context %d from %p\n",
1691: cnum, c->c_pmap);
1692: #endif
1693: c->c_pmap->pm_ctx = NULL;
1694: doflush = (CACHEINFO.c_vactype != VAC_NONE);
1695: if (CPU_ISSUN4OR4C) {
1696: if (gap_start < c->c_pmap->pm_gap_start)
1697: gap_start = c->c_pmap->pm_gap_start;
1698: if (gap_end > c->c_pmap->pm_gap_end)
1699: gap_end = c->c_pmap->pm_gap_end;
1700: }
1701: }
1702:
1703: c->c_pmap = pm;
1704: pm->pm_ctx = c;
1705: pm->pm_ctxnum = cnum;
1706:
1707: if (CPU_ISSUN4OR4C) {
1708: /*
1709: * Write pmap's region (3-level MMU) or segment table into
1710: * the MMU.
1711: *
1712: * Only write those entries that actually map something in
1713: * this context by maintaining a pair of region numbers in
1714: * between which the pmap has no valid mappings.
1715: *
1716: * If a context was just allocated from the free list, trust
1717: * that all its pmeg numbers are `seginval'. We make sure this
1718: * is the case initially in pmap_bootstrap(). Otherwise, the
1719: * context was freed by calling ctx_free() in pmap_release(),
1720: * which in turn is supposedly called only when all mappings
1721: * have been removed.
1722: *
1723: * On the other hand, if the context had to be stolen from
1724: * another pmap, we possibly shrink the gap to be the
1725: * intersection of the new and the previous map's gaps.
1726: */
1727:
1728: setcontext4(cnum);
1729: if (doflush)
1730: cache_flush_context();
1731:
1732: rp = pm->pm_regmap;
1733: for (va = 0, i = NUREG; --i >= 0; ) {
1734: if (VA_VREG(va) >= gap_start) {
1735: va = VRTOVA(gap_end);
1736: i -= gap_end - gap_start;
1737: rp += gap_end - gap_start;
1738: if (i < 0)
1739: break;
1740: /* mustn't re-enter this branch */
1741: gap_start = NUREG;
1742: }
1743: if (HASSUN4_MMU3L) {
1744: setregmap(va, rp++->rg_smeg);
1745: va += NBPRG;
1746: } else {
1747: int j;
1748: struct segmap *sp = rp->rg_segmap;
1749: for (j = NSEGRG; --j >= 0; va += NBPSG)
1750: setsegmap(va,
1751: sp ? sp++->sg_pmeg : seginval);
1752: rp++;
1753: }
1754: }
1755: splx(s);
1756:
1757: } else if (CPU_ISSUN4M) {
1758:
1759: #if defined(SUN4M)
1760: /*
1761: * Reload page and context tables to activate the page tables
1762: * for this context.
1763: *
1764: * The gap stuff isn't really needed in the Sun4m architecture,
1765: * since we don't have to worry about excessive mappings (all
1766: * mappings exist since the page tables must be complete for
1767: * the mmu to be happy).
1768: *
1769: * If a context was just allocated from the free list, trust
1770: * that all of its mmu-edible page tables are zeroed out
1771: * (except for those associated with the kernel). We make
1772: * sure this is the case initially in pmap_bootstrap() and
1773: * pmap_init() (?).
1774: * Otherwise, the context was freed by calling ctx_free() in
1775: * pmap_release(), which in turn is supposedly called only
1776: * when all mappings have been removed.
1777: *
1778: * XXX: Do we have to flush cache after reloading ctx tbl?
1779: */
1780:
1781: /* Do any cache flush needed on context switch */
1782: (*cpuinfo.pure_vcache_flush)();
1783: #ifdef DEBUG
1784: #if 0
1785: ctxbusyvector[cnum] = 1; /* mark context as busy */
1786: #endif
1787: if (pm->pm_reg_ptps_pa == 0)
1788: panic("ctx_alloc: no region table in current pmap");
1789: #endif
1790: /* setcontext(0); -- paranoia? can we modify curr. ctx? */
1791: setpgt4m(&cpuinfo.ctx_tbl[cnum],
1792: (pm->pm_reg_ptps_pa >> SRMMU_PPNPASHIFT) | SRMMU_TEPTD);
1793:
1794: setcontext4m(cnum);
1795: if (doflush)
1796: cache_flush_context();
1797: tlb_flush_context(); /* remove any remnant garbage from tlb */
1798: #endif
1799: splx(s);
1800: }
1801: }
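
/*
 * Illustrative sketch, not part of the original file: the region walk
 * in ctx_alloc() above, reduced to its skeleton. Regions inside the
 * invalid gap [gap_start, gap_end) are hopped over in one step rather
 * than being loaded individually. All names here are hypothetical.
 */
#if 0
static void
sketch_load_regions(int nureg, int gap_start, int gap_end,
    void (*load)(int))
{
	int vr;

	for (vr = 0; vr < nureg; vr++) {
		if (vr >= gap_start && vr < gap_end) {
			vr = gap_end - 1;	/* hop over the gap */
			continue;
		}
		(*load)(vr);	/* cf. setregmap()/setsegmap() above */
	}
}
#endif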
1802:
1803: /*
1804: * Give away a context. Flushes cache and sets current context to 0.
1805: */
1806: void
1807: ctx_free(pm)
1808: struct pmap *pm;
1809: {
1810: union ctxinfo *c;
1811: int newc, oldc;
1812:
1813: if ((c = pm->pm_ctx) == NULL)
1814: panic("ctx_free");
1815: pm->pm_ctx = NULL;
1816:
1817: if (CPU_ISSUN4M) {
1818: #if defined(SUN4M)
1819: oldc = getcontext4m();
1820: /* Do any cache flush needed on context switch */
1821: (*cpuinfo.pure_vcache_flush)();
1822: newc = pm->pm_ctxnum;
1823: if (oldc != newc) {
1824: write_user_windows();
1825: setcontext4m(newc);
1826: }
1827: cache_flush_context();
1828: tlb_flush_context();
1829: setcontext4m(0);
1830: #endif
1831: } else {
1832: oldc = getcontext4();
1833: if (CACHEINFO.c_vactype != VAC_NONE) {
1834: newc = pm->pm_ctxnum;
1835: CHANGE_CONTEXTS(oldc, newc);
1836: cache_flush_context();
1837: setcontext4(0);
1838: } else {
1839: CHANGE_CONTEXTS(oldc, 0);
1840: }
1841: }
1842:
1843: c->c_nextfree = ctx_freelist;
1844: ctx_freelist = c;
1845:
1846: #if 0
1847: #if defined(SUN4M)
1848: if (CPU_ISSUN4M) {
1849: /* Map kernel back into unused context */
1850: newc = pm->pm_ctxnum;
1851: cpuinfo.ctx_tbl[newc] = cpuinfo.ctx_tbl[0];
1852: if (newc)
1853: ctxbusyvector[newc] = 0; /* mark as free */
1854: }
1855: #endif
1856: #endif
1857: }
1858:
1859:
1860: /*----------------------------------------------------------------*/
1861:
1862: /*
1863: * pvlist functions.
1864: */
1865:
1866: /*
1867: * Walk the given pv list, and for each PTE, set or clear some bits
1868: * (e.g., PG_W or PG_NC).
1869: *
1870: * As a special case, this never clears PG_W on `pager' pages.
1871: * These, being kernel addresses, are always in hardware and have
1872: * a context.
1873: *
1874: * This routine flushes the cache for any page whose PTE changes,
1875: * as long as the process has a context; this is overly conservative.
1876: * It also copies ref and mod bits to the pvlist, on the theory that
1877: * this might save work later. (XXX should test this theory)
1878: *
1879: * In addition, if the cacheable bit (PG_NC) is updated in the PTE
1880: * the corresponding PV_NC flag is also updated in each pv entry. This
1881: * is done so kvm_uncache() can use this routine and have the uncached
1882: * status stick.
1883: */
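
/*
 * Usage sketch (not in the original): the bis/bic convention described
 * above, as exercised later in this file. pv_link4_4c() marks aliased
 * pages uncacheable with pv_changepte4_4c(pv, PG_NC, 0), and
 * pv_unlink4_4c() undoes that with pv_changepte4_4c(pv, 0, PG_NC)
 * once the last bad alias is gone.
 */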
1884:
1885: #if defined(SUN4) || defined(SUN4C)
1886:
1887: void
1888: pv_changepte4_4c(pv0, bis, bic)
1889: struct pvlist *pv0;
1890: int bis, bic;
1891: {
1892: int *pte;
1893: struct pvlist *pv;
1894: struct pmap *pm;
1895: int va, vr, vs;
1896: int ctx, s;
1897: struct regmap *rp;
1898: struct segmap *sp;
1899:
1900: write_user_windows(); /* paranoid? */
1901:
1902: s = splvm(); /* paranoid? */
1903: if (pv0->pv_pmap == NULL) {
1904: splx(s);
1905: return;
1906: }
1907: ctx = getcontext4();
1908: for (pv = pv0; pv != NULL; pv = pv->pv_next) {
1909: pm = pv->pv_pmap;
1910: #ifdef DIAGNOSTIC
1911: if (pm == NULL)
1912: panic("pv_changepte: pm == NULL");
1913: #endif
1914: va = pv->pv_va;
1915: vr = VA_VREG(va);
1916: vs = VA_VSEG(va);
1917: rp = &pm->pm_regmap[vr];
1918: if (rp->rg_segmap == NULL)
1919: panic("pv_changepte: no segments");
1920:
1921: sp = &rp->rg_segmap[vs];
1922: pte = sp->sg_pte;
1923:
1924: if (sp->sg_pmeg == seginval) {
1925: /* not in hardware: just fix software copy */
1926: if (pte == NULL)
1927: panic("pv_changepte: pte == NULL");
1928: pte += VA_VPG(va);
1929: *pte = (*pte | bis) & ~bic;
1930: } else {
1931: int tpte;
1932:
1933: /* in hardware: fix hardware copy */
1934: if (CTX_USABLE(pm,rp)) {
1935: /*
1936: * Bizarreness: we never clear PG_W on
1937: * pager pages, nor PG_NC on DVMA pages.
1938: */
1939: if (bic == PG_W &&
1940: va >= uvm.pager_sva && va < uvm.pager_eva)
1941: continue;
1942: if (bic == PG_NC &&
1943: va >= DVMA_BASE && va < DVMA_END)
1944: continue;
1945: setcontext4(pm->pm_ctxnum);
1946: /* XXX should flush only when necessary */
1947: tpte = getpte4(va);
1948: /*
1949: * XXX: always flush cache; conservative, but
1950: * needed to invalidate cache tag protection
1951: * bits and when disabling caching.
1952: */
1953: cache_flush_page(va);
1954: } else {
1955: /* XXX per-cpu va? */
1956: setcontext4(0);
1957: if (HASSUN4_MMU3L)
1958: setregmap(0, tregion);
1959: setsegmap(0, sp->sg_pmeg);
1960: va = VA_VPG(va) << PGSHIFT;
1961: tpte = getpte4(va);
1962: }
1963: if (tpte & PG_V)
1964: pv0->pv_flags |= MR4_4C(tpte);
1965: tpte = (tpte | bis) & ~bic;
1966: setpte4(va, tpte);
1967: if (pte != NULL) /* update software copy */
1968: pte[VA_VPG(va)] = tpte;
1969:
1970: /* Update PV_NC flag if required */
1971: if (bis & PG_NC)
1972: pv->pv_flags |= PV_NC;
1973: if (bic & PG_NC)
1974: pv->pv_flags &= ~PV_NC;
1975: }
1976: }
1977: setcontext4(ctx);
1978: splx(s);
1979: }
1980:
1981: /*
1982: * Sync ref and mod bits in pvlist (turns off same in hardware PTEs).
1983: * Returns the new flags.
1984: *
1985: * This is just like pv_changepte, but we never add or remove bits,
1986: * hence never need to adjust software copies.
1987: */
1988: int
1989: pv_syncflags4_4c(pv0)
1990: struct pvlist *pv0;
1991: {
1992: struct pvlist *pv;
1993: struct pmap *pm;
1994: int tpte, va, vr, vs, pmeg, flags;
1995: int ctx, s;
1996: struct regmap *rp;
1997: struct segmap *sp;
1998:
1999: write_user_windows(); /* paranoid? */
2000:
2001: s = splvm(); /* paranoid? */
2002: if (pv0->pv_pmap == NULL) { /* paranoid */
2003: splx(s);
2004: return (0);
2005: }
2006: ctx = getcontext4();
2007: flags = pv0->pv_flags;
2008: for (pv = pv0; pv != NULL; pv = pv->pv_next) {
2009: pm = pv->pv_pmap;
2010: va = pv->pv_va;
2011: vr = VA_VREG(va);
2012: vs = VA_VSEG(va);
2013: rp = &pm->pm_regmap[vr];
2014: if (rp->rg_segmap == NULL)
2015: panic("pv_syncflags: no segments");
2016: sp = &rp->rg_segmap[vs];
2017:
2018: if ((pmeg = sp->sg_pmeg) == seginval)
2019: continue;
2020:
2021: if (CTX_USABLE(pm,rp)) {
2022: setcontext4(pm->pm_ctxnum);
2023: /* XXX should flush only when necessary */
2024: tpte = getpte4(va);
2025: if (tpte & PG_M)
2026: cache_flush_page(va);
2027: } else {
2028: /* XXX per-cpu va? */
2029: setcontext4(0);
2030: if (HASSUN4_MMU3L)
2031: setregmap(0, tregion);
2032: setsegmap(0, pmeg);
2033: va = VA_VPG(va) << PGSHIFT;
2034: tpte = getpte4(va);
2035: }
2036: if (tpte & (PG_M|PG_U) && tpte & PG_V) {
2037: flags |= MR4_4C(tpte);
2038: tpte &= ~(PG_M|PG_U);
2039: setpte4(va, tpte);
2040: }
2041: }
2042: pv0->pv_flags = flags;
2043: setcontext4(ctx);
2044: splx(s);
2045: return (flags);
2046: }
2047:
2048: /*
2049: * pv_unlink is a helper function for pmap_remove.
2050: * It takes a pointer to the pv_table head for some physical address
2051: * and removes the appropriate (pmap, va) entry.
2052: *
2053: * Once the entry is removed, if the pv_table head has the cache
2054: * inhibit bit set, see if we can turn that off; if so, walk the
2055: * pvlist and turn off PG_NC in each PTE. (The pvlist is by
2056: * definition nonempty, since it must have at least two elements
2057: * in it to have PV_NC set, and we only remove one here.)
2058: */
2059: void
2060: pv_unlink4_4c(pv, pm, va)
2061: struct pvlist *pv;
2062: struct pmap *pm;
2063: vaddr_t va;
2064: {
2065: struct pvlist *npv;
2066:
2067: #ifdef DIAGNOSTIC
2068: if (pv->pv_pmap == NULL)
2069: panic("pv_unlink0");
2070: #endif
2071: /*
2072: * First entry is special (sigh).
2073: */
2074: npv = pv->pv_next;
2075: if (pv->pv_pmap == pm && pv->pv_va == va) {
2076: pmap_stats.ps_unlink_pvfirst++;
2077: if (npv != NULL) {
2078: /*
2079: * Shift next entry into the head.
2080: * Make sure to retain the REF, MOD and ANC flags.
2081: */
2082: pv->pv_next = npv->pv_next;
2083: pv->pv_pmap = npv->pv_pmap;
2084: pv->pv_va = npv->pv_va;
2085: pv->pv_flags &= ~PV_NC;
2086: pv->pv_flags |= npv->pv_flags & PV_NC;
2087: pool_put(&pvpool, npv);
2088: } else {
2089: /*
2090: * No mappings left; we still need to maintain
2091: * the REF and MOD flags, since pmap_is_modified()
2092: * can still be called for this page.
2093: */
2094: if (pv->pv_flags & PV_ANC)
2095: pmap_stats.ps_alias_recache++;
2096: pv->pv_pmap = NULL;
2097: pv->pv_flags &= ~(PV_NC|PV_ANC);
2098: return;
2099: }
2100: } else {
2101: struct pvlist *prev;
2102:
2103: for (prev = pv;; prev = npv, npv = npv->pv_next) {
2104: pmap_stats.ps_unlink_pvsearch++;
2105: if (npv == NULL)
2106: panic("pv_unlink");
2107: if (npv->pv_pmap == pm && npv->pv_va == va)
2108: break;
2109: }
2110: prev->pv_next = npv->pv_next;
2111: pool_put(&pvpool, npv);
2112: }
2113: if (pv->pv_flags & PV_ANC && (pv->pv_flags & PV_NC) == 0) {
2114: /*
2115: * Not cached: check to see if we can fix that now.
2116: */
2117: va = pv->pv_va;
2118: for (npv = pv->pv_next; npv != NULL; npv = npv->pv_next)
2119: if (BADALIAS(va, npv->pv_va) || (npv->pv_flags & PV_NC))
2120: return;
2121: pmap_stats.ps_alias_recache++;
2122: pv->pv_flags &= ~PV_ANC;
2123: pv_changepte4_4c(pv, 0, PG_NC);
2124: }
2125: }
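
/*
 * Illustrative sketch, not from the original: the `first entry is
 * special' removal pattern used above. Because the head pvlist entry
 * is embedded in the page's pv_table slot, removing it means copying
 * the second entry into the head rather than unlinking a node. All
 * names below are hypothetical.
 */
#if 0
struct sketch_pv {
	struct sketch_pv *next;
	int va;				/* stands in for pv_pmap/pv_va */
};

static void
sketch_remove_first(struct sketch_pv *head)
{
	struct sketch_pv *second = head->next;

	if (second != NULL) {
		head->va = second->va;	/* shift second into the head */
		head->next = second->next;
		/* `second' would be returned to the pool here */
	} else {
		head->va = 0;		/* head itself marks `empty' */
	}
}
#endif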
2126:
2127: /*
2128: * pv_link is the inverse of pv_unlink, and is used in pmap_enter.
2129: * It returns PG_NC if the (new) pvlist says that the address cannot
2130: * be cached.
2131: */
2132: int
2133: pv_link4_4c(pv, pm, va, nc)
2134: struct pvlist *pv;
2135: struct pmap *pm;
2136: vaddr_t va;
2137: int nc;
2138: {
2139: struct pvlist *npv;
2140: int ret;
2141:
2142: ret = nc ? PG_NC : 0;
2143:
2144: if (pv->pv_pmap == NULL) {
2145: /* no pvlist entries yet */
2146: pmap_stats.ps_enter_firstpv++;
2147: pv->pv_next = NULL;
2148: pv->pv_pmap = pm;
2149: pv->pv_va = va;
2150: pv->pv_flags |= nc ? PV_NC : 0;
2151: return (ret);
2152: }
2153:
2154: /*
2155: * Before entering the new mapping, see if
2156: * it will cause old mappings to become aliased
2157: * and thus need to be `discached'.
2158: */
2159: pmap_stats.ps_enter_secondpv++;
2160: if (pv->pv_flags & (PV_NC|PV_ANC)) {
2161: /* already uncached, just stay that way */
2162: ret = PG_NC;
2163: } else {
2164: for (npv = pv; npv != NULL; npv = npv->pv_next) {
2165: if (npv->pv_flags & PV_NC) {
2166: ret = PG_NC;
2167: break;
2168: }
2169: if (BADALIAS(va, npv->pv_va)) {
2170: #ifdef DEBUG
2171: if (pmapdebug & PDB_CACHESTUFF)
2172: printf(
2173: "pv_link: badalias: pid %d, 0x%lx<=>0x%lx, pa 0x%lx\n",
2174: curproc ? curproc->p_pid : -1,
2175: va, npv->pv_va, -1); /* XXX -1 */
2176: #endif
2177: /* Mark list head `uncached due to aliases' */
2178: pmap_stats.ps_alias_uncache++;
2179: pv->pv_flags |= PV_ANC;
2180: pv_changepte4_4c(pv, ret = PG_NC, 0);
2181: break;
2182: }
2183: }
2184: }
2185:
2186: npv = pool_get(&pvpool, PR_NOWAIT);
2187: if (npv == NULL)
2188: panic("pv_link_4_4c: allocation failed");
2189: npv->pv_next = pv->pv_next;
2190: npv->pv_pmap = pm;
2191: npv->pv_va = va;
2192: npv->pv_flags = nc ? PV_NC : 0;
2193: pv->pv_next = npv;
2194: return (ret);
2195: }
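
/*
 * Illustrative sketch, not from the original: the kind of test
 * BADALIAS() performs. In a virtually indexed cache, two mappings of
 * one physical page conflict when they select different cache lines,
 * i.e. when they differ in the index bits above the page offset. The
 * geometry below (4KB pages, 64KB direct-mapped VAC) is an assumption
 * for illustration only, not this pmap's actual constants.
 */
#if 0
#define SK_PGOFSET	0x0fffU		/* assumed 4KB page */
#define SK_VACSIZE	0x10000U	/* assumed 64KB cache */

static int
sketch_bad_alias(unsigned int va1, unsigned int va2)
{
	return (((va1 ^ va2) & (SK_VACSIZE - 1) & ~SK_PGOFSET) != 0);
}
#endif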
2196:
2197: #endif /* sun4, sun4c code */
2198:
2199: #if defined(SUN4M) /* Sun4M versions of above */
2200: /*
2201: * Walk the given pv list, and for each PTE, set or clear some bits
2202: * (e.g., PG_W or PG_NC).
2203: *
2204: * As a special case, this never clears PG_W on `pager' pages.
2205: * These, being kernel addresses, are always in hardware and have
2206: * a context.
2207: *
2208: * This routine flushes the cache for any page whose PTE changes,
2209: * as long as the process has a context; this is overly conservative.
2210: * It also copies ref and mod bits to the pvlist, on the theory that
2211: * this might save work later. (XXX should test this theory)
2212: *
2213: * In addition, if the cacheable bit (SRMMU_PG_C) is updated in the PTE
2214: * the corresponding PV_C4M flag is also updated in each pv entry. This
2215: * is done so kvm_uncache() can use this routine and have the uncached
2216: * status stick.
2217: */
2218: void
2219: pv_changepte4m(pv0, bis, bic)
2220: struct pvlist *pv0;
2221: int bis, bic;
2222: {
2223: struct pvlist *pv;
2224: struct pmap *pm;
2225: int ctx, s;
2226: vaddr_t va;
2227:
2228: write_user_windows(); /* paranoid? */
2229:
2230: s = splvm(); /* paranoid? */
2231: if (pv0->pv_pmap == NULL) {
2232: splx(s);
2233: return;
2234: }
2235: ctx = getcontext4m();
2236: for (pv = pv0; pv != NULL; pv = pv->pv_next) {
2237: int tpte;
2238: int *ptep;
2239:
2240: pm = pv->pv_pmap;
2241: va = pv->pv_va;
2242: #ifdef DIAGNOSTIC
2243: if (pm == NULL)
2244: panic("pv_changepte4m: pmap == NULL");
2245: #endif
2246:
2247: ptep = getptep4m(pm, va);
2248:
2249: if (pm->pm_ctx) {
2250: /*
2251: * Bizarreness: we never clear PG_W on
2252: * pager pages, nor set PG_C on DVMA pages.
2253: */
2254: if ((bic & PPROT_WRITE) &&
2255: va >= uvm.pager_sva && va < uvm.pager_eva)
2256: continue;
2257: if ((bis & SRMMU_PG_C) &&
2258: va >= DVMA_BASE && va < DVMA_END)
2259: continue;
2260:
2261: setcontext4m(pm->pm_ctxnum);
2262:
2263: /*
2264: * XXX: always flush cache; conservative, but
2265: * needed to invalidate cache tag protection
2266: * bits and when disabling caching.
2267: */
2268: cache_flush_page(va);
2269:
2270: tlb_flush_page(va);
2271:
2272: }
2273:
2274: tpte = *ptep;
2275: #ifdef DIAGNOSTIC
2276: if ((tpte & SRMMU_TETYPE) != SRMMU_TEPTE)
2277: panic("pv_changepte: invalid PTE for 0x%lx", va);
2278: #endif
2279:
2280: pv0->pv_flags |= MR4M(tpte);
2281: tpte = (tpte | bis) & ~bic;
2282: setpgt4m(ptep, tpte);
2283:
2284: /* Update PV_C4M flag if required */
2285: /*
2286: * XXX - this is incorrect. The PV_C4M means that _this_
2287: * mapping should be kept uncached. This way we
2288: * effectively uncache this pa until all mappings
2289: * to it are gone (see also the XXX in pv_link4m and
2290: * pv_unlink4m).
2291: */
2292: if (bis & SRMMU_PG_C)
2293: pv->pv_flags |= PV_C4M;
2294: if (bic & SRMMU_PG_C)
2295: pv->pv_flags &= ~PV_C4M;
2296: }
2297: setcontext4m(ctx);
2298: splx(s);
2299: }
2300:
2301: /*
2302: * Sync ref and mod bits in pvlist. If page has been ref'd or modified,
2303: * update ref/mod bits in pvlist, and clear the hardware bits.
2304: *
2305: * Return the new flags.
2306: */
2307: int
2308: pv_syncflags4m(pv0)
2309: struct pvlist *pv0;
2310: {
2311: struct pvlist *pv;
2312: struct pmap *pm;
2313: int tpte, va, flags;
2314: int ctx, s;
2315:
2316: write_user_windows(); /* paranoid? */
2317:
2318: s = splvm(); /* paranoid? */
2319: if (pv0->pv_pmap == NULL) { /* paranoid */
2320: splx(s);
2321: return (0);
2322: }
2323: ctx = getcontext4m();
2324: flags = pv0->pv_flags;
2325: for (pv = pv0; pv != NULL; pv = pv->pv_next) {
2326: int *ptep;
2327:
2328: pm = pv->pv_pmap;
2329: va = pv->pv_va;
2330:
2331: ptep = getptep4m(pm, va);
2332:
2333: /*
2334: * XXX - This can't happen?!?
2335: */
2336: if (ptep == NULL) { /* invalid */
2337: printf("pv_syncflags4m: no pte pmap: %p, va: 0x%x\n",
2338: pm, va);
2339: continue;
2340: }
2341:
2342: /*
2343: * We need the PTE from memory as the TLB version will
2344: * always have the SRMMU_PG_R bit on.
2345: */
2346: if (pm->pm_ctx) {
2347: setcontext4m(pm->pm_ctxnum);
2348: tlb_flush_page(va);
2349: }
2350:
2351: tpte = *ptep;
2352:
2353: if ((tpte & SRMMU_TETYPE) == SRMMU_TEPTE && /* if valid pte */
2354: (tpte & (SRMMU_PG_M|SRMMU_PG_R))) { /* and mod/refd */
2355:
2356: flags |= MR4M(tpte);
2357:
2358: if (pm->pm_ctx && (tpte & SRMMU_PG_M)) {
2359: cache_flush_page(va); /* XXX:do we need this?*/
2360: tlb_flush_page(va);
2361: }
2362:
2363: /* Clear mod/ref bits from PTE and write it back */
2364: tpte &= ~(SRMMU_PG_M | SRMMU_PG_R);
2365: setpgt4m(ptep, tpte);
2366: }
2367: }
2368: pv0->pv_flags = flags;
2369: setcontext4m(ctx);
2370: splx(s);
2371: return (flags);
2372: }
2373:
2374: void
2375: pv_unlink4m(pv, pm, va)
2376: struct pvlist *pv;
2377: struct pmap *pm;
2378: vaddr_t va;
2379: {
2380: struct pvlist *npv;
2381:
2382: #ifdef DIAGNOSTIC
2383: if (pv->pv_pmap == NULL)
2384: panic("pv_unlink0");
2385: #endif
2386: /*
2387: * First entry is special (sigh).
2388: */
2389: npv = pv->pv_next;
2390: if (pv->pv_pmap == pm && pv->pv_va == va) {
2391: pmap_stats.ps_unlink_pvfirst++;
2392: if (npv != NULL) {
2393: /*
2394: * Shift next entry into the head.
2395: * Make sure to retain the REF, MOD and ANC flags.
2396: */
2397: pv->pv_next = npv->pv_next;
2398: pv->pv_pmap = npv->pv_pmap;
2399: pv->pv_va = npv->pv_va;
2400: pv->pv_flags &= ~PV_C4M;
2401: pv->pv_flags |= (npv->pv_flags & PV_C4M);
2402: pool_put(&pvpool, npv);
2403: } else {
2404: /*
2405: * No mappings left; we still need to maintain
2406: * the REF and MOD flags, since pmap_is_modified()
2407: * can still be called for this page.
2408: */
2409: if (pv->pv_flags & PV_ANC)
2410: pmap_stats.ps_alias_recache++;
2411: pv->pv_pmap = NULL;
2412: pv->pv_flags &= ~(PV_C4M|PV_ANC);
2413: return;
2414: }
2415: } else {
2416: struct pvlist *prev;
2417:
2418: for (prev = pv;; prev = npv, npv = npv->pv_next) {
2419: pmap_stats.ps_unlink_pvsearch++;
2420: if (npv == NULL)
2421: panic("pv_unlink");
2422: if (npv->pv_pmap == pm && npv->pv_va == va)
2423: break;
2424: }
2425: prev->pv_next = npv->pv_next;
2426: pool_put(&pvpool, npv);
2427: }
2428: if ((pv->pv_flags & (PV_C4M|PV_ANC)) == (PV_C4M|PV_ANC)) {
2429: /*
2430: * Not cached: check to see if we can fix that now.
2431: */
2432: /*
2433: * XXX - This code is incorrect. Even if the bad alias
2434: * has disappeared we keep the PV_ANC flag because
2435: * one of the mappings is not PV_C4M.
2436: */
2437: va = pv->pv_va;
2438: for (npv = pv->pv_next; npv != NULL; npv = npv->pv_next)
2439: if (BADALIAS(va, npv->pv_va) ||
2440: (npv->pv_flags & PV_C4M) == 0)
2441: return;
2442: pmap_stats.ps_alias_recache++;
2443: pv->pv_flags &= ~PV_ANC;
2444: pv_changepte4m(pv, SRMMU_PG_C, 0);
2445: }
2446: }
2447:
2448: /*
2449: * pv_link is the inverse of pv_unlink, and is used in pmap_enter.
2450: * It returns SRMMU_PG_C if the (new) pvlist says that the address cannot
2451: * be cached (i.e. its results must be (& ~)'d in).
2452: */
2453: int
2454: pv_link4m(pv, pm, va, nc)
2455: struct pvlist *pv;
2456: struct pmap *pm;
2457: vaddr_t va;
2458: int nc;
2459: {
2460: struct pvlist *npv, *mpv;
2461: int ret;
2462:
2463: ret = nc ? SRMMU_PG_C : 0;
2464:
2465: if (pv->pv_pmap == NULL) {
2466: /* no pvlist entries yet */
2467: pmap_stats.ps_enter_firstpv++;
2468: pv->pv_next = NULL;
2469: pv->pv_pmap = pm;
2470: pv->pv_va = va;
2471: /*
2472: * XXX - should we really keep the MOD/REF flags?
2473: */
2474: pv->pv_flags |= nc ? 0 : PV_C4M;
2475: return (ret);
2476: }
2477:
2478: /*
2479: * We do the malloc early so that we catch all changes that happen
2480: * during the (possible) sleep.
2481: */
2482: mpv = pool_get(&pvpool, PR_NOWAIT);
2483: if (mpv == NULL)
2484: panic("pv_link4m: allocation failed");
2485:
2486: /*
2487: * Before entering the new mapping, see if
2488: * it will cause old mappings to become aliased
2489: * and thus need to be `discached'.
2490: */
2491: pmap_stats.ps_enter_secondpv++;
2492: if ((pv->pv_flags & PV_ANC) != 0 || (pv->pv_flags & PV_C4M) == 0) {
2493: /* already uncached, just stay that way */
2494: ret = SRMMU_PG_C;
2495: } else {
2496: for (npv = pv; npv != NULL; npv = npv->pv_next) {
2497: /*
2498: * XXX - This code is incorrect. Even when we have
2499: * a bad alias we can fail to set PV_ANC because
2500: * one of the mappings doesn't have PV_C4M set.
2501: */
2502: if ((npv->pv_flags & PV_C4M) == 0) {
2503: ret = SRMMU_PG_C;
2504: break;
2505: }
2506: if (BADALIAS(va, npv->pv_va)) {
2507: #ifdef DEBUG
2508: if (pmapdebug & PDB_CACHESTUFF)
2509: printf(
2510: "pv_link: badalias: pid %d, 0x%lx<=>0x%lx, pa 0x%lx\n",
2511: curproc ? curproc->p_pid : -1,
2512: va, npv->pv_va, -1); /* XXX -1 */
2513: #endif
2514: /* Mark list head `uncached due to aliases' */
2515: pmap_stats.ps_alias_uncache++;
2516: pv->pv_flags |= PV_ANC;
2517: pv_changepte4m(pv, 0, ret = SRMMU_PG_C);
2518: /* cache_flush_page(va); XXX: needed? */
2519: break;
2520: }
2521: }
2522: }
2523:
2524: mpv->pv_next = pv->pv_next;
2525: mpv->pv_pmap = pm;
2526: mpv->pv_va = va;
2527: mpv->pv_flags = nc ? 0 : PV_C4M;
2528: pv->pv_next = mpv;
2529: return (ret);
2530: }
2531: #endif
2532:
2533: /*
2534: * Walk the given list and flush the cache for each (MI) page that is
2535: * potentially in the cache. Called only if vactype != VAC_NONE.
2536: */
2537: void
2538: pg_flushcache(struct vm_page *pg)
2539: {
2540: struct pvlist *pv = &pg->mdpage.pv_head;
2541: struct pmap *pm;
2542: int s, ctx;
2543:
2544: write_user_windows(); /* paranoia? */
2545:
2546: s = splvm(); /* XXX extreme paranoia */
2547: if ((pm = pv->pv_pmap) != NULL) {
2548: ctx = getcontext();
2549: for (;;) {
2550: if (pm->pm_ctx) {
2551: setcontext(pm->pm_ctxnum);
2552: cache_flush_page(pv->pv_va);
2553: }
2554: pv = pv->pv_next;
2555: if (pv == NULL)
2556: break;
2557: pm = pv->pv_pmap;
2558: }
2559: setcontext(ctx);
2560: }
2561: splx(s);
2562: }
2563:
2564: /*----------------------------------------------------------------*/
2565:
2566: /*
2567: * At last, pmap code.
2568: */
2569:
2570: #if defined(SUN4) && defined(SUN4C)
2571: int nptesg;
2572: #endif
2573:
2574: #if defined(SUN4M)
2575: static void pmap_bootstrap4m(void);
2576: #endif
2577: #if defined(SUN4) || defined(SUN4C)
2578: static void pmap_bootstrap4_4c(int, int, int);
2579: #endif
2580:
2581: /*
2582: * Bootstrap the system enough to run with VM enabled.
2583: *
2584: * nsegment is the number of mmu segment entries (``PMEGs'');
2585: * nregion is the number of mmu region entries (``SMEGs'');
2586: * nctx is the number of contexts.
2587: */
2588: void
2589: pmap_bootstrap(nctx, nregion, nsegment)
2590: int nsegment, nctx, nregion;
2591: {
2592: extern int nbpg; /* locore.s */
2593:
2594: uvmexp.pagesize = nbpg;
2595: uvm_setpagesize();
2596:
2597: #if defined(SUN4) && (defined(SUN4C) || defined(SUN4M))
2598: /* In this case NPTESG is not a #define */
2599: nptesg = (NBPSG >> uvmexp.pageshift);
2600: #endif
2601:
2602: #if 0
2603: ncontext = nctx;
2604: #endif
2605:
2606: #if defined(SUN4M)
2607: if (CPU_ISSUN4M) {
2608: pmap_bootstrap4m();
2609: return;
2610: }
2611: #endif
2612: #if defined(SUN4) || defined(SUN4C)
2613: if (CPU_ISSUN4OR4C) {
2614: pmap_bootstrap4_4c(nctx, nregion, nsegment);
2615: return;
2616: }
2617: #endif
2618: }
2619:
2620: #if defined(SUN4) || defined(SUN4C)
2621: void
2622: pmap_bootstrap4_4c(nctx, nregion, nsegment)
2623: int nsegment, nctx, nregion;
2624: {
2625: union ctxinfo *ci;
2626: struct mmuentry *mmuseg;
2627: #if defined(SUN4_MMU3L)
2628: struct mmuentry *mmureg;
2629: #endif
2630: struct regmap *rp;
2631: int i, j;
2632: int npte, zseg, vr, vs;
2633: int rcookie, scookie;
2634: caddr_t p;
2635: void (*rom_setmap)(int ctx, caddr_t va, int pmeg);
2636: int lastpage;
2637: paddr_t avail_start;
2638: extern char end[];
2639: #ifdef DDB
2640: extern char *esym;
2641: #endif
2642:
2643: switch (cputyp) {
2644: case CPU_SUN4C:
2645: mmu_has_hole = 1;
2646: break;
2647: case CPU_SUN4:
2648: if (cpuinfo.cpu_type != CPUTYP_4_400) {
2649: mmu_has_hole = 1;
2650: break;
2651: }
2652: }
2653:
2654: #if defined(SUN4)
2655: /*
2656: * set up the segfixmask to mask off invalid bits
2657: */
2658: segfixmask = nsegment - 1; /* assume nsegment is a power of 2 */
2659: #ifdef DIAGNOSTIC
2660: if (((nsegment & segfixmask) | (nsegment & ~segfixmask)) != nsegment) {
2661: printf("pmap_bootstrap: unsuitable number of segments (%d)\n",
2662: nsegment);
2663: callrom();
2664: }
2665: #endif
2666: #endif
2667:
2668: #if defined(SUN4M) /* We're in a dual-arch kernel. Setup 4/4c fn. ptrs */
2669: pmap_clear_modify_p = pmap_clear_modify4_4c;
2670: pmap_clear_reference_p = pmap_clear_reference4_4c;
2671: pmap_copy_page_p = pmap_copy_page4_4c;
2672: pmap_enter_p = pmap_enter4_4c;
2673: pmap_extract_p = pmap_extract4_4c;
2674: pmap_is_modified_p = pmap_is_modified4_4c;
2675: pmap_is_referenced_p = pmap_is_referenced4_4c;
2676: pmap_kenter_pa_p = pmap_kenter_pa4_4c;
2677: pmap_page_protect_p = pmap_page_protect4_4c;
2678: pmap_protect_p = pmap_protect4_4c;
2679: pmap_zero_page_p = pmap_zero_page4_4c;
2680: pmap_changeprot_p = pmap_changeprot4_4c;
2681: pmap_rmk_p = pmap_rmk4_4c;
2682: pmap_rmu_p = pmap_rmu4_4c;
2683: #endif /* defined SUN4M */
2684:
2685: /*
2686: * Last segment is the `invalid' one (one PMEG of pte's with !pg_v).
2687: * It will never be used for anything else.
2688: */
2689: seginval = --nsegment;
2690:
2691: #if defined(SUN4_MMU3L)
2692: if (HASSUN4_MMU3L)
2693: reginval = --nregion;
2694: #endif
2695:
2696: /*
2697: * Initialize the kernel pmap.
2698: */
2699: /* kernel_pmap_store.pm_ctxnum = 0; */
2700: simple_lock_init(&kernel_pmap_store.pm_lock);
2701: kernel_pmap_store.pm_refcount = 1;
2702: #if defined(SUN4_MMU3L)
2703: TAILQ_INIT(&kernel_pmap_store.pm_reglist);
2704: #endif
2705: TAILQ_INIT(&kernel_pmap_store.pm_seglist);
2706:
2707: kernel_pmap_store.pm_regmap = &kernel_regmap_store[-NUREG];
2708: for (i = NKREG; --i >= 0;) {
2709: #if defined(SUN4_MMU3L)
2710: kernel_regmap_store[i].rg_smeg = reginval;
2711: #endif
2712: kernel_regmap_store[i].rg_segmap =
2713: &kernel_segmap_store[i * NSEGRG];
2714: for (j = NSEGRG; --j >= 0;)
2715: kernel_segmap_store[i * NSEGRG + j].sg_pmeg = seginval;
2716: }
2717:
2718: /*
2719: * Preserve the monitor ROM's reserved VM region, so that
2720: * we can use L1-A or the monitor's debugger. As a side
2721: * effect we map the ROM's reserved VM into all contexts
2722: * (otherwise L1-A crashes the machine!).
2723: */
2724:
2725: mmu_reservemon4_4c(&nregion, &nsegment);
2726:
2727: #if defined(SUN4_MMU3L)
2728: /* Reserve one region for temporary mappings */
2729: tregion = --nregion;
2730: #endif
2731:
2732: /*
2733: * Allocate and clear mmu entries and context structures.
2734: */
2735: p = end;
2736: #ifdef DDB
2737: if (esym != 0)
2738: p = esym;
2739: #endif
2740: #if defined(SUN4_MMU3L)
2741: mmuregions = mmureg = (struct mmuentry *)p;
2742: p += nregion * sizeof(struct mmuentry);
2743: bzero(mmuregions, nregion * sizeof(struct mmuentry));
2744: #endif
2745: mmusegments = mmuseg = (struct mmuentry *)p;
2746: p += nsegment * sizeof(struct mmuentry);
2747: bzero(mmusegments, nsegment * sizeof(struct mmuentry));
2748:
2749: pmap_kernel()->pm_ctx = cpuinfo.ctxinfo = ci = (union ctxinfo *)p;
2750: p += nctx * sizeof *ci;
2751:
2752: /* Initialize MMU resource queues */
2753: #if defined(SUN4_MMU3L)
2754: TAILQ_INIT(&region_freelist);
2755: TAILQ_INIT(&region_lru);
2756: TAILQ_INIT(&region_locked);
2757: #endif
2758: TAILQ_INIT(&segm_freelist);
2759: TAILQ_INIT(&segm_lru);
2760: TAILQ_INIT(&segm_locked);
2761:
2762: /*
2763: * Set up the `constants' for the call to vm_init()
2764: * in main(). All pages beginning at p (rounded up to
2765: * the next whole page) and continuing through the number
2766: * of available pages are free, but they start at a higher
2767: * virtual address. This gives us two mappable MD pages
2768: * for pmap_zero_page and pmap_copy_page, and some pages
2769: * for dumpsys(), all with no associated physical memory.
2770: */
2771: p = (caddr_t)round_page((vaddr_t)p);
2772: avail_start = (paddr_t)p - KERNBASE;
2773:
2774: i = (int)p;
2775: vpage[0] = p, p += NBPG;
2776: vpage[1] = p, p += NBPG;
2777: p = reserve_dumppages(p);
2778:
2779: virtual_avail = (vaddr_t)p;
2780: virtual_end = VM_MAX_KERNEL_ADDRESS;
2781:
2782: p = (caddr_t)i; /* retract to first free phys */
2783:
2784: /*
2785: * All contexts are free except the kernel's.
2786: *
2787: * XXX sun4c could use context 0 for users?
2788: */
2789: ci->c_pmap = pmap_kernel();
2790: ctx_freelist = ci + 1;
2791: for (i = 1; i < ncontext; i++) {
2792: ci++;
2793: ci->c_nextfree = ci + 1;
2794: }
2795: ci->c_nextfree = NULL;
2796: ctx_kick = 0;
2797: ctx_kickdir = -1;
2798:
2799: /*
2800: * Init mmu entries that map the kernel physical addresses.
2801: *
2802: * All the other MMU entries are free.
2803: *
2804: * THIS ASSUMES SEGMENT i IS MAPPED BY MMU ENTRY i DURING THE
2805: * BOOT PROCESS
2806: */
2807:
2808: rom_setmap = promvec->pv_setctxt;
2809: zseg = ((((u_int)p + NBPSG - 1) & ~SGOFSET) - KERNBASE) >> SGSHIFT;
2810: lastpage = VA_VPG(p);
2811: if (lastpage == 0)
2812: /*
2813: * If the page bits in p are 0, we filled the last segment
2814: * exactly (now how did that happen?); if not, it is
2815: * the last page filled in the last segment.
2816: */
2817: lastpage = NPTESG;
2818:
2819: p = (caddr_t)VM_MIN_KERNEL_ADDRESS; /* first va */
2820: vs = VA_VSEG(VM_MIN_KERNEL_ADDRESS); /* first virtual segment */
2821: vr = VA_VREG(VM_MIN_KERNEL_ADDRESS); /* first virtual region */
2822: rp = &pmap_kernel()->pm_regmap[vr];
2823:
2824: for (rcookie = 0, scookie = 0;;) {
2825:
2826: /*
2827: * Distribute each kernel region/segment into all contexts.
2828: * This is done through the monitor ROM, rather than
2829: * directly here: if we do a setcontext we will fault,
2830: * as we are not (yet) mapped in any other context.
2831: */
2832:
2833: if ((vs % NSEGRG) == 0) {
2834: /* Entering a new region */
2835: if (VA_VREG(p) > vr) {
2836: #ifdef DEBUG
2837: printf("note: giant kernel!\n");
2838: #endif
2839: vr++, rp++;
2840: }
2841: #if defined(SUN4_MMU3L)
2842: if (HASSUN4_MMU3L) {
2843: for (i = 1; i < nctx; i++)
2844: rom_setmap(i, p, rcookie);
2845:
2846: TAILQ_INSERT_TAIL(&region_locked,
2847: mmureg, me_list);
2848: TAILQ_INSERT_TAIL(&pmap_kernel()->pm_reglist,
2849: mmureg, me_pmchain);
2850: mmureg->me_cookie = rcookie;
2851: mmureg->me_pmap = pmap_kernel();
2852: mmureg->me_vreg = vr;
2853: rp->rg_smeg = rcookie;
2854: mmureg++;
2855: rcookie++;
2856: }
2857: #endif
2858: }
2859:
2860: #if defined(SUN4_MMU3L)
2861: if (!HASSUN4_MMU3L)
2862: #endif
2863: for (i = 1; i < nctx; i++)
2864: rom_setmap(i, p, scookie);
2865:
2866: /* set up the mmu entry */
2867: TAILQ_INSERT_TAIL(&segm_locked, mmuseg, me_list);
2868: TAILQ_INSERT_TAIL(&pmap_kernel()->pm_seglist, mmuseg, me_pmchain);
2869: pmap_stats.ps_npmeg_locked++;
2870: mmuseg->me_cookie = scookie;
2871: mmuseg->me_pmap = pmap_kernel();
2872: mmuseg->me_vreg = vr;
2873: mmuseg->me_vseg = vs % NSEGRG;
2874: rp->rg_segmap[vs % NSEGRG].sg_pmeg = scookie;
2875: npte = ++scookie < zseg ? NPTESG : lastpage;
2876: rp->rg_segmap[vs % NSEGRG].sg_npte = npte;
2877: rp->rg_nsegmap += 1;
2878: mmuseg++;
2879: vs++;
2880: if (scookie < zseg) {
2881: p += NBPSG;
2882: continue;
2883: }
2884:
2885: /*
2886: * Unmap the pages, if any, that are not part of
2887: * the final segment.
2888: */
2889: for (p += npte << PGSHIFT; npte < NPTESG; npte++, p += NBPG)
2890: setpte4(p, 0);
2891:
2892: #if defined(SUN4_MMU3L)
2893: if (HASSUN4_MMU3L) {
2894: /*
2895: * Unmap the segments, if any, that are not part of
2896: * the final region.
2897: */
2898: for (i = rp->rg_nsegmap; i < NSEGRG; i++, p += NBPSG)
2899: setsegmap(p, seginval);
2900: }
2901: #endif
2902: break;
2903: }
2904:
2905: #if defined(SUN4_MMU3L)
2906: if (HASSUN4_MMU3L)
2907: for (; rcookie < nregion; rcookie++, mmureg++) {
2908: mmureg->me_cookie = rcookie;
2909: TAILQ_INSERT_TAIL(&region_freelist, mmureg, me_list);
2910: }
2911: #endif
2912:
2913: for (; scookie < nsegment; scookie++, mmuseg++) {
2914: mmuseg->me_cookie = scookie;
2915: TAILQ_INSERT_TAIL(&segm_freelist, mmuseg, me_list);
2916: pmap_stats.ps_npmeg_free++;
2917: }
2918:
2919: /* Erase all spurious user-space segmaps */
2920: for (i = 1; i < ncontext; i++) {
2921: setcontext4(i);
2922: if (HASSUN4_MMU3L)
2923: for (p = 0, j = NUREG; --j >= 0; p += NBPRG)
2924: setregmap(p, reginval);
2925: else
2926: for (p = 0, vr = 0; vr < NUREG; vr++) {
2927: if (VA_INHOLE(p)) {
2928: p = (caddr_t)MMU_HOLE_END;
2929: vr = VA_VREG(p);
2930: }
2931: for (j = NSEGRG; --j >= 0; p += NBPSG)
2932: setsegmap(p, seginval);
2933: }
2934: }
2935: setcontext4(0);
2936:
2937: /*
2938: * write protect & encache kernel text;
2939: * set red zone at kernel base; enable cache on message buffer.
2940: */
2941: {
2942: extern char etext[];
2943: #ifdef KGDB
2944: int mask = ~PG_NC; /* XXX chgkprot is busted */
2945: #else
2946: int mask = ~(PG_W | PG_NC);
2947: #endif
2948:
2949: for (p = (caddr_t)trapbase; p < etext; p += NBPG)
2950: setpte4(p, getpte4(p) & mask);
2951: }
2952:
2953: pmap_page_upload(avail_start);
2954: }
2955: #endif
2956:
2957: #if defined(SUN4M) /* Sun4M version of pmap_bootstrap */
2958: /*
2959: * Bootstrap the system enough to run with VM enabled on a Sun4M machine.
2960: *
2961: * Switches from ROM to kernel page tables, and sets up initial mappings.
2962: */
2963: static void
2964: pmap_bootstrap4m(void)
2965: {
2966: int i, j;
2967: caddr_t p;
2968: caddr_t q;
2969: union ctxinfo *ci;
2970: int reg, seg;
2971: unsigned int ctxtblsize;
2972: paddr_t avail_start;
2973: extern char end[];
2974: extern char etext[];
2975: extern caddr_t reserve_dumppages(caddr_t);
2976: #ifdef DDB
2977: extern char *esym;
2978: #endif
2979:
2980: #if defined(SUN4) || defined(SUN4C) /* setup 4M fn. ptrs for dual-arch kernel */
2981: pmap_clear_modify_p = pmap_clear_modify4m;
2982: pmap_clear_reference_p = pmap_clear_reference4m;
2983: pmap_copy_page_p = pmap_copy_page4m;
2984: pmap_enter_p = pmap_enter4m;
2985: pmap_extract_p = pmap_extract4m;
2986: pmap_is_modified_p = pmap_is_modified4m;
2987: pmap_is_referenced_p = pmap_is_referenced4m;
2988: pmap_kenter_pa_p = pmap_kenter_pa4m;
2989: pmap_page_protect_p = pmap_page_protect4m;
2990: pmap_protect_p = pmap_protect4m;
2991: pmap_zero_page_p = pmap_zero_page4m;
2992: pmap_changeprot_p = pmap_changeprot4m;
2993: pmap_rmk_p = pmap_rmk4m;
2994: pmap_rmu_p = pmap_rmu4m;
2995: #endif /* defined Sun4/Sun4c */
2996:
2997: /*
2998: * Initialize the kernel pmap.
2999: */
3000: /* kernel_pmap_store.pm_ctxnum = 0; */
3001: simple_lock_init(&kernel_pmap_store.pm_lock);
3002: kernel_pmap_store.pm_refcount = 1;
3003:
3004: /*
3005: * Set up pm_regmap for kernel to point NUREG *below* the beginning
3006: * of kernel regmap storage. Since the kernel only uses regions
3007: * above NUREG, we save storage space and can index kernel and
3008: * user regions in the same way
3009: */
3010: kernel_pmap_store.pm_regmap = &kernel_regmap_store[-NUREG];
3011: kernel_pmap_store.pm_reg_ptps = NULL;
3012: kernel_pmap_store.pm_reg_ptps_pa = 0;
3013: bzero(kernel_regmap_store, NKREG * sizeof(struct regmap));
3014: bzero(kernel_segmap_store, NKREG * NSEGRG * sizeof(struct segmap));
3015: for (i = NKREG; --i >= 0;) {
3016: kernel_regmap_store[i].rg_segmap =
3017: &kernel_segmap_store[i * NSEGRG];
3018: kernel_regmap_store[i].rg_seg_ptps = NULL;
3019: for (j = NSEGRG; --j >= 0;)
3020: kernel_segmap_store[i * NSEGRG + j].sg_pte = NULL;
3021: }
3022:
3023: p = end; /* p points to top of kernel mem */
3024: #ifdef DDB
3025: if (esym != 0)
3026: p = esym;
3027: #endif
3028:
3029: /* Allocate context administration */
3030: pmap_kernel()->pm_ctx = cpuinfo.ctxinfo = ci = (union ctxinfo *)p;
3031: p += ncontext * sizeof *ci;
3032: bzero((caddr_t)ci, (u_int)p - (u_int)ci);
3033: #if 0
3034: ctxbusyvector = p;
3035: p += ncontext;
3036: bzero(ctxbusyvector, ncontext);
3037: ctxbusyvector[0] = 1; /* context 0 is always in use */
3038: #endif
3039:
3040: /*
3041: * Set up the `constants' for the call to vm_init()
3042: * in main(). All pages beginning at p (rounded up to
3043: * the next whole page) and continuing through the number
3044: * of available pages are free.
3045: */
3046: p = (caddr_t)round_page((vaddr_t)p);
3047:
3048: /*
3049: * Reserve memory for MMU pagetables. Some of these have severe
3050: * alignment restrictions.
3051: */
3052: pagetables_start = (vaddr_t)p;
3053: /*
3054: * Allocate context table.
3055: * To keep supersparc happy, minimum alignment is on a 4K boundary.
3056: */
3057: ctxtblsize = max(ncontext,1024) * sizeof(int);
3058: cpuinfo.ctx_tbl = (int *)roundup((u_int)p, ctxtblsize);
3059: p = (caddr_t)((u_int)cpuinfo.ctx_tbl + ctxtblsize);
3060: qzero(cpuinfo.ctx_tbl, ctxtblsize);
3061:
3062: /*
3063: * Reserve memory for segment and page tables needed to map the entire
3064: * kernel. This takes (2k + NKREG * 16k) of space, but
3065: * unfortunately is necessary since pmap_enk *must* be able to enter
3066: * a kernel mapping without resorting to malloc, or else the
3067: * possibility of deadlock arises (pmap_enk4m is called to enter a
3068: * mapping; it needs to malloc a page table; malloc then calls
3069: * pmap_enk4m to enter the new malloc'd page; pmap_enk4m needs to
3070: * malloc a page table to enter _that_ mapping; malloc deadlocks since
3071: * it is already allocating that object).
3072: */
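/*
 * Worked size check (assuming the usual SRMMU geometry of
 * SRMMU_L2SIZE = SRMMU_L3SIZE = 64 four-byte entries and
 * NSEGRG = 64 segments per region):
 *
 *	segment tables:	NKREG * 64 * 4      = NKREG * 256 bytes
 *	page tables:	NKREG * 64 * 64 * 4 = NKREG * 16KB
 *
 * With NKREG = 8 kernel regions this gives the `2k + NKREG * 16k'
 * quoted above.
 */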
3073: p = (caddr_t) roundup((u_int)p, SRMMU_L1SIZE * sizeof(long));
3074: kernel_regtable_store = (u_int *)p;
3075: p += SRMMU_L1SIZE * sizeof(long);
3076: bzero(kernel_regtable_store,
3077: p - (caddr_t) kernel_regtable_store);
3078:
3079: p = (caddr_t) roundup((u_int)p, SRMMU_L2SIZE * sizeof(long));
3080: kernel_segtable_store = (u_int *)p;
3081: p += (SRMMU_L2SIZE * sizeof(long)) * NKREG;
3082: bzero(kernel_segtable_store,
3083: p - (caddr_t) kernel_segtable_store);
3084:
3085: p = (caddr_t) roundup((u_int)p, SRMMU_L3SIZE * sizeof(long));
3086: kernel_pagtable_store = (u_int *)p;
3087: p += ((SRMMU_L3SIZE * sizeof(long)) * NKREG) * NSEGRG;
3088: bzero(kernel_pagtable_store,
3089: p - (caddr_t) kernel_pagtable_store);
3090:
3091: /* Round to next page and mark end of stolen pages */
3092: p = (caddr_t)round_page((vaddr_t)p);
3093: pagetables_end = (vaddr_t)p;
3094:
3095: avail_start = (paddr_t)p - KERNBASE;
3096:
3097: /*
3098: * Since we've statically allocated space to map the entire kernel,
3099: * we might as well pre-wire the mappings to save time in pmap_enter.
3100: * This also gets around nasty problems with caching of L1/L2 ptp's.
3101: *
3102: * XXX WHY DO WE HAVE THIS CACHING PROBLEM WITH L1/L2 PTPS????? %%%
3103: */
3104:
3105: pmap_kernel()->pm_reg_ptps = (int *) kernel_regtable_store;
3106: pmap_kernel()->pm_reg_ptps_pa =
3107: VA2PA((caddr_t)pmap_kernel()->pm_reg_ptps);
3108:
3109: /* Install L1 table in context 0 */
3110: setpgt4m(&cpuinfo.ctx_tbl[0],
3111: (pmap_kernel()->pm_reg_ptps_pa >> SRMMU_PPNPASHIFT) | SRMMU_TEPTD);
3112:
3113: /* XXX:rethink - Store pointer to region table address */
3114: cpuinfo.L1_ptps = pmap_kernel()->pm_reg_ptps;
3115:
3116: for (reg = 0; reg < NKREG; reg++) {
3117: struct regmap *rp;
3118: caddr_t kphyssegtbl;
3119:
3120: /*
3121: * Entering new region; install & build segtbl
3122: */
3123:
3124: rp = &pmap_kernel()->pm_regmap[reg + VA_VREG(VM_MIN_KERNEL_ADDRESS)];
3125:
3126: kphyssegtbl = (caddr_t)
3127: &kernel_segtable_store[reg * SRMMU_L2SIZE];
3128:
3129: setpgt4m(&pmap_kernel()->pm_reg_ptps[reg + VA_VREG(VM_MIN_KERNEL_ADDRESS)],
3130: (VA2PA(kphyssegtbl) >> SRMMU_PPNPASHIFT) | SRMMU_TEPTD);
3131:
3132: rp->rg_seg_ptps = (int *)kphyssegtbl;
3133:
3134: if (rp->rg_segmap == NULL) {
3135: printf("rp->rg_segmap == NULL!\n");
3136: rp->rg_segmap = &kernel_segmap_store[reg * NSEGRG];
3137: }
3138:
3139: for (seg = 0; seg < NSEGRG; seg++) {
3140: struct segmap *sp;
3141: caddr_t kphyspagtbl;
3142:
3143: rp->rg_nsegmap++;
3144:
3145: sp = &rp->rg_segmap[seg];
3146: kphyspagtbl = (caddr_t)
3147: &kernel_pagtable_store
3148: [((reg * NSEGRG) + seg) * SRMMU_L3SIZE];
3149:
3150: setpgt4m(&rp->rg_seg_ptps[seg],
3151: (VA2PA(kphyspagtbl) >> SRMMU_PPNPASHIFT) |
3152: SRMMU_TEPTD);
3153: sp->sg_pte = (int *) kphyspagtbl;
3154: }
3155: }
3156:
3157: /*
3158: * Preserve the monitor ROM's reserved VM region, so that
3159: * we can use L1-A or the monitor's debugger.
3160: */
3161: mmu_reservemon4m(&kernel_pmap_store);
3162:
3163: /*
3164: * Reserve virtual address space for two mappable MD pages
3165: * for pmap_zero_page and pmap_copy_page, and some more for
3166: * dumpsys().
3167: */
3168: q = p;
3169: vpage[0] = p, p += NBPG;
3170: vpage[1] = p, p += NBPG;
3171: p = reserve_dumppages(p);
3172:
3173: virtual_avail = (vaddr_t)p;
3174: virtual_end = VM_MAX_KERNEL_ADDRESS;
3175:
3176: p = q; /* retract to first free phys */
3177:
3178: /*
3179: * Set up the ctxinfo structures (freelist of contexts)
3180: */
3181: ci->c_pmap = pmap_kernel();
3182: ctx_freelist = ci + 1;
3183: for (i = 1; i < ncontext; i++) {
3184: ci++;
3185: ci->c_nextfree = ci + 1;
3186: }
3187: ci->c_nextfree = NULL;
3188: ctx_kick = 0;
3189: ctx_kickdir = -1;
3190:
3191: /*
3192: * Now map the kernel into our new set of page tables, then
3193: * (finally) switch over to our running page tables.
3194: * We map from VM_MIN_KERNEL_ADDRESS to p into context 0's
3195: * page tables (and the kernel pmap).
3196: */
3197: #ifdef DEBUG /* Sanity checks */
3198: if ((u_int)p % NBPG != 0)
3199: panic("pmap_bootstrap4m: p misaligned?!?");
3200: if (VM_MIN_KERNEL_ADDRESS % NBPRG != 0)
3201: panic("pmap_bootstrap4m: VM_MIN_KERNEL_ADDRESS not region-aligned");
3202: #endif
3203:
3204: for (q = (caddr_t) VM_MIN_KERNEL_ADDRESS; q < p; q += NBPG) {
3205: struct regmap *rp;
3206: struct segmap *sp;
3207: int pte;
3208:
3209: /*
3210: * Now install entry for current page.
3211: */
3212: rp = &pmap_kernel()->pm_regmap[VA_VREG(q)];
3213: sp = &rp->rg_segmap[VA_VSEG(q)];
3214: sp->sg_npte++;
3215:
3216: pte = ((int)q - VM_MIN_KERNEL_ADDRESS) >> SRMMU_PPNPASHIFT;
3217: pte |= PPROT_N_RX | SRMMU_TEPTE;
3218:
3219: if ((cpuinfo.flags & CPUFLG_CACHEPAGETABLES) != 0 ||
3220: q < (caddr_t)pagetables_start ||
3221: q >= (caddr_t)pagetables_end)
3222: pte |= SRMMU_PG_C;
3223:
3224: /* write-protect kernel text */
3225: if (q < (caddr_t) trapbase || q >= etext)
3226: pte |= PPROT_WRITE;
3227:
3228: setpgt4m(&sp->sg_pte[VA_VPG(q)], pte);
3229: }
3230:
3231: #if 0
3232: /*
3233: * We also install the kernel mapping into all other contexts by
3234: * copying the context 0 L1 PTP from cpuinfo.ctx_tbl[0] into the
3235: * remainder of the context table (i.e. we share the kernel page-
3236: * tables). Each user pmap automatically gets the kernel mapped
3237: * into it when it is created, but we do this extra step early on
3238: * in case some twit decides to switch to a context with no user
3239: * pmap associated with it.
3240: */
3241: for (i = 1; i < ncontext; i++)
3242: cpuinfo.ctx_tbl[i] = cpuinfo.ctx_tbl[0];
3243: #endif
3244:
3245: if ((cpuinfo.flags & CPUFLG_CACHEPAGETABLES) == 0)
3246: /* Flush page tables from cache */
3247: pcache_flush((caddr_t)pagetables_start,
3248: (caddr_t)VA2PA((caddr_t)pagetables_start),
3249: pagetables_end - pagetables_start);
3250:
3251: /*
3252: * Now switch to kernel pagetables (finally!)
3253: */
3254: mmu_install_tables(&cpuinfo);
3255:
3256: pmap_page_upload(avail_start);
3257: sparc_protection_init4m();
3258: }
3259:
3260: void
3261: mmu_install_tables(sc)
3262: struct cpu_softc *sc;
3263: {
3264:
3265: #ifdef DEBUG
3266: printf("pmap_bootstrap: installing kernel page tables...");
3267: #endif
3268: setcontext4m(0); /* paranoia? %%%: Make 0x3 a define! below */
3269:
3270: /* Enable MMU tablewalk caching, flush TLB */
3271: if (sc->mmu_enable != 0)
3272: sc->mmu_enable();
3273:
3274: tlb_flush_all();
3275:
3276: sta(SRMMU_CXTPTR, ASI_SRMMU,
3277: (VA2PA((caddr_t)sc->ctx_tbl) >> SRMMU_PPNPASHIFT) & ~0x3);
3278:
3279: tlb_flush_all();
3280:
3281: #ifdef DEBUG
3282: printf("done.\n");
3283: #endif
3284: }
3285:
3286: /*
3287: * Allocate per-CPU page tables.
3288: * Note: this routine is called in the context of the boot CPU
3289: * during autoconfig.
3290: */
3291: void
3292: pmap_alloc_cpu(sc)
3293: struct cpu_softc *sc;
3294: {
3295: caddr_t cpustore;
3296: int *ctxtable;
3297: int *regtable;
3298: int *segtable;
3299: int *pagtable;
3300: int vr, vs, vpg;
3301: struct regmap *rp;
3302: struct segmap *sp;
3303:
3304: /* XXX: allocate properly aligned and physically contiguous memory here; the pointers below are unimplemented placeholders */
3305: cpustore = 0;
3306: ctxtable = 0;
3307: regtable = 0;
3308: segtable = 0;
3309: pagtable = 0;
3310:
3311: vr = VA_VREG(CPUINFO_VA);
3312: vs = VA_VSEG(CPUINFO_VA);
3313: vpg = VA_VPG(CPUINFO_VA);
3314: rp = &pmap_kernel()->pm_regmap[vr];
3315: sp = &rp->rg_segmap[vs];
3316:
3317: /*
3318: * Copy page tables, then modify entry for CPUINFO_VA so that
3319: * it points at the per-CPU pages.
3320: */
3321: bcopy(cpuinfo.L1_ptps, regtable, SRMMU_L1SIZE * sizeof(int));
3322: regtable[vr] =
3323: (VA2PA((caddr_t)segtable) >> SRMMU_PPNPASHIFT) | SRMMU_TEPTD;
3324:
3325: bcopy(rp->rg_seg_ptps, segtable, SRMMU_L2SIZE * sizeof(int));
3326: segtable[vs] =
3327: (VA2PA((caddr_t)pagtable) >> SRMMU_PPNPASHIFT) | SRMMU_TEPTD;
3328:
3329: bcopy(sp->sg_pte, pagtable, SRMMU_L3SIZE * sizeof(int));
3330: pagtable[vpg] =
3331: (VA2PA((caddr_t)cpustore) >> SRMMU_PPNPASHIFT) |
3332: (SRMMU_TEPTE | PPROT_RWX_RWX | SRMMU_PG_C);
3333:
3334: /* Install L1 table in context 0 */
3335: ctxtable[0] = ((u_int)regtable >> SRMMU_PPNPASHIFT) | SRMMU_TEPTD;
3336:
3337: sc->ctx_tbl = ctxtable;
3338: sc->L1_ptps = regtable;
3339:
3340: #if 0
3341: if ((sc->flags & CPUFLG_CACHEPAGETABLES) == 0) {
3342: kvm_uncache((caddr_t)0, 1);
3343: }
3344: #endif
3345: }
3346: #endif /* defined sun4m */
3347:
3348:
3349: void
3350: pmap_init()
3351: {
3352: pool_init(&pvpool, sizeof(struct pvlist), 0, 0, 0, "pvpl", NULL);
3353:
3354: #if defined(SUN4M)
3355: if (CPU_ISSUN4M) {
3356: /*
3357: * The SRMMU only ever needs chunks in one of two sizes:
3358: * 1024 (for region level tables) and 256 (for segment
3359: * and page level tables).
3360: */
3361: int n;
3362:
3363: n = SRMMU_L1SIZE * sizeof(int);
3364: pool_init(&L1_pool, n, n, 0, 0, "L1 pagetable",
3365: &pgt_allocator);
3366:
3367: n = SRMMU_L2SIZE * sizeof(int);
3368: pool_init(&L23_pool, n, n, 0, 0, "L2/L3 pagetable",
3369: &pgt_allocator);
3370: }
3371: #endif
3372: }
3373:
3374: /*
3375: * Called just after enabling cache (so that CPUFLG_CACHEPAGETABLES is
3376: * set correctly).
3377: */
3378: void
3379: pmap_cache_enable()
3380: {
3381: #ifdef SUN4M
3382: if (CPU_ISSUN4M) {
3383: int pte;
3384:
3385: /*
3386: * Deal with changed CPUFLG_CACHEPAGETABLES.
3387: *
3388: * If the tables were uncached during the initial mapping
3389: * and cache_enable set the flag, we recache the tables.
3390: */
3391:
3392: pte = getpte4m(pagetables_start);
3393:
3394: if ((cpuinfo.flags & CPUFLG_CACHEPAGETABLES) != 0 &&
3395: (pte & SRMMU_PG_C) == 0)
3396: kvm_recache((caddr_t)pagetables_start,
3397: atop(pagetables_end - pagetables_start));
3398: }
3399: #endif
3400: }
3401:
3402:
3403: /*
3404: * Map physical addresses into kernel VM.
3405: */
3406: vaddr_t
3407: pmap_map(va, pa, endpa, prot)
3408: vaddr_t va;
3409: paddr_t pa, endpa;
3410: int prot;
3411: {
3412: int pgsize = PAGE_SIZE;
3413:
3414: while (pa < endpa) {
3415: pmap_kenter_pa(va, pa, prot);
3416: va += pgsize;
3417: pa += pgsize;
3418: }
3419: return (va);
3420: }
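
/*
 * Usage sketch (hypothetical addresses, not from the original):
 * mapping one megabyte of device memory read/write starting at `va':
 *
 *	va = pmap_map(va, pa, pa + (1 << 20),
 *	    VM_PROT_READ | VM_PROT_WRITE);
 */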
3421:
3422: /*
3423: * Create and return a physical map.
3424: *
3425: * If size is nonzero, the map is useless. (ick)
3426: */
3427: struct pmap *
3428: pmap_create()
3429: {
3430: struct pmap *pm;
3431: int size;
3432: void *urp;
3433:
3434: pm = (struct pmap *)malloc(sizeof *pm, M_VMPMAP, M_WAITOK);
3435: #ifdef DEBUG
3436: if (pmapdebug & PDB_CREATE)
3437: printf("pmap_create: created %p\n", pm);
3438: #endif
3439: bzero((caddr_t)pm, sizeof *pm);
3440:
3441: size = NUREG * sizeof(struct regmap);
3442:
3443: pm->pm_regstore = urp = malloc(size, M_VMPMAP, M_WAITOK);
3444: qzero((caddr_t)urp, size);
3445: /* pm->pm_ctx = NULL; */
3446: simple_lock_init(&pm->pm_lock);
3447: pm->pm_refcount = 1;
3448: pm->pm_regmap = urp;
3449:
3450: if (CPU_ISSUN4OR4C) {
3451: TAILQ_INIT(&pm->pm_seglist);
3452: #if defined(SUN4_MMU3L)
3453: TAILQ_INIT(&pm->pm_reglist);
3454: if (HASSUN4_MMU3L) {
3455: int i;
3456: for (i = NUREG; --i >= 0;)
3457: pm->pm_regmap[i].rg_smeg = reginval;
3458: }
3459: #endif
3460: }
3461: #if defined(SUN4M)
3462: else {
3463: int i;
3464:
3465: /*
3466: * We must allocate and initialize hardware-readable (MMU)
3467: * pagetables. We must also map the kernel regions into this
3468: * pmap's pagetables, so that we can access the kernel from
3469: * this user context.
3470: *
3471: * Note: pm->pm_regmap's have been zeroed already, so we don't
3472: * need to explicitly mark them as invalid (a null
3473: * rg_seg_ptps pointer indicates invalid for the 4m)
3474: */
3475: urp = pool_get(&L1_pool, PR_WAITOK);
3476: pm->pm_reg_ptps = urp;
3477: pm->pm_reg_ptps_pa = VA2PA(urp);
3478:
3479: /* Invalidate user mappings */
3480: for (i = 0; i < NUREG; i++)
3481: setpgt4m(&pm->pm_reg_ptps[i], SRMMU_TEINVALID);
3482:
3483: /* Copy kernel regions */
3484: for (i = 0; i < NKREG; i++) {
3485: setpgt4m(&pm->pm_reg_ptps[VA_VREG(VM_MIN_KERNEL_ADDRESS) + i],
3486: cpuinfo.L1_ptps[VA_VREG(VM_MIN_KERNEL_ADDRESS) + i]);
3487: }
3488: }
3489: #endif
3490:
3491: pm->pm_gap_end = VA_VREG(VM_MAXUSER_ADDRESS);
3492:
3493: return (pm);
3494: }
3495:
3496: /*
3497: * Retire the given pmap from service.
3498: * Should only be called if the map contains no valid mappings.
3499: */
3500: void
3501: pmap_destroy(pm)
3502: struct pmap *pm;
3503: {
3504: int count;
3505:
3506: if (pm == NULL)
3507: return;
3508: #ifdef DEBUG
3509: if (pmapdebug & PDB_DESTROY)
3510: printf("pmap_destroy(%p)\n", pm);
3511: #endif
3512: simple_lock(&pm->pm_lock);
3513: count = --pm->pm_refcount;
3514: simple_unlock(&pm->pm_lock);
3515: if (count == 0) {
3516: pmap_release(pm);
3517: free(pm, M_VMPMAP);
3518: }
3519: }
3520:
3521: /*
3522: * Release any resources held by the given physical map.
3523: * Called when a pmap initialized by pmap_pinit is being released.
3524: */
3525: void
3526: pmap_release(pm)
3527: struct pmap *pm;
3528: {
3529: union ctxinfo *c;
3530: int s = splvm(); /* paranoia */
3531:
3532: #ifdef DEBUG
3533: if (pmapdebug & PDB_DESTROY)
3534: printf("pmap_release(%p)\n", pm);
3535: #endif
3536:
3537: if (CPU_ISSUN4OR4C) {
3538: #if defined(SUN4_MMU3L)
3539: if (!TAILQ_EMPTY(&pm->pm_reglist))
3540: panic("pmap_release: region list not empty");
3541: #endif
3542: if (!TAILQ_EMPTY(&pm->pm_seglist))
3543: panic("pmap_release: segment list not empty");
3544:
3545: if ((c = pm->pm_ctx) != NULL) {
3546: if (pm->pm_ctxnum == 0)
3547: panic("pmap_release: releasing kernel");
3548: ctx_free(pm);
3549: }
3550: }
3551: splx(s);
3552:
3553: #ifdef DEBUG
3554: if (pmapdebug) {
3555: int vs, vr;
3556: for (vr = 0; vr < NUREG; vr++) {
3557: struct regmap *rp = &pm->pm_regmap[vr];
3558: if (rp->rg_nsegmap != 0)
3559: printf("pmap_release: %d segments remain in "
3560: "region %d\n", rp->rg_nsegmap, vr);
3561: if (rp->rg_segmap != NULL) {
3562: printf("pmap_release: segments still "
3563: "allocated in region %d\n", vr);
3564: for (vs = 0; vs < NSEGRG; vs++) {
3565: struct segmap *sp = &rp->rg_segmap[vs];
3566: if (sp->sg_npte != 0)
3567: printf("pmap_release: %d ptes "
3568: "remain in segment %d\n",
3569: sp->sg_npte, vs);
3570: if (sp->sg_pte != NULL) {
3571: printf("pmap_release: ptes still "
3572: "allocated in segment %d\n", vs);
3573: }
3574: }
3575: }
3576: }
3577: }
3578: #endif
3579: if (pm->pm_regstore)
3580: free(pm->pm_regstore, M_VMPMAP);
3581:
3582: #if defined(SUN4M)
3583: if (CPU_ISSUN4M) {
3584: if ((c = pm->pm_ctx) != NULL) {
3585: if (pm->pm_ctxnum == 0)
3586: panic("pmap_release: releasing kernel");
3587: ctx_free(pm);
3588: }
3589: pool_put(&L1_pool, pm->pm_reg_ptps);
3590: pm->pm_reg_ptps = NULL;
3591: pm->pm_reg_ptps_pa = 0;
3592: }
3593: #endif
3594: }
3595:
3596: /*
3597: * Add a reference to the given pmap.
3598: */
3599: void
3600: pmap_reference(pm)
3601: struct pmap *pm;
3602: {
3603:
3604: if (pm != NULL) {
3605: simple_lock(&pm->pm_lock);
3606: pm->pm_refcount++;
3607: simple_unlock(&pm->pm_lock);
3608: }
3609: }
3610:
3611: /*
3612: * Remove the given range of mapping entries.
3613: * The starting and ending addresses are already rounded to pages.
3614: * Sheer lunacy: pmap_remove is often asked to remove nonexistent
3615: * mappings.
3616: */
3617: void
3618: pmap_remove(pm, va, endva)
3619: struct pmap *pm;
3620: vaddr_t va, endva;
3621: {
3622: vaddr_t nva;
3623: int vr, vs, s, ctx;
3624: void (*rm)(struct pmap *, vaddr_t, vaddr_t, int, int);
3625:
3626: if (pm == NULL)
3627: return;
3628:
3629: #ifdef DEBUG
3630: if (pmapdebug & PDB_REMOVE)
3631: printf("pmap_remove(%p, 0x%lx, 0x%lx)\n", pm, va, endva);
3632: #endif
3633:
3634: if (pm == pmap_kernel()) {
3635: /*
3636: * Removing from kernel address space.
3637: */
3638: rm = pmap_rmk;
3639: } else {
3640: /*
3641: * Removing from user address space.
3642: */
3643: write_user_windows();
3644: rm = pmap_rmu;
3645: }
3646:
3647: ctx = getcontext();
3648: s = splvm(); /* XXX conservative */
3649: simple_lock(&pm->pm_lock);
3650: for (; va < endva; va = nva) {
3651: /* do one virtual segment at a time */
3652: vr = VA_VREG(va);
3653: vs = VA_VSEG(va);
3654: nva = VSTOVA(vr, vs + 1);
3655: if (nva == 0 || nva > endva)
3656: nva = endva;
3657: if (pm->pm_regmap[vr].rg_nsegmap != 0)
3658: (*rm)(pm, va, nva, vr, vs);
3659: }
3660: simple_unlock(&pm->pm_lock);
3661: splx(s);
3662: setcontext(ctx);
3663: }
3664:
3665: void
3666: pmap_kremove(va, len)
3667: vaddr_t va;
3668: vsize_t len;
3669: {
3670: struct pmap *pm = pmap_kernel();
3671: vaddr_t nva, endva = va + len;
3672: int vr, vs, s, ctx;
3673:
3674: #ifdef DEBUG
3675: if (pmapdebug & PDB_REMOVE)
3676: printf("pmap_kremove(0x%lx, 0x%lx)\n", va, len);
3677: #endif
3678:
3679: ctx = getcontext();
3680: s = splvm(); /* XXX conservative */
3681: simple_lock(&pm->pm_lock);
3682:
3683: for (; va < endva; va = nva) {
3684: /* do one virtual segment at a time */
3685: vr = VA_VREG(va);
3686: vs = VA_VSEG(va);
3687: nva = VSTOVA(vr, vs + 1);
3688: if (nva == 0 || nva > endva)
3689: nva = endva;
3690: if (pm->pm_regmap[vr].rg_nsegmap != 0)
3691: pmap_rmk(pm, va, nva, vr, vs);
3692: }
3693:
3694: simple_unlock(&pm->pm_lock);
3695: splx(s);
3696: setcontext(ctx);
3697: }
3698:
3699: /*
3700: * The following magic number was chosen because:
3701: * 1. It is the same amount of work to cache_flush_page 4 pages
3702: * as to cache_flush_segment 1 segment (so at 4 the cost of
3703: * flush is the same).
3704:  *	2. Flushing extra pages is bad (it invalidates cache lines still in use).
3705: * 3. The current code, which malloc()s 5 pages for each process
3706: * for a user vmspace/pmap, almost never touches all 5 of those
3707: * pages.
3708: */
3709: #if 0
3710: #define PMAP_RMK_MAGIC (cacheinfo.c_hwflush?5:64) /* if > magic, use cache_flush_segment */
3711: #else
3712: #define PMAP_RMK_MAGIC 5 /* if > magic, use cache_flush_segment */
3713: #endif
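
/*
 * Worked example (illustrative): a removal covering six pages has
 * npg = (endva - va) >> PGSHIFT == 6 > PMAP_RMK_MAGIC, so pmap_rmk
 * issues a single cache_flush_segment(); at npg <= 5 each valid,
 * cacheable page is flushed individually, the cheaper side of the
 * trade-off by point 1 above.
 */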
3714:
3715: /*
3716: * Remove a range contained within a single segment.
3717: * These are egregiously complicated routines.
3718: */
3719:
3720: #if defined(SUN4) || defined(SUN4C)
3721:
3722: /* remove from kernel */
3723: void
3724: pmap_rmk4_4c(pm, va, endva, vr, vs)
3725: struct pmap *pm;
3726: vaddr_t va, endva;
3727: int vr, vs;
3728: {
3729: int i, tpte, perpage, npg;
3730: struct pvlist *pv;
3731: int nleft, pmeg;
3732: struct regmap *rp;
3733: struct segmap *sp;
3734: int s;
3735:
3736: rp = &pm->pm_regmap[vr];
3737: sp = &rp->rg_segmap[vs];
3738:
3739: if (rp->rg_nsegmap == 0)
3740: return;
3741:
3742: #ifdef DEBUG
3743: if (rp->rg_segmap == NULL)
3744: panic("pmap_rmk: no segments");
3745: #endif
3746:
3747: if ((nleft = sp->sg_npte) == 0)
3748: return;
3749:
3750: pmeg = sp->sg_pmeg;
3751:
3752: #ifdef DEBUG
3753: if (pmeg == seginval)
3754: panic("pmap_rmk: not loaded");
3755: if (pm->pm_ctx == NULL)
3756: panic("pmap_rmk: lost context");
3757: #endif
3758:
3759: setcontext4(0);
3760: /* decide how to flush cache */
3761: npg = (endva - va) >> PGSHIFT;
3762: if (npg > PMAP_RMK_MAGIC) {
3763: /* flush the whole segment */
3764: perpage = 0;
3765: cache_flush_segment(vr, vs);
3766: } else {
3767: /* flush each page individually; some never need flushing */
3768: perpage = (CACHEINFO.c_vactype != VAC_NONE);
3769: }
3770: while (va < endva) {
3771: tpte = getpte4(va);
3772: if ((tpte & PG_V) == 0) {
3773: va += NBPG;
3774: continue;
3775: }
3776: if ((tpte & PG_TYPE) == PG_OBMEM) {
3777: /* if cacheable, flush page as needed */
3778: if (perpage && (tpte & PG_NC) == 0)
3779: cache_flush_page(va);
3780: pv = pvhead(tpte & PG_PFNUM);
3781: if (pv) {
3782: pv->pv_flags |= MR4_4C(tpte);
3783: s = splvm();
3784: pv_unlink4_4c(pv, pm, va);
3785: splx(s);
3786: }
3787: }
3788: nleft--;
3789: setpte4(va, 0);
3790: va += NBPG;
3791: }
3792:
3793: /*
3794: * If the segment is all gone, remove it from everyone and
3795: * free the MMU entry.
3796: */
3797: if ((sp->sg_npte = nleft) == 0) {
3798: va = VSTOVA(vr,vs); /* retract */
3799: #if defined(SUN4_MMU3L)
3800: if (HASSUN4_MMU3L)
3801: setsegmap(va, seginval);
3802: else
3803: #endif
3804: for (i = ncontext; --i >= 0;) {
3805: setcontext4(i);
3806: setsegmap(va, seginval);
3807: }
3808: me_free(pm, pmeg);
3809: if (--rp->rg_nsegmap == 0) {
3810: #if defined(SUN4_MMU3L)
3811: if (HASSUN4_MMU3L) {
3812: for (i = ncontext; --i >= 0;) {
3813: setcontext4(i);
3814: setregmap(va, reginval);
3815: }
3816: /* note: context is 0 */
3817: region_free(pm, rp->rg_smeg);
3818: }
3819: #endif
3820: }
3821: }
3822: }
3823:
3824: #endif /* sun4, sun4c */
3825:
3826: #if defined(SUN4M) /* 4M version of pmap_rmk */
3827: /* remove from kernel (4m)*/
3828: void
3829: pmap_rmk4m(pm, va, endva, vr, vs)
3830: struct pmap *pm;
3831: vaddr_t va, endva;
3832: int vr, vs;
3833: {
3834: int tpte, perpage, npg;
3835: struct pvlist *pv;
3836: int nleft;
3837: struct regmap *rp;
3838: struct segmap *sp;
3839:
3840: rp = &pm->pm_regmap[vr];
3841: sp = &rp->rg_segmap[vs];
3842:
3843: if (rp->rg_nsegmap == 0)
3844: return;
3845:
3846: #ifdef DEBUG
3847: if (rp->rg_segmap == NULL)
3848: panic("pmap_rmk: no segments");
3849: #endif
3850:
3851: if ((nleft = sp->sg_npte) == 0)
3852: return;
3853:
3854: #ifdef DEBUG
3855: if (sp->sg_pte == NULL || rp->rg_seg_ptps == NULL)
3856: panic("pmap_rmk: segment/region does not exist");
3857: if (pm->pm_ctx == NULL)
3858: panic("pmap_rmk: lost context");
3859: #endif
3860:
3861: setcontext4m(0);
3862: /* decide how to flush cache */
3863: npg = (endva - va) >> PGSHIFT;
3864: if (npg > PMAP_RMK_MAGIC) {
3865: /* flush the whole segment */
3866: perpage = 0;
3867: if (CACHEINFO.c_vactype != VAC_NONE)
3868: cache_flush_segment(vr, vs);
3869: } else {
3870: /* flush each page individually; some never need flushing */
3871: perpage = (CACHEINFO.c_vactype != VAC_NONE);
3872: }
3873: while (va < endva) {
3874: tpte = sp->sg_pte[VA_SUN4M_VPG(va)];
3875: if ((tpte & SRMMU_TETYPE) != SRMMU_TEPTE) {
3876: #ifdef DEBUG
3877: if ((pmapdebug & PDB_SANITYCHK) &&
3878: (getpte4m(va) & SRMMU_TETYPE) == SRMMU_TEPTE)
3879: panic("pmap_rmk: Spurious kTLB entry for 0x%lx",
3880: va);
3881: #endif
3882: va += NBPG;
3883: continue;
3884: }
3885: if ((tpte & SRMMU_PGTYPE) == PG_SUN4M_OBMEM) {
3886: /* if cacheable, flush page as needed */
3887: if (perpage && (tpte & SRMMU_PG_C))
3888: cache_flush_page(va);
3889: pv = pvhead((tpte & SRMMU_PPNMASK) >> SRMMU_PPNSHIFT);
3890: if (pv) {
3891: pv->pv_flags |= MR4M(tpte);
3892: pv_unlink4m(pv, pm, va);
3893: }
3894: }
3895: nleft--;
3896: tlb_flush_page(va);
3897: setpgt4m(&sp->sg_pte[VA_SUN4M_VPG(va)], SRMMU_TEINVALID);
3898: va += NBPG;
3899: }
3900:
3901: sp->sg_npte = nleft;
3902: }
3903: #endif /* sun4m */
3904:
3905: /*
3906:  * Just like PMAP_RMK_MAGIC, but we have a different threshold.
3907: * Note that this may well deserve further tuning work.
3908: */
3909: #if 0
3910: #define PMAP_RMU_MAGIC (cacheinfo.c_hwflush?4:64) /* if > magic, use cache_flush_segment */
3911: #else
3912: #define PMAP_RMU_MAGIC 4 /* if > magic, use cache_flush_segment */
3913: #endif
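
/*
 * Worked example (illustrative): by the cost model above, user
 * segments break even at four pages, so a five-page removal in
 * pmap_rmu (npg == 5 > PMAP_RMU_MAGIC) takes the single segment
 * flush, while a four-page one flushes page by page.
 */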
3914:
3915: #if defined(SUN4) || defined(SUN4C)
3916:
3917: /* remove from user */
3918: void
3919: pmap_rmu4_4c(pm, va, endva, vr, vs)
3920: struct pmap *pm;
3921: vaddr_t va, endva;
3922: int vr, vs;
3923: {
3924: int *pte0, pteva, tpte, perpage, npg;
3925: struct pvlist *pv;
3926: int nleft, pmeg;
3927: struct regmap *rp;
3928: struct segmap *sp;
3929: int s;
3930:
3931: rp = &pm->pm_regmap[vr];
3932: if (rp->rg_nsegmap == 0)
3933: return;
3934: if (rp->rg_segmap == NULL)
3935: panic("pmap_rmu: no segments");
3936:
3937: sp = &rp->rg_segmap[vs];
3938: if ((nleft = sp->sg_npte) == 0)
3939: return;
3940:
3941: if (sp->sg_pte == NULL)
3942: panic("pmap_rmu: no pages");
3943:
3944: pmeg = sp->sg_pmeg;
3945: pte0 = sp->sg_pte;
3946:
3947: if (pmeg == seginval) {
3948: int *pte = pte0 + VA_VPG(va);
3949:
3950: /*
3951: * PTEs are not in MMU. Just invalidate software copies.
3952: */
3953: for (; va < endva; pte++, va += NBPG) {
3954: tpte = *pte;
3955: if ((tpte & PG_V) == 0) {
3956: /* nothing to remove (braindead VM layer) */
3957: continue;
3958: }
3959: if ((tpte & PG_TYPE) == PG_OBMEM) {
3960: struct pvlist *pv;
3961:
3962: pv = pvhead(tpte & PG_PFNUM);
3963: if (pv) {
3964: s = splvm();
3965: pv_unlink4_4c(pv, pm, va);
3966: splx(s);
3967: }
3968: }
3969: nleft--;
3970: *pte = 0;
3971: }
3972: if ((sp->sg_npte = nleft) == 0) {
3973: free(pte0, M_VMPMAP);
3974: sp->sg_pte = NULL;
3975: if (--rp->rg_nsegmap == 0) {
3976: free(rp->rg_segmap, M_VMPMAP);
3977: rp->rg_segmap = NULL;
3978: #if defined(SUN4_MMU3L)
3979: if (HASSUN4_MMU3L && rp->rg_smeg != reginval) {
3980: if (pm->pm_ctx) {
3981: setcontext4(pm->pm_ctxnum);
3982: setregmap(va, reginval);
3983: } else
3984: setcontext4(0);
3985: region_free(pm, rp->rg_smeg);
3986: }
3987: #endif
3988: }
3989: }
3990: return;
3991: }
3992:
3993: /*
3994: * PTEs are in MMU. Invalidate in hardware, update ref &
3995: * mod bits, and flush cache if required.
3996: */
3997: if (CTX_USABLE(pm,rp)) {
3998: /* process has a context, must flush cache */
3999: setcontext4(pm->pm_ctxnum);
4000: npg = (endva - va) >> PGSHIFT;
4001: if (npg > PMAP_RMU_MAGIC) {
4002: perpage = 0; /* flush the whole segment */
4003: cache_flush_segment(vr, vs);
4004: } else
4005: perpage = (CACHEINFO.c_vactype != VAC_NONE);
4006: pteva = va;
4007: } else {
4008: /* no context, use context 0; cache flush unnecessary */
4009: setcontext4(0);
4010: if (HASSUN4_MMU3L)
4011: setregmap(0, tregion);
4012: /* XXX use per-cpu pteva? */
4013: setsegmap(0, pmeg);
4014: pteva = VA_VPG(va) << PGSHIFT;
4015: perpage = 0;
4016: }
4017: for (; va < endva; pteva += NBPG, va += NBPG) {
4018: tpte = getpte4(pteva);
4019: if ((tpte & PG_V) == 0)
4020: continue;
4021: if ((tpte & PG_TYPE) == PG_OBMEM) {
4022: /* if cacheable, flush page as needed */
4023: if (perpage && (tpte & PG_NC) == 0)
4024: cache_flush_page(va);
4025: pv = pvhead(tpte & PG_PFNUM);
4026: if (pv) {
4027: pv->pv_flags |= MR4_4C(tpte);
4028: s = splvm();
4029: pv_unlink4_4c(pv, pm, va);
4030: splx(s);
4031: }
4032: }
4033: nleft--;
4034: setpte4(pteva, 0);
4035: pte0[VA_VPG(pteva)] = 0;
4036: }
4037:
4038: /*
4039: * If the segment is all gone, and the context is loaded, give
4040: * the segment back.
4041: */
4042: if ((sp->sg_npte = nleft) == 0 /* ??? && pm->pm_ctx != NULL*/) {
4043: #ifdef DEBUG
4044: if (pm->pm_ctx == NULL) {
4045: printf("pmap_rmu: no context here...");
4046: }
4047: #endif
4048: va = VSTOVA(vr,vs); /* retract */
4049: if (CTX_USABLE(pm,rp))
4050: setsegmap(va, seginval);
4051: else if (HASSUN4_MMU3L && rp->rg_smeg != reginval) {
4052: /* note: context already set earlier */
4053: setregmap(0, rp->rg_smeg);
4054: setsegmap(vs << SGSHIFT, seginval);
4055: }
4056: free(pte0, M_VMPMAP);
4057: sp->sg_pte = NULL;
4058: me_free(pm, pmeg);
4059:
4060: if (--rp->rg_nsegmap == 0) {
4061: free(rp->rg_segmap, M_VMPMAP);
4062: rp->rg_segmap = NULL;
4063: GAP_WIDEN(pm,vr);
4064:
4065: #if defined(SUN4_MMU3L)
4066: if (HASSUN4_MMU3L && rp->rg_smeg != reginval) {
4067: /* note: context already set */
4068: if (pm->pm_ctx)
4069: setregmap(va, reginval);
4070: region_free(pm, rp->rg_smeg);
4071: }
4072: #endif
4073: }
4074:
4075: }
4076: }
4077:
4078: #endif /* sun4,4c */
4079:
4080: #if defined(SUN4M) /* 4M version of pmap_rmu */
4081: /* remove from user */
4082: void
4083: pmap_rmu4m(pm, va, endva, vr, vs)
4084: struct pmap *pm;
4085: vaddr_t va, endva;
4086: int vr, vs;
4087: {
4088: int *pte0, perpage, npg;
4089: struct pvlist *pv;
4090: int nleft;
4091: struct regmap *rp;
4092: struct segmap *sp;
4093:
4094: rp = &pm->pm_regmap[vr];
4095: if (rp->rg_nsegmap == 0)
4096: return;
4097: if (rp->rg_segmap == NULL)
4098: panic("pmap_rmu: no segments");
4099:
4100: sp = &rp->rg_segmap[vs];
4101: if ((nleft = sp->sg_npte) == 0)
4102: return;
4103:
4104: if (sp->sg_pte == NULL)
4105: panic("pmap_rmu: no pages");
4106:
4107: pte0 = sp->sg_pte;
4108:
4109: /*
4110: * Invalidate PTE in MMU pagetables. Flush cache if necessary.
4111: */
4112: if (pm->pm_ctx) {
4113: /* process has a context, must flush cache */
4114: setcontext4m(pm->pm_ctxnum);
4115: if (CACHEINFO.c_vactype != VAC_NONE) {
4116: npg = (endva - va) >> PGSHIFT;
4117: if (npg > PMAP_RMU_MAGIC) {
4118: perpage = 0; /* flush the whole segment */
4119: cache_flush_segment(vr, vs);
4120: } else
4121: perpage = 1;
4122: } else
4123: perpage = 0;
4124: } else {
4125: /* no context; cache flush unnecessary */
4126: perpage = 0;
4127: }
4128: for (; va < endva; va += NBPG) {
4129:
4130: int tpte = pte0[VA_SUN4M_VPG(va)];
4131:
4132: if ((tpte & SRMMU_TETYPE) != SRMMU_TEPTE) {
4133: #ifdef DEBUG
4134: if ((pmapdebug & PDB_SANITYCHK) &&
4135: pm->pm_ctx &&
4136: 		    (getpte4m(va) & SRMMU_TETYPE) == SRMMU_TEPTE)
4137: panic("pmap_rmu: Spurious uTLB entry for 0x%lx",
4138: va);
4139: #endif
4140: continue;
4141: }
4142:
4143: if ((tpte & SRMMU_PGTYPE) == PG_SUN4M_OBMEM) {
4144: /* if cacheable, flush page as needed */
4145: if (perpage && (tpte & SRMMU_PG_C))
4146: cache_flush_page(va);
4147: pv = pvhead((tpte & SRMMU_PPNMASK) >> SRMMU_PPNSHIFT);
4148: if (pv) {
4149: pv->pv_flags |= MR4M(tpte);
4150: pv_unlink4m(pv, pm, va);
4151: }
4152: }
4153: nleft--;
4154: if (pm->pm_ctx)
4155: tlb_flush_page(va);
4156: setpgt4m(&pte0[VA_SUN4M_VPG(va)], SRMMU_TEINVALID);
4157: }
4158:
4159: /*
4160: * If the segment is all gone, and the context is loaded, give
4161: * the segment back.
4162: */
4163: if ((sp->sg_npte = nleft) == 0) {
4164: #ifdef DEBUG
4165: if (pm->pm_ctx == NULL) {
4166: printf("pmap_rmu: no context here...");
4167: }
4168: #endif
4169: va = VSTOVA(vr,vs); /* retract */
4170:
4171: if (pm->pm_ctx)
4172: tlb_flush_segment(vr, vs); /* Paranoia? */
4173: setpgt4m(&rp->rg_seg_ptps[vs], SRMMU_TEINVALID);
4174: pool_put(&L23_pool, pte0);
4175: sp->sg_pte = NULL;
4176:
4177: if (--rp->rg_nsegmap == 0) {
4178: if (pm->pm_ctx)
4179: tlb_flush_context(); /* Paranoia? */
4180: setpgt4m(&pm->pm_reg_ptps[vr], SRMMU_TEINVALID);
4181: free(rp->rg_segmap, M_VMPMAP);
4182: rp->rg_segmap = NULL;
4183: pool_put(&L23_pool, rp->rg_seg_ptps);
4184: }
4185: }
4186: }
4187: #endif /* sun4m */
4188:
4189: /*
4190: * Lower (make more strict) the protection on the specified
4191: * physical page.
4192: *
4193: * There are only two cases: either the protection is going to 0
4194:  * (in which case we do the dirty work here), or it is going
4195:  * to read-only (in which case pv_changepte does the trick).
4196: */
4197:
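/*
 * Typical calls (illustrative; the MI layer reaches these routines
 * through the pmap function switch):
 *
 *	pmap_page_protect(pg, VM_PROT_READ);	- write-protect: pv_changepte
 *	pmap_page_protect(pg, VM_PROT_NONE);	- revoke: unmap everywhere
 */
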
4198: #if defined(SUN4) || defined(SUN4C)
4199: void
4200: pmap_page_protect4_4c(struct vm_page *pg, vm_prot_t prot)
4201: {
4202: struct pvlist *pv, *pv0, *npv;
4203: struct pmap *pm;
4204: int va, vr, vs, pteva, tpte;
4205: int flags, nleft, i, s, ctx;
4206: struct regmap *rp;
4207: struct segmap *sp;
4208:
4209: #ifdef DEBUG
4210: if ((pmapdebug & PDB_CHANGEPROT) ||
4211: (pmapdebug & PDB_REMOVE && prot == VM_PROT_NONE))
4212: 		printf("pmap_page_protect(%p, 0x%x)\n", pg, prot);
4213: #endif
4214: pv = &pg->mdpage.pv_head;
4215: /*
4216: * Skip operations that do not take away write permission.
4217: */
4218: if (prot & VM_PROT_WRITE)
4219: return;
4220: write_user_windows(); /* paranoia */
4221: if (prot & VM_PROT_READ) {
4222: pv_changepte4_4c(pv, 0, PG_W);
4223: return;
4224: }
4225:
4226: /*
4227: * Remove all access to all people talking to this page.
4228: * Walk down PV list, removing all mappings.
4229: * The logic is much like that for pmap_remove,
4230: * but we know we are removing exactly one page.
4231: */
4232: s = splvm();
4233: if ((pm = pv->pv_pmap) == NULL) {
4234: splx(s);
4235: return;
4236: }
4237: ctx = getcontext4();
4238: pv0 = pv;
4239: flags = pv->pv_flags & ~PV_NC;
4240: while (pv != NULL) {
4241: pm = pv->pv_pmap;
4242: va = pv->pv_va;
4243: vr = VA_VREG(va);
4244: vs = VA_VSEG(va);
4245: rp = &pm->pm_regmap[vr];
4246: #ifdef DIAGNOSTIC
4247: if (rp->rg_nsegmap == 0)
4248: panic("pmap_remove_all: empty vreg");
4249: #endif
4250: sp = &rp->rg_segmap[vs];
4251: #ifdef DIAGNOSTIC
4252: if (sp->sg_npte == 0)
4253: panic("pmap_remove_all: empty vseg");
4254: #endif
4255: nleft = --sp->sg_npte;
4256:
4257: if (sp->sg_pmeg == seginval) {
4258: /* Definitely not a kernel map */
4259: if (nleft) {
4260: sp->sg_pte[VA_VPG(va)] = 0;
4261: } else {
4262: free(sp->sg_pte, M_VMPMAP);
4263: sp->sg_pte = NULL;
4264: if (--rp->rg_nsegmap == 0) {
4265: free(rp->rg_segmap, M_VMPMAP);
4266: rp->rg_segmap = NULL;
4267: GAP_WIDEN(pm,vr);
4268: #if defined(SUN4_MMU3L)
4269: if (HASSUN4_MMU3L && rp->rg_smeg != reginval) {
4270: if (pm->pm_ctx) {
4271: setcontext4(pm->pm_ctxnum);
4272: setregmap(va, reginval);
4273: } else
4274: setcontext4(0);
4275: region_free(pm, rp->rg_smeg);
4276: }
4277: #endif
4278: }
4279: }
4280: goto nextpv;
4281: }
4282:
4283: if (CTX_USABLE(pm,rp)) {
4284: setcontext4(pm->pm_ctxnum);
4285: pteva = va;
4286: cache_flush_page(va);
4287: } else {
4288: setcontext4(0);
4289: /* XXX use per-cpu pteva? */
4290: if (HASSUN4_MMU3L)
4291: setregmap(0, tregion);
4292: setsegmap(0, sp->sg_pmeg);
4293: pteva = VA_VPG(va) << PGSHIFT;
4294: }
4295:
4296: tpte = getpte4(pteva);
4297: #ifdef DIAGNOSTIC
4298: if ((tpte & PG_V) == 0)
4299: panic("pmap_page_protect !PG_V: ctx %d, va 0x%x, pte 0x%x",
4300: pm->pm_ctxnum, va, tpte);
4301: #endif
4302: flags |= MR4_4C(tpte);
4303:
4304: if (nleft) {
4305: setpte4(pteva, 0);
4306: if (sp->sg_pte != NULL)
4307: sp->sg_pte[VA_VPG(pteva)] = 0;
4308: goto nextpv;
4309: }
4310:
4311: /* Entire segment is gone */
4312: if (pm == pmap_kernel()) {
4313: #if defined(SUN4_MMU3L)
4314: if (!HASSUN4_MMU3L)
4315: #endif
4316: for (i = ncontext; --i >= 0;) {
4317: setcontext4(i);
4318: setsegmap(va, seginval);
4319: }
4320: me_free(pm, sp->sg_pmeg);
4321: if (--rp->rg_nsegmap == 0) {
4322: #if defined(SUN4_MMU3L)
4323: if (HASSUN4_MMU3L) {
4324: for (i = ncontext; --i >= 0;) {
4325: setcontext4(i);
4326: setregmap(va, reginval);
4327: }
4328: region_free(pm, rp->rg_smeg);
4329: }
4330: #endif
4331: }
4332: } else {
4333: if (CTX_USABLE(pm,rp))
4334: /* `pteva'; we might be using tregion */
4335: setsegmap(pteva, seginval);
4336: #if defined(SUN4_MMU3L)
4337: else if (HASSUN4_MMU3L &&
4338: rp->rg_smeg != reginval) {
4339: /* note: context already set earlier */
4340: setregmap(0, rp->rg_smeg);
4341: setsegmap(vs << SGSHIFT, seginval);
4342: }
4343: #endif
4344: free(sp->sg_pte, M_VMPMAP);
4345: sp->sg_pte = NULL;
4346: me_free(pm, sp->sg_pmeg);
4347:
4348: if (--rp->rg_nsegmap == 0) {
4349: #if defined(SUN4_MMU3L)
4350: if (HASSUN4_MMU3L &&
4351: rp->rg_smeg != reginval) {
4352: if (pm->pm_ctx)
4353: setregmap(va, reginval);
4354: region_free(pm, rp->rg_smeg);
4355: }
4356: #endif
4357: free(rp->rg_segmap, M_VMPMAP);
4358: rp->rg_segmap = NULL;
4359: GAP_WIDEN(pm,vr);
4360: }
4361: }
4362:
4363: nextpv:
4364: npv = pv->pv_next;
4365: if (pv != pv0)
4366: pool_put(&pvpool, pv);
4367: pv = npv;
4368: }
4369: pv0->pv_pmap = NULL;
4370: pv0->pv_next = NULL;
4371: pv0->pv_flags = flags;
4372: setcontext4(ctx);
4373: splx(s);
4374: }
4375:
4376: /*
4377: * Lower (make more strict) the protection on the specified
4378: * range of this pmap.
4379: *
4380: * There are only two cases: either the protection is going to 0
4381: * (in which case we call pmap_remove to do the dirty work), or
4382: * it is going from read/write to read-only. The latter is
4383: * fairly easy.
4384: */
4385: void
4386: pmap_protect4_4c(struct pmap *pm, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
4387: {
4388: int va, nva, vr, vs;
4389: int s, ctx;
4390: struct regmap *rp;
4391: struct segmap *sp;
4392:
4393: if (pm == NULL || prot & VM_PROT_WRITE)
4394: return;
4395:
4396: if ((prot & VM_PROT_READ) == 0) {
4397: pmap_remove(pm, sva, eva);
4398: return;
4399: }
4400:
4401: write_user_windows();
4402: ctx = getcontext4();
4403: s = splvm();
4404: simple_lock(&pm->pm_lock);
4405:
4406: for (va = sva; va < eva;) {
4407: vr = VA_VREG(va);
4408: vs = VA_VSEG(va);
4409: rp = &pm->pm_regmap[vr];
4410: nva = VSTOVA(vr,vs + 1);
4411: if (nva == 0) panic("pmap_protect: last segment"); /* cannot happen */
4412: if (nva > eva)
4413: nva = eva;
4414: if (rp->rg_nsegmap == 0) {
4415: va = nva;
4416: continue;
4417: }
4418: #ifdef DEBUG
4419: if (rp->rg_segmap == NULL)
4420: panic("pmap_protect: no segments");
4421: #endif
4422: sp = &rp->rg_segmap[vs];
4423: if (sp->sg_npte == 0) {
4424: va = nva;
4425: continue;
4426: }
4427: #ifdef DEBUG
4428: if (sp->sg_pte == NULL)
4429: panic("pmap_protect: no pages");
4430: #endif
4431: if (sp->sg_pmeg == seginval) {
4432: int *pte = &sp->sg_pte[VA_VPG(va)];
4433:
4434: /* not in MMU; just clear PG_W from core copies */
4435: for (; va < nva; va += NBPG)
4436: *pte++ &= ~PG_W;
4437: } else {
4438: /* in MMU: take away write bits from MMU PTEs */
4439: if (CTX_USABLE(pm,rp)) {
4440: int tpte;
4441:
4442: /*
4443: * Flush cache so that any existing cache
4444: * tags are updated. This is really only
4445: * needed for PTEs that lose PG_W.
4446: */
4447: setcontext4(pm->pm_ctxnum);
4448: for (; va < nva; va += NBPG) {
4449: tpte = getpte4(va);
4450: pmap_stats.ps_npg_prot_all++;
4451: if ((tpte & (PG_W|PG_TYPE)) ==
4452: (PG_W|PG_OBMEM)) {
4453: pmap_stats.ps_npg_prot_actual++;
4454: cache_flush_page(va);
4455: setpte4(va, tpte & ~PG_W);
4456: }
4457: }
4458: } else {
4459: int pteva;
4460:
4461: /*
4462: * No context, hence not cached;
4463: * just update PTEs.
4464: */
4465: setcontext4(0);
4466: /* XXX use per-cpu pteva? */
4467: if (HASSUN4_MMU3L)
4468: setregmap(0, tregion);
4469: setsegmap(0, sp->sg_pmeg);
4470: pteva = VA_VPG(va) << PGSHIFT;
4471: for (; va < nva; pteva += NBPG, va += NBPG)
4472: setpte4(pteva, getpte4(pteva) & ~PG_W);
4473: }
4474: }
4475: }
4476: simple_unlock(&pm->pm_lock);
4477: splx(s);
4478: setcontext4(ctx);
4479: }
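
/*
 * Example (illustrative): write-protecting a range,
 *
 *	pmap_protect(pm, va, va + len, VM_PROT_READ);
 *
 * clears PG_W in the software PTE copies, or in the MMU PTEs (after
 * a cache flush) when the segment is resident.
 */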
4480:
4481: /*
4482: * Change the protection and/or wired status of the given (MI) virtual page.
4483: * XXX: should have separate function (or flag) telling whether only wiring
4484: * is changing.
4485: */
4486: void
4487: pmap_changeprot4_4c(pm, va, prot, wired)
4488: struct pmap *pm;
4489: vaddr_t va;
4490: vm_prot_t prot;
4491: int wired;
4492: {
4493: int vr, vs, tpte, newprot, ctx, s;
4494: struct regmap *rp;
4495: struct segmap *sp;
4496:
4497: #ifdef DEBUG
4498: if (pmapdebug & PDB_CHANGEPROT)
4499: printf("pmap_changeprot(%p, 0x%lx, 0x%x, 0x%x)\n",
4500: pm, va, prot, wired);
4501: #endif
4502:
4503: write_user_windows(); /* paranoia */
4504:
4505: va = trunc_page(va);
4506: if (pm == pmap_kernel())
4507: newprot = prot & VM_PROT_WRITE ? PG_S|PG_W : PG_S;
4508: else
4509: newprot = prot & VM_PROT_WRITE ? PG_W : 0;
4510: vr = VA_VREG(va);
4511: vs = VA_VSEG(va);
4512: s = splvm(); /* conservative */
4513: rp = &pm->pm_regmap[vr];
4514: if (rp->rg_nsegmap == 0) {
4515: printf("pmap_changeprot: no segments in %d\n", vr);
4516: splx(s);
4517: return;
4518: }
4519: if (rp->rg_segmap == NULL) {
4520: printf("pmap_changeprot: no segments in %d!\n", vr);
4521: splx(s);
4522: return;
4523: }
4524: sp = &rp->rg_segmap[vs];
4525:
4526: pmap_stats.ps_changeprots++;
4527:
4528: #ifdef DEBUG
4529: if (pm != pmap_kernel() && sp->sg_pte == NULL)
4530: panic("pmap_changeprot: no pages");
4531: #endif
4532:
4533: /* update PTEs in software or hardware */
4534: if (sp->sg_pmeg == seginval) {
4535: int *pte = &sp->sg_pte[VA_VPG(va)];
4536:
4537: /* update in software */
4538: if ((*pte & PG_PROT) == newprot)
4539: goto useless;
4540: *pte = (*pte & ~PG_PROT) | newprot;
4541: } else {
4542: /* update in hardware */
4543: ctx = getcontext4();
4544: if (CTX_USABLE(pm,rp)) {
4545: /*
4546: * Use current context.
4547: * Flush cache if page has been referenced to
4548: * avoid stale protection bits in the cache tags.
4549: */
4550: setcontext4(pm->pm_ctxnum);
4551: tpte = getpte4(va);
4552: if ((tpte & PG_PROT) == newprot) {
4553: setcontext4(ctx);
4554: goto useless;
4555: }
4556: if ((tpte & (PG_U|PG_NC|PG_TYPE)) == (PG_U|PG_OBMEM))
4557: cache_flush_page((int)va);
4558: } else {
4559: setcontext4(0);
4560: /* XXX use per-cpu va? */
4561: if (HASSUN4_MMU3L)
4562: setregmap(0, tregion);
4563: setsegmap(0, sp->sg_pmeg);
4564: va = VA_VPG(va) << PGSHIFT;
4565: tpte = getpte4(va);
4566: if ((tpte & PG_PROT) == newprot) {
4567: setcontext4(ctx);
4568: goto useless;
4569: }
4570: }
4571: tpte = (tpte & ~PG_PROT) | newprot;
4572: setpte4(va, tpte);
4573: setcontext4(ctx);
4574: }
4575: splx(s);
4576: return;
4577:
4578: useless:
4579: /* only wiring changed, and we ignore wiring */
4580: pmap_stats.ps_useless_changeprots++;
4581: splx(s);
4582: }
4583:
4584: #endif /* sun4, 4c */
4585:
4586: #if defined(SUN4M) /* 4M version of protection routines above */
4587: /*
4588: * Lower (make more strict) the protection on the specified
4589: * physical page.
4590: *
4591: * There are only two cases: either the protection is going to 0
4592: * (in which case we do the dirty work here), or it is going
4593: * to read-only (in which case pv_changepte does the trick).
4594: */
4595: void
4596: pmap_page_protect4m(struct vm_page *pg, vm_prot_t prot)
4597: {
4598: struct pvlist *pv, *pv0, *npv;
4599: struct pmap *pm;
4600: int va, vr, vs, tpte;
4601: int flags, s, ctx;
4602: struct regmap *rp;
4603: struct segmap *sp;
4604:
4605: #ifdef DEBUG
4606: if ((pmapdebug & PDB_CHANGEPROT) ||
4607: (pmapdebug & PDB_REMOVE && prot == VM_PROT_NONE))
4608: 		printf("pmap_page_protect(%p, 0x%x)\n", pg, prot);
4609: #endif
4610: pv = &pg->mdpage.pv_head;
4611: /*
4612: * Skip operations that do not take away write permission.
4613: */
4614: if (prot & VM_PROT_WRITE)
4615: return;
4616: write_user_windows(); /* paranoia */
4617: if (prot & VM_PROT_READ) {
4618: pv_changepte4m(pv, 0, PPROT_WRITE);
4619: return;
4620: }
4621:
4622: /*
4623: * Remove all access to all people talking to this page.
4624: * Walk down PV list, removing all mappings.
4625: * The logic is much like that for pmap_remove,
4626: * but we know we are removing exactly one page.
4627: */
4628: s = splvm();
4629: if ((pm = pv->pv_pmap) == NULL) {
4630: splx(s);
4631: return;
4632: }
4633: ctx = getcontext4m();
4634: pv0 = pv;
4635: flags = pv->pv_flags;
4636: while (pv != NULL) {
4637: pm = pv->pv_pmap;
4638: va = pv->pv_va;
4639: vr = VA_VREG(va);
4640: vs = VA_VSEG(va);
4641: rp = &pm->pm_regmap[vr];
4642: #ifdef DIAGNOSTIC
4643: if (rp->rg_nsegmap == 0)
4644: panic("pmap_page_protect4m: empty vreg");
4645: #endif
4646: sp = &rp->rg_segmap[vs];
4647: #ifdef DIAGNOSTIC
4648: if (sp->sg_npte == 0)
4649: panic("pmap_page_protect4m: empty vseg");
4650: #endif
4651: sp->sg_npte--;
4652:
4653: /* Invalidate PTE in pagetables. Flush cache if necessary */
4654: if (pm->pm_ctx) {
4655: setcontext4m(pm->pm_ctxnum);
4656: cache_flush_page(va);
4657: tlb_flush_page(va);
4658: }
4659:
4660: tpte = sp->sg_pte[VA_SUN4M_VPG(va)];
4661:
4662: #ifdef DIAGNOSTIC
4663: if ((tpte & SRMMU_TETYPE) != SRMMU_TEPTE)
4664: panic("pmap_page_protect4m: !TEPTE");
4665: #endif
4666:
4667: flags |= MR4M(tpte);
4668:
4669: setpgt4m(&sp->sg_pte[VA_SUN4M_VPG(va)], SRMMU_TEINVALID);
4670:
4671: /* Entire segment is gone */
4672: if (sp->sg_npte == 0 && pm != pmap_kernel()) {
4673: if (pm->pm_ctx)
4674: tlb_flush_segment(vr, vs);
4675: setpgt4m(&rp->rg_seg_ptps[vs], SRMMU_TEINVALID);
4676: pool_put(&L23_pool, sp->sg_pte);
4677: sp->sg_pte = NULL;
4678:
4679: if (--rp->rg_nsegmap == 0) {
4680: if (pm->pm_ctx)
4681: tlb_flush_context();
4682: setpgt4m(&pm->pm_reg_ptps[vr], SRMMU_TEINVALID);
4683: free(rp->rg_segmap, M_VMPMAP);
4684: rp->rg_segmap = NULL;
4685: pool_put(&L23_pool, rp->rg_seg_ptps);
4686: }
4687: }
4688:
4689: npv = pv->pv_next;
4690: if (pv != pv0)
4691: pool_put(&pvpool, pv);
4692: pv = npv;
4693: }
4694: pv0->pv_pmap = NULL;
4695: pv0->pv_next = NULL;
4696: pv0->pv_flags = (flags | PV_C4M) & ~PV_ANC;
4697: setcontext4m(ctx);
4698: splx(s);
4699: }
4700:
4701: /*
4702: * Lower (make more strict) the protection on the specified
4703: * range of this pmap.
4704: *
4705: * There are only two cases: either the protection is going to 0
4706: * (in which case we call pmap_remove to do the dirty work), or
4707: * it is going from read/write to read-only. The latter is
4708: * fairly easy.
4709: */
4710: void
4711: pmap_protect4m(struct pmap *pm, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
4712: {
4713: int va, nva, vr, vs;
4714: int s, ctx;
4715: struct regmap *rp;
4716: struct segmap *sp;
4717: int newprot;
4718:
4719: if ((prot & VM_PROT_READ) == 0) {
4720: pmap_remove(pm, sva, eva);
4721: return;
4722: }
4723:
4724: /*
4725: * Since the caller might request either a removal of PROT_EXECUTE
4726: * or PROT_WRITE, we don't attempt to guess what to do, just lower
4727: * to read-only and let the real protection be faulted in.
4728: */
4729: newprot = pte_prot4m(pm, VM_PROT_READ);
4730:
4731: write_user_windows();
4732: ctx = getcontext4m();
4733: s = splvm();
4734: simple_lock(&pm->pm_lock);
4735:
4736: for (va = sva; va < eva;) {
4737: vr = VA_VREG(va);
4738: vs = VA_VSEG(va);
4739: rp = &pm->pm_regmap[vr];
4740: nva = VSTOVA(vr,vs + 1);
4741: if (nva == 0) /* XXX */
4742: 			panic("pmap_protect: last segment");	/* cannot happen (why?) */
4743: if (nva > eva)
4744: nva = eva;
4745: if (rp->rg_nsegmap == 0) {
4746: va = nva;
4747: continue;
4748: }
4749: #ifdef DEBUG
4750: if (rp->rg_segmap == NULL)
4751: panic("pmap_protect: no segments");
4752: #endif
4753: sp = &rp->rg_segmap[vs];
4754: if (sp->sg_npte == 0) {
4755: va = nva;
4756: continue;
4757: }
4758: #ifdef DEBUG
4759: if (sp->sg_pte == NULL)
4760: panic("pmap_protect: no pages");
4761: #endif
4762: /* pages loaded: take away write bits from MMU PTEs */
4763: if (pm->pm_ctx)
4764: setcontext4m(pm->pm_ctxnum);
4765:
4766: pmap_stats.ps_npg_prot_all += (nva - va) >> PGSHIFT;
4767: for (; va < nva; va += NBPG) {
4768: int tpte, npte;
4769:
4770: tpte = sp->sg_pte[VA_SUN4M_VPG(va)];
4771: npte = (tpte & ~SRMMU_PROT_MASK) | newprot;
4772:
4773: /* Only do work when needed. */
4774: if (npte == tpte)
4775: continue;
4776:
4777: pmap_stats.ps_npg_prot_actual++;
4778: /*
4779: * Flush cache so that any existing cache
4780: * tags are updated.
4781: */
4782: if (pm->pm_ctx) {
4783: if ((tpte & SRMMU_PGTYPE) == PG_SUN4M_OBMEM) {
4784: cache_flush_page(va);
4785: }
4786: tlb_flush_page(va);
4787: }
4788: setpgt4m(&sp->sg_pte[VA_SUN4M_VPG(va)], npte);
4789: }
4790: }
4791: simple_unlock(&pm->pm_lock);
4792: splx(s);
4793: setcontext4m(ctx);
4794: }
4795:
4796: /*
4797: * Change the protection and/or wired status of the given (MI) virtual page.
4798: * XXX: should have separate function (or flag) telling whether only wiring
4799: * is changing.
4800: */
4801: void
4802: pmap_changeprot4m(pm, va, prot, wired)
4803: struct pmap *pm;
4804: vaddr_t va;
4805: vm_prot_t prot;
4806: int wired;
4807: {
4808: int tpte, newprot, ctx, s;
4809: int *ptep;
4810:
4811: #ifdef DEBUG
4812: if (pmapdebug & PDB_CHANGEPROT)
4813: printf("pmap_changeprot(%p, 0x%lx, 0x%x, 0x%x)\n",
4814: pm, va, prot, wired);
4815: #endif
4816:
4817: write_user_windows(); /* paranoia */
4818:
4819: va = trunc_page(va);
4820: newprot = pte_prot4m(pm, prot);
4821:
4822: pmap_stats.ps_changeprots++;
4823:
4824: s = splvm(); /* conservative */
4825: ptep = getptep4m(pm, va);
4826: if (pm->pm_ctx) {
4827: ctx = getcontext4m();
4828: setcontext4m(pm->pm_ctxnum);
4829: /*
4830: * Use current context.
4831: * Flush cache if page has been referenced to
4832: * avoid stale protection bits in the cache tags.
4833: */
4834: tpte = *ptep;
4835: if ((tpte & (SRMMU_PG_C|SRMMU_PGTYPE)) ==
4836: (SRMMU_PG_C|PG_SUN4M_OBMEM))
4837: cache_flush_page(va);
4838: tlb_flush_page(va);
4839: setcontext4m(ctx);
4840: } else {
4841: tpte = *ptep;
4842: }
4843: if ((tpte & SRMMU_PROT_MASK) == newprot) {
4844: /* only wiring changed, and we ignore wiring */
4845: pmap_stats.ps_useless_changeprots++;
4846: goto out;
4847: }
4848: setpgt4m(ptep, (tpte & ~SRMMU_PROT_MASK) | newprot);
4849:
4850: out:
4851: splx(s);
4852: }
4853: #endif /* 4m */
4854:
4855: /*
4856: * Insert (MI) physical page pa at virtual address va in the given pmap.
4857: * NB: the pa parameter includes type bits PMAP_OBIO, PMAP_NC as necessary.
4858: *
4859: * If pa is not in the `managed' range it will not be `bank mapped'.
4860: * This works during bootstrap only because the first 4MB happens to
4861: * map one-to-one.
4862: *
4863: * There may already be something else there, or we might just be
4864: * changing protections and/or wiring on an existing mapping.
4865: * XXX should have different entry points for changing!
4866: */
4867:
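/*
 * Example (illustrative): a device page can be entered uncached by
 * folding the space/cacheability modifiers into `pa' as described
 * above, e.g.
 *
 *	pmap_enter(pmap_kernel(), va, pa | PMAP_OBIO | PMAP_NC,
 *	    VM_PROT_READ | VM_PROT_WRITE, PMAP_WIRED);
 */
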
4868: #if defined(SUN4) || defined(SUN4C)
4869:
4870: int
4871: pmap_enter4_4c(pm, va, pa, prot, flags)
4872: struct pmap *pm;
4873: vaddr_t va;
4874: paddr_t pa;
4875: vm_prot_t prot;
4876: int flags;
4877: {
4878: struct pvlist *pv;
4879: int pteproto, ctx;
4880: int ret;
4881:
4882: if (VA_INHOLE(va)) {
4883: #ifdef DEBUG
4884: printf("pmap_enter: pm %p, va 0x%lx, pa 0x%lx: in MMU hole\n",
4885: pm, va, pa);
4886: #endif
4887: return (EINVAL);
4888: }
4889:
4890: #ifdef DEBUG
4891: if (pmapdebug & PDB_ENTER)
4892: printf("pmap_enter(%p, 0x%lx, 0x%lx, 0x%x, 0x%x)\n",
4893: pm, va, pa, prot, flags);
4894: #endif
4895:
4896: pteproto = PG_V | PMAP_T2PTE_4(pa);
4897: pa &= ~PMAP_TNC_4;
4898: /*
4899: * Set up prototype for new PTE. Cannot set PG_NC from PV_NC yet
4900: * since the pvlist no-cache bit might change as a result of the
4901: * new mapping.
4902: */
4903: if ((pteproto & PG_TYPE) == PG_OBMEM)
4904: pv = pvhead(atop(pa));
4905: else
4906: pv = NULL;
4907:
4908: pteproto |= atop(pa) & PG_PFNUM;
4909: if (prot & VM_PROT_WRITE)
4910: pteproto |= PG_W;
4911:
4912: ctx = getcontext4();
4913: if (pm == pmap_kernel())
4914: ret = pmap_enk4_4c(pm, va, prot, flags, pv, pteproto | PG_S);
4915: else
4916: ret = pmap_enu4_4c(pm, va, prot, flags, pv, pteproto);
4917: #ifdef DIAGNOSTIC
4918: if ((flags & PMAP_CANFAIL) == 0 && ret != 0)
4919: panic("pmap_enter4_4c: can't fail, but did");
4920: #endif
4921: setcontext4(ctx);
4922:
4923: return (ret);
4924: }
4925:
4926: /* enter new (or change existing) kernel mapping */
4927: int
4928: pmap_enk4_4c(pm, va, prot, flags, pv, pteproto)
4929: struct pmap *pm;
4930: vaddr_t va;
4931: vm_prot_t prot;
4932: int flags;
4933: struct pvlist *pv;
4934: int pteproto;
4935: {
4936: int vr, vs, tpte, i, s;
4937: struct regmap *rp;
4938: struct segmap *sp;
4939: int wired = (flags & PMAP_WIRED) != 0;
4940:
4941: vr = VA_VREG(va);
4942: vs = VA_VSEG(va);
4943: rp = &pm->pm_regmap[vr];
4944: sp = &rp->rg_segmap[vs];
4945: s = splvm(); /* XXX way too conservative */
4946:
4947: #if defined(SUN4_MMU3L)
4948: if (HASSUN4_MMU3L && rp->rg_smeg == reginval) {
4949: vaddr_t tva;
4950: 		rp->rg_smeg = region_alloc(&region_locked, pm, vr)->me_cookie;
4951: i = ncontext - 1;
4952: do {
4953: setcontext4(i);
4954: setregmap(va, rp->rg_smeg);
4955: } while (--i >= 0);
4956:
4957: /* set all PTEs to invalid, then overwrite one PTE below */
4958: tva = VA_ROUNDDOWNTOREG(va);
4959: for (i = 0; i < NSEGRG; i++) {
4960: setsegmap(tva, rp->rg_segmap[i].sg_pmeg);
4961: tva += NBPSG;
4962: 		}
4963: }
4964: #endif
4965: if (sp->sg_pmeg != seginval && (tpte = getpte4(va)) & PG_V) {
4966: /* old mapping exists, and is of the same pa type */
4967: if ((tpte & (PG_PFNUM|PG_TYPE)) ==
4968: (pteproto & (PG_PFNUM|PG_TYPE))) {
4969: /* just changing protection and/or wiring */
4970: splx(s);
4971: pmap_changeprot4_4c(pm, va, prot, wired);
4972: return (0);
4973: }
4974:
4975: if ((tpte & PG_TYPE) == PG_OBMEM) {
4976: struct pvlist *pv1;
4977:
4978: /*
4979: * Switcheroo: changing pa for this va.
4980: * If old pa was managed, remove from pvlist.
4981: * If old page was cached, flush cache.
4982: */
4983: pv1 = pvhead(tpte & PG_PFNUM);
4984: if (pv1)
4985: pv_unlink4_4c(pv1, pm, va);
4986: if ((tpte & PG_NC) == 0) {
4987: setcontext4(0); /* ??? */
4988: cache_flush_page((int)va);
4989: }
4990: }
4991: } else {
4992: /* adding new entry */
4993: sp->sg_npte++;
4994: }
4995:
4996: /*
4997: * If the new mapping is for a managed PA, enter into pvlist.
4998: * Note that the mapping for a malloc page will always be
4999: * unique (hence will never cause a second call to malloc).
5000: */
5001: if (pv != NULL)
5002: pteproto |= pv_link4_4c(pv, pm, va, pteproto & PG_NC);
5003:
5004: if (sp->sg_pmeg == seginval) {
5005: int tva;
5006:
5007: /*
5008: * Allocate an MMU entry now (on locked list),
5009: * and map it into every context. Set all its
5010: * PTEs invalid (we will then overwrite one, but
5011: * this is more efficient than looping twice).
5012: */
5013: #ifdef DEBUG
5014: if (pm->pm_ctx == NULL || pm->pm_ctxnum != 0)
5015: panic("pmap_enk: kern seg but no kern ctx");
5016: #endif
5017: sp->sg_pmeg = me_alloc(&segm_locked, pm, vr, vs)->me_cookie;
5018: rp->rg_nsegmap++;
5019:
5020: #if defined(SUN4_MMU3L)
5021: if (HASSUN4_MMU3L)
5022: setsegmap(va, sp->sg_pmeg);
5023: else
5024: #endif
5025: {
5026: i = ncontext - 1;
5027: do {
5028: setcontext4(i);
5029: setsegmap(va, sp->sg_pmeg);
5030: } while (--i >= 0);
5031: }
5032:
5033: /* set all PTEs to invalid, then overwrite one PTE below */
5034: tva = VA_ROUNDDOWNTOSEG(va);
5035: i = NPTESG;
5036: do {
5037: setpte4(tva, 0);
5038: tva += NBPG;
5039: } while (--i > 0);
5040: }
5041:
5042: /* ptes kept in hardware only */
5043: setpte4(va, pteproto);
5044: splx(s);
5045:
5046: return (0);
5047: }
5048:
5049: /* enter new (or change existing) user mapping */
5050: int
5051: pmap_enu4_4c(pm, va, prot, flags, pv, pteproto)
5052: struct pmap *pm;
5053: vaddr_t va;
5054: vm_prot_t prot;
5055: int flags;
5056: struct pvlist *pv;
5057: int pteproto;
5058: {
5059: int vr, vs, *pte, tpte, pmeg, s, doflush;
5060: struct regmap *rp;
5061: struct segmap *sp;
5062: int wired = (flags & PMAP_WIRED) != 0;
5063:
5064: write_user_windows(); /* XXX conservative */
5065: vr = VA_VREG(va);
5066: vs = VA_VSEG(va);
5067: rp = &pm->pm_regmap[vr];
5068: s = splvm(); /* XXX conservative */
5069:
5070: /*
5071: * If there is no space in which the PTEs can be written
5072: * while they are not in the hardware, this must be a new
5073: * virtual segment. Get PTE space and count the segment.
5074: *
5075: * TO SPEED UP CTX ALLOC, PUT SEGMENT BOUNDS STUFF HERE
5076: * AND IN pmap_rmu()
5077: */
5078:
5079: GAP_SHRINK(pm,vr);
5080:
5081: #ifdef DEBUG
5082: if (pm->pm_gap_end < pm->pm_gap_start) {
5083: printf("pmap_enu: gap_start 0x%x, gap_end 0x%x",
5084: pm->pm_gap_start, pm->pm_gap_end);
5085: panic("pmap_enu: gap botch");
5086: }
5087: #endif
5088:
5089: if (rp->rg_segmap == NULL) {
5090: /* definitely a new mapping */
5091: int i;
5092: int size = NSEGRG * sizeof (struct segmap);
5093:
5094: sp = malloc((u_long)size, M_VMPMAP, M_NOWAIT);
5095: if (sp == NULL) {
5096: splx(s);
5097: return (ENOMEM);
5098: }
5099: qzero((caddr_t)sp, size);
5100: rp->rg_segmap = sp;
5101: rp->rg_nsegmap = 0;
5102: for (i = NSEGRG; --i >= 0;)
5103: sp++->sg_pmeg = seginval;
5104: }
5105:
5106: sp = &rp->rg_segmap[vs];
5107: if ((pte = sp->sg_pte) == NULL) {
5108: /* definitely a new mapping */
5109: int size = NPTESG * sizeof *pte;
5110:
5111: pte = malloc((u_long)size, M_VMPMAP, M_NOWAIT);
5112: if (pte == NULL) {
5113: splx(s);
5114: return (ENOMEM);
5115: }
5116: #ifdef DEBUG
5117: if (sp->sg_pmeg != seginval)
5118: panic("pmap_enter: new ptes, but not seginval");
5119: #endif
5120: qzero((caddr_t)pte, size);
5121: sp->sg_pte = pte;
5122: sp->sg_npte = 1;
5123: rp->rg_nsegmap++;
5124: } else {
5125: /* might be a change: fetch old pte */
5126: doflush = 0;
5127: if ((pmeg = sp->sg_pmeg) == seginval) {
5128: /* software pte */
5129: tpte = pte[VA_VPG(va)];
5130: } else {
5131: /* hardware pte */
5132: if (CTX_USABLE(pm,rp)) {
5133: setcontext4(pm->pm_ctxnum);
5134: tpte = getpte4(va);
5135: doflush = CACHEINFO.c_vactype != VAC_NONE;
5136: } else {
5137: setcontext4(0);
5138: /* XXX use per-cpu pteva? */
5139: if (HASSUN4_MMU3L)
5140: setregmap(0, tregion);
5141: setsegmap(0, pmeg);
5142: tpte = getpte4(VA_VPG(va) << PGSHIFT);
5143: }
5144: }
5145: if (tpte & PG_V) {
5146: /* old mapping exists, and is of the same pa type */
5147: if ((tpte & (PG_PFNUM|PG_TYPE)) ==
5148: (pteproto & (PG_PFNUM|PG_TYPE))) {
5149: /* just changing prot and/or wiring */
5150: splx(s);
5151: /* caller should call this directly: */
5152: pmap_changeprot4_4c(pm, va, prot, wired);
5153: if (wired)
5154: pm->pm_stats.wired_count++;
5155: else
5156: pm->pm_stats.wired_count--;
5157: return (0);
5158: }
5159: /*
5160: * Switcheroo: changing pa for this va.
5161: * If old pa was managed, remove from pvlist.
5162: * If old page was cached, flush cache.
5163: */
5164: if ((tpte & PG_TYPE) == PG_OBMEM) {
5165: struct pvlist *pv1;
5166:
5167: pv1 = pvhead(tpte & PG_PFNUM);
5168: if (pv1)
5169: pv_unlink4_4c(pv1, pm, va);
5170: if (doflush && (tpte & PG_NC) == 0)
5171: cache_flush_page((int)va);
5172: }
5173: } else {
5174: /* adding new entry */
5175: sp->sg_npte++;
5176:
5177: /*
5178: * Increment counters
5179: */
5180: if (wired)
5181: pm->pm_stats.wired_count++;
5182: }
5183: }
5184:
5185: if (pv != NULL)
5186: pteproto |= pv_link4_4c(pv, pm, va, pteproto & PG_NC);
5187:
5188: /*
5189: * Update hardware & software PTEs.
5190: */
5191: if ((pmeg = sp->sg_pmeg) != seginval) {
5192: /* ptes are in hardware */
5193: if (CTX_USABLE(pm,rp))
5194: setcontext4(pm->pm_ctxnum);
5195: else {
5196: setcontext4(0);
5197: /* XXX use per-cpu pteva? */
5198: if (HASSUN4_MMU3L)
5199: setregmap(0, tregion);
5200: setsegmap(0, pmeg);
5201: va = VA_VPG(va) << PGSHIFT;
5202: }
5203: setpte4(va, pteproto);
5204: }
5205: /* update software copy */
5206: pte += VA_VPG(va);
5207: *pte = pteproto;
5208:
5209: splx(s);
5210:
5211: return (0);
5212: }
5213:
5214: void
5215: pmap_kenter_pa4_4c(va, pa, prot)
5216: vaddr_t va;
5217: paddr_t pa;
5218: vm_prot_t prot;
5219: {
5220: struct pvlist *pv;
5221: int pteproto, ctx;
5222:
5223: pteproto = PG_S | PG_V | PMAP_T2PTE_4(pa);
5224: if (prot & VM_PROT_WRITE)
5225: pteproto |= PG_W;
5226:
5227: pa &= ~PMAP_TNC_4;
5228:
5229: if ((pteproto & PG_TYPE) == PG_OBMEM)
5230: pv = pvhead(atop(pa));
5231: else
5232: pv = NULL;
5233:
5234: pteproto |= atop(pa) & PG_PFNUM;
5235:
5236: ctx = getcontext4();
5237: pmap_enk4_4c(pmap_kernel(), va, prot, PMAP_WIRED, pv, pteproto);
5238: setcontext4(ctx);
5239: }
5240:
5241: #endif /*sun4,4c*/
5242:
5243: #if defined(SUN4M) /* Sun4M versions of enter routines */
5244: /*
5245: * Insert (MI) physical page pa at virtual address va in the given pmap.
5246: * NB: the pa parameter includes type bits PMAP_OBIO, PMAP_NC as necessary.
5247: *
5248: * If pa is not in the `managed' range it will not be `bank mapped'.
5249: * This works during bootstrap only because the first 4MB happens to
5250: * map one-to-one.
5251: *
5252: * There may already be something else there, or we might just be
5253: * changing protections and/or wiring on an existing mapping.
5254: */
5255:
5256: int
5257: pmap_enter4m(pm, va, pa, prot, flags)
5258: struct pmap *pm;
5259: vaddr_t va;
5260: paddr_t pa;
5261: vm_prot_t prot;
5262: int flags;
5263: {
5264: struct pvlist *pv;
5265: int pteproto, ctx;
5266: int ret;
5267:
5268: #ifdef DEBUG
5269: if (pmapdebug & PDB_ENTER)
5270: printf("pmap_enter(%p, 0x%lx, 0x%lx, 0x%x, 0x%x)\n",
5271: pm, va, pa, prot, flags);
5272: #endif
5273:
5274: /* Initialise pteproto with cache bit */
5275: pteproto = (pa & PMAP_NC) == 0 ? SRMMU_PG_C : 0;
5276:
5277: #ifdef DEBUG
5278: if (pa & PMAP_TYPE_SRMMU) { /* this page goes in an iospace */
5279: if (cpuinfo.cpu_type == CPUTYP_MS1)
5280: panic("pmap_enter4m: attempt to use 36-bit iospace on"
5281: " MicroSPARC");
5282: }
5283: #endif
5284: pteproto |= PMAP_T2PTE_SRMMU(pa);
5285:
5286: pteproto |= SRMMU_TEPTE;
5287:
5288: pa &= ~PMAP_TNC_SRMMU;
5289: /*
5290: * Set up prototype for new PTE. Cannot set PG_NC from PV_NC yet
5291: * since the pvlist no-cache bit might change as a result of the
5292: * new mapping.
5293: */
5294: if ((pteproto & SRMMU_PGTYPE) == PG_SUN4M_OBMEM)
5295: pv = pvhead(atop(pa));
5296: else
5297: pv = NULL;
5298:
5299: pteproto |= (atop(pa) << SRMMU_PPNSHIFT);
5300:
5301: /* correct protections */
5302: pteproto |= pte_prot4m(pm, prot);
5303:
5304: ctx = getcontext4m();
5305: if (pm == pmap_kernel())
5306: ret = pmap_enk4m(pm, va, prot, flags, pv, pteproto);
5307: else
5308: ret = pmap_enu4m(pm, va, prot, flags, pv, pteproto);
5309: #ifdef DIAGNOSTIC
5310: if ((flags & PMAP_CANFAIL) == 0 && ret != 0)
5311: panic("pmap_enter4m: can't fail, but did");
5312: #endif
5313: if (pv) {
5314: if (flags & VM_PROT_WRITE)
5315: pv->pv_flags |= PV_MOD4M;
5316: if (flags & VM_PROT_READ)
5317: pv->pv_flags |= PV_REF4M;
5318: }
5319: setcontext4m(ctx);
5320:
5321: return (ret);
5322: }
5323:
5324: /* enter new (or change existing) kernel mapping */
5325: int
5326: pmap_enk4m(pm, va, prot, flags, pv, pteproto)
5327: struct pmap *pm;
5328: vaddr_t va;
5329: vm_prot_t prot;
5330: int flags;
5331: struct pvlist *pv;
5332: int pteproto;
5333: {
5334: int tpte, s;
5335: struct regmap *rp;
5336: struct segmap *sp;
5337: int wired = (flags & PMAP_WIRED) != 0;
5338:
5339: #ifdef DIAGNOSTIC
5340: if (va < VM_MIN_KERNEL_ADDRESS)
5341: panic("pmap_enk4m: can't enter va 0x%lx below VM_MIN_KERNEL_ADDRESS", va);
5342: #endif
5343: rp = &pm->pm_regmap[VA_VREG(va)];
5344: sp = &rp->rg_segmap[VA_VSEG(va)];
5345:
5346: s = splvm(); /* XXX way too conservative */
5347:
5348: #ifdef DEBUG
5349: if (rp->rg_seg_ptps == NULL) /* enter new region */
5350: panic("pmap_enk4m: missing region table for va 0x%lx", va);
5351: if (sp->sg_pte == NULL) /* If no existing pagetable */
5352: panic("pmap_enk4m: missing segment table for va 0x%lx", va);
5353: #endif
5354:
5355: tpte = sp->sg_pte[VA_SUN4M_VPG(va)];
5356: if ((tpte & SRMMU_TETYPE) == SRMMU_TEPTE) {
5357: /* old mapping exists, and is of the same pa type */
5358:
5359: if ((tpte & SRMMU_PPNMASK) == (pteproto & SRMMU_PPNMASK)) {
5360: /* just changing protection and/or wiring */
5361: splx(s);
5362: pmap_changeprot4m(pm, va, prot, wired);
5363: return (0);
5364: }
5365:
5366: if ((tpte & SRMMU_PGTYPE) == PG_SUN4M_OBMEM) {
5367: struct pvlist *pv1;
5368:
5369: /*
5370: * Switcheroo: changing pa for this va.
5371: * If old pa was managed, remove from pvlist.
5372: * If old page was cached, flush cache.
5373: */
5374: pv1 = pvhead((tpte & SRMMU_PPNMASK) >> SRMMU_PPNSHIFT);
5375: if (pv1)
5376: pv_unlink4m(pv1, pm, va);
5377: if (tpte & SRMMU_PG_C) {
5378: setcontext4m(0); /* ??? */
5379: cache_flush_page((int)va);
5380: }
5381: }
5382: } else {
5383: /* adding new entry */
5384: sp->sg_npte++;
5385: }
5386:
5387: /*
5388: * If the new mapping is for a managed PA, enter into pvlist.
5389: * Note that the mapping for a malloc page will always be
5390: * unique (hence will never cause a second call to malloc).
5391: */
5392: if (pv != NULL)
5393: pteproto &= ~(pv_link4m(pv, pm, va, (pteproto & SRMMU_PG_C) == 0));
5394:
5395: tlb_flush_page(va);
5396: setpgt4m(&sp->sg_pte[VA_SUN4M_VPG(va)], pteproto);
5397:
5398: splx(s);
5399:
5400: return (0);
5401: }
5402:
5403: /* enter new (or change existing) user mapping */
5404: int
5405: pmap_enu4m(pm, va, prot, flags, pv, pteproto)
5406: struct pmap *pm;
5407: vaddr_t va;
5408: vm_prot_t prot;
5409: int flags;
5410: struct pvlist *pv;
5411: int pteproto;
5412: {
5413: int vr, vs, *pte, tpte, s;
5414: struct regmap *rp;
5415: struct segmap *sp;
5416: int wired = (flags & PMAP_WIRED) != 0;
5417:
5418: #ifdef DEBUG
5419: if (VM_MIN_KERNEL_ADDRESS < va)
5420: panic("pmap_enu4m: can't enter va 0x%lx above VM_MIN_KERNEL_ADDRESS", va);
5421: #endif
5422:
5423: write_user_windows(); /* XXX conservative */
5424: vr = VA_VREG(va);
5425: vs = VA_VSEG(va);
5426: rp = &pm->pm_regmap[vr];
5427: s = splvm(); /* XXX conservative */
5428: if (rp->rg_segmap == NULL) {
5429: /* definitely a new mapping */
5430: int size = NSEGRG * sizeof (struct segmap);
5431:
5432: sp = malloc((u_long)size, M_VMPMAP, M_NOWAIT);
5433: if (sp == NULL) {
5434: splx(s);
5435: return (ENOMEM);
5436: }
5437: qzero((caddr_t)sp, size);
5438: rp->rg_segmap = sp;
5439: rp->rg_nsegmap = 0;
5440: rp->rg_seg_ptps = NULL;
5441: }
5442:
5443: if (rp->rg_seg_ptps == NULL) {
5444: /* Need a segment table */
5445: int i, *ptd;
5446:
5447: ptd = pool_get(&L23_pool, PR_NOWAIT);
5448: if (ptd == NULL) {
5449: splx(s);
5450: return (ENOMEM);
5451: }
5452:
5453: rp->rg_seg_ptps = ptd;
5454: for (i = 0; i < SRMMU_L2SIZE; i++)
5455: setpgt4m(&ptd[i], SRMMU_TEINVALID);
5456: setpgt4m(&pm->pm_reg_ptps[vr],
5457: (VA2PA((caddr_t)ptd) >> SRMMU_PPNPASHIFT) | SRMMU_TEPTD);
5458: }
5459:
5460: sp = &rp->rg_segmap[vs];
5461: if ((pte = sp->sg_pte) == NULL) {
5462: /* definitely a new mapping */
5463: int i;
5464:
5465: pte = pool_get(&L23_pool, PR_NOWAIT);
5466: if (pte == NULL) {
5467: splx(s);
5468: return (ENOMEM);
5469: }
5470:
5471: sp->sg_pte = pte;
5472: sp->sg_npte = 1;
5473: rp->rg_nsegmap++;
5474: for (i = 0; i < SRMMU_L3SIZE; i++)
5475: setpgt4m(&pte[i], SRMMU_TEINVALID);
5476: setpgt4m(&rp->rg_seg_ptps[vs],
5477: (VA2PA((caddr_t)pte) >> SRMMU_PPNPASHIFT) | SRMMU_TEPTD);
5478: } else {
5479: /*
5480: * Might be a change: fetch old pte
5481: * Note we're only interested in the PTE's page frame
5482: * number and type bits, so the memory copy will do.
5483: */
5484: tpte = pte[VA_SUN4M_VPG(va)];
5485:
5486: if ((tpte & SRMMU_TETYPE) == SRMMU_TEPTE) {
5487: /* old mapping exists, and is of the same pa type */
5488: if ((tpte & SRMMU_PPNMASK) ==
5489: (pteproto & SRMMU_PPNMASK)) {
5490: /* just changing prot and/or wiring */
5491: splx(s);
5492: /* caller should call this directly: */
5493: pmap_changeprot4m(pm, va, prot, wired);
5494: if (wired)
5495: pm->pm_stats.wired_count++;
5496: else
5497: pm->pm_stats.wired_count--;
5498: return (0);
5499: }
5500: /*
5501: * Switcheroo: changing pa for this va.
5502: * If old pa was managed, remove from pvlist.
5503: * If old page was cached, flush cache.
5504: */
5505: if ((tpte & SRMMU_PGTYPE) == PG_SUN4M_OBMEM) {
5506: struct pvlist *pv1;
5507:
5508: pv1 = pvhead((tpte & SRMMU_PPNMASK) >>
5509: SRMMU_PPNSHIFT);
5510: if (pv1)
5511: pv_unlink4m(pv1, pm, va);
5512: if (pm->pm_ctx && (tpte & SRMMU_PG_C))
5513: cache_flush_page((int)va);
5514: }
5515: } else {
5516: /* adding new entry */
5517: sp->sg_npte++;
5518:
5519: /*
5520: * Increment counters
5521: */
5522: if (wired)
5523: pm->pm_stats.wired_count++;
5524: }
5525: }
5526: if (pv != NULL)
5527: pteproto &= ~(pv_link4m(pv, pm, va, (pteproto & SRMMU_PG_C) == 0));
5528:
5529: /*
5530: * Update PTEs, flush TLB as necessary.
5531: */
5532: if (pm->pm_ctx) {
5533: setcontext4m(pm->pm_ctxnum);
5534: tlb_flush_page(va);
5535: }
5536: setpgt4m(&sp->sg_pte[VA_SUN4M_VPG(va)], pteproto);
5537:
5538: splx(s);
5539:
5540: return (0);
5541: }
5542:
5543: void
5544: pmap_kenter_pa4m(va, pa, prot)
5545: vaddr_t va;
5546: paddr_t pa;
5547: vm_prot_t prot;
5548: {
5549: struct pvlist *pv;
5550: int pteproto, ctx;
5551:
5552: pteproto = ((pa & PMAP_NC) == 0 ? SRMMU_PG_C : 0) |
5553: PMAP_T2PTE_SRMMU(pa) | SRMMU_TEPTE |
5554: ((prot & VM_PROT_WRITE) ? PPROT_N_RWX : PPROT_N_RX);
5555:
5556: pa &= ~PMAP_TNC_SRMMU;
5557:
5558: pteproto |= atop(pa) << SRMMU_PPNSHIFT;
5559:
5560: pv = pvhead(atop(pa));
5561:
5562: ctx = getcontext4m();
5563: pmap_enk4m(pmap_kernel(), va, prot, PMAP_WIRED, pv, pteproto);
5564: setcontext4m(ctx);
5565: }
5566:
5567: #endif /* sun4m */
5568:
5569: /*
5570: * Change the wiring attribute for a map/virtual-address pair.
5571: */
5572: /* ARGSUSED */
5573: void
5574: pmap_unwire(pm, va)
5575: struct pmap *pm;
5576: vaddr_t va;
5577: {
5578:
5579: pmap_stats.ps_useless_changewire++;
5580: }
5581:
5582: /*
5583: * Extract the physical page address associated
5584: * with the given map/virtual_address pair.
5585: * GRR, the vm code knows; we should not have to do this!
5586: */
5587:
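/*
 * Example (illustrative):
 *
 *	paddr_t pa;
 *
 *	if (pmap_extract(pmap_kernel(), va, &pa) == FALSE)
 *		panic("va not mapped");
 *
 * The boolean return value distinguishes `no mapping' from a
 * legitimate physical address of 0.
 */
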
5588: #if defined(SUN4) || defined(SUN4C)
5589: boolean_t
5590: pmap_extract4_4c(pm, va, pa)
5591: struct pmap *pm;
5592: vaddr_t va;
5593: paddr_t *pa;
5594: {
5595: int tpte;
5596: int vr, vs;
5597: struct regmap *rp;
5598: struct segmap *sp;
5599:
5600: if (pm == NULL) {
5601: #ifdef DEBUG
5602: if (pmapdebug & PDB_FOLLOW)
5603: printf("pmap_extract: null pmap\n");
5604: #endif
5605: return (FALSE);
5606: }
5607: vr = VA_VREG(va);
5608: vs = VA_VSEG(va);
5609: rp = &pm->pm_regmap[vr];
5610: if (rp->rg_segmap == NULL) {
5611: #ifdef DEBUG
5612: if (pmapdebug & PDB_FOLLOW)
5613: printf("pmap_extract: invalid segment (%d)\n", vr);
5614: #endif
5615: return (FALSE);
5616: }
5617: sp = &rp->rg_segmap[vs];
5618:
5619: if (sp->sg_pmeg != seginval) {
5620: int ctx = getcontext4();
5621:
5622: if (CTX_USABLE(pm,rp)) {
5623: CHANGE_CONTEXTS(ctx, pm->pm_ctxnum);
5624: tpte = getpte4(va);
5625: } else {
5626: CHANGE_CONTEXTS(ctx, 0);
5627: if (HASSUN4_MMU3L)
5628: setregmap(0, tregion);
5629: setsegmap(0, sp->sg_pmeg);
5630: tpte = getpte4(VA_VPG(va) << PGSHIFT);
5631: }
5632: setcontext4(ctx);
5633: } else {
5634: int *pte = sp->sg_pte;
5635:
5636: if (pte == NULL) {
5637: #ifdef DEBUG
5638: if (pmapdebug & PDB_FOLLOW)
5639: printf("pmap_extract: invalid segment\n");
5640: #endif
5641: return (FALSE);
5642: }
5643: tpte = pte[VA_VPG(va)];
5644: }
5645: if ((tpte & PG_V) == 0) {
5646: #ifdef DEBUG
5647: if (pmapdebug & PDB_FOLLOW)
5648: printf("pmap_extract: invalid pte\n");
5649: #endif
5650: return (FALSE);
5651: }
5652: 	tpte &= PG_PFNUM;
5654: *pa = ((tpte << PGSHIFT) | (va & PGOFSET));
5655: return (TRUE);
5656: }
5657: #endif /*4,4c*/
5658:
5659: #if defined(SUN4M) /* 4m version of pmap_extract */
5660: /*
5661: * Extract the physical page address associated
5662: * with the given map/virtual_address pair.
5663: * GRR, the vm code knows; we should not have to do this!
5664: */
5665: boolean_t
5666: pmap_extract4m(pm, va, pa)
5667: struct pmap *pm;
5668: vaddr_t va;
5669: paddr_t *pa;
5670: {
5671: struct regmap *rm;
5672: struct segmap *sm;
5673: int pte;
5674:
5675: if (pm == NULL) {
5676: #ifdef DEBUG
5677: if (pmapdebug & PDB_FOLLOW)
5678: printf("pmap_extract: null pmap\n");
5679: #endif
5680: return (FALSE);
5681: }
5682:
5683: if ((rm = pm->pm_regmap) == NULL) {
5684: #ifdef DEBUG
5685: if (pmapdebug & PDB_FOLLOW)
5686: printf("pmap_extract: no regmap entry");
5687: #endif
5688: return (FALSE);
5689: }
5690:
5691: rm += VA_VREG(va);
5692: if ((sm = rm->rg_segmap) == NULL) {
5693: #ifdef DEBUG
5694: if (pmapdebug & PDB_FOLLOW)
5695: printf("pmap_extract: no segmap");
5696: #endif
5697: return (FALSE);
5698: }
5699:
5700: sm += VA_VSEG(va);
5701: if (sm->sg_pte == NULL) {
5702: #ifdef DEBUG
5703: if (pmapdebug & PDB_FOLLOW)
5704: 			printf("pmap_extract: no ptes\n");
5705: #endif
5706: return FALSE;
5707: }
5708:
5709: pte = sm->sg_pte[VA_SUN4M_VPG(va)];
5710: if ((pte & SRMMU_TETYPE) != SRMMU_TEPTE) {
5711: #ifdef DEBUG
5712: if (pmapdebug & PDB_FOLLOW)
5713: printf("pmap_extract: invalid pte of type %d\n",
5714: pte & SRMMU_TETYPE);
5715: #endif
5716: return (FALSE);
5717: }
5718:
5719: *pa = (ptoa((pte & SRMMU_PPNMASK) >> SRMMU_PPNSHIFT) | VA_OFF(va));
5720: return (TRUE);
5721: }
5722: #endif /* sun4m */
5723:
5724: #if defined(SUN4) || defined(SUN4C)
5725:
5726: /*
5727: * Clear the modify bit for the given physical page.
5728: */
5729: boolean_t
5730: pmap_clear_modify4_4c(struct vm_page *pg)
5731: {
5732: struct pvlist *pv;
5733: boolean_t ret;
5734:
5735: pv = &pg->mdpage.pv_head;
5736:
5737: (void) pv_syncflags4_4c(pv);
5738: ret = pv->pv_flags & PV_MOD;
5739: pv->pv_flags &= ~PV_MOD;
5740:
5741: return ret;
5742: }
5743:
5744: /*
5745: * Tell whether the given physical page has been modified.
5746: */
5747: boolean_t
5748: pmap_is_modified4_4c(struct vm_page *pg)
5749: {
5750: struct pvlist *pv;
5751:
5752: pv = &pg->mdpage.pv_head;
5753:
5754: return (pv->pv_flags & PV_MOD || pv_syncflags4_4c(pv) & PV_MOD);
5755: }
5756:
5757: /*
5758: * Clear the reference bit for the given physical page.
5759: */
5760: boolean_t
5761: pmap_clear_reference4_4c(struct vm_page *pg)
5762: {
5763: struct pvlist *pv;
5764: boolean_t ret;
5765:
5766: pv = &pg->mdpage.pv_head;
5767:
5768: (void) pv_syncflags4_4c(pv);
5769: ret = pv->pv_flags & PV_REF;
5770: pv->pv_flags &= ~PV_REF;
5771:
5772: return ret;
5773: }
5774:
5775: /*
5776: * Tell whether the given physical page has been referenced.
5777: */
5778: boolean_t
5779: pmap_is_referenced4_4c(struct vm_page *pg)
5780: {
5781: struct pvlist *pv;
5782:
5783: pv = &pg->mdpage.pv_head;
5784:
5785: return (pv->pv_flags & PV_REF || pv_syncflags4_4c(pv) & PV_REF);
5786: }
5787: #endif /*4,4c*/
5788:
5789: #if defined(SUN4M)
5790:
5791: /*
5792: * 4m versions of bit test/set routines
5793: *
5794: * Note that the 4m-specific routines should eventually service these
5795: * requests from their page tables, and the whole pvlist bit mess should
5796: * be dropped for the 4m (unless this causes a performance hit from
5797: * tracing down pagetables/regmap/segmaps).
5798: */
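
/*
 * Illustrative MI usage: the page daemon decides whether a page
 * must be cleaned before reuse with
 *
 *	if (pmap_is_modified(pg))
 *		... write the page back first ...
 *
 * which lands in the 4m routines below on SRMMU systems.
 */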
5799:
5800: /*
5801: * Clear the modify bit for the given physical page.
5802: */
5803: boolean_t
5804: pmap_clear_modify4m(struct vm_page *pg)
5805: {
5806: struct pvlist *pv;
5807: boolean_t ret;
5808:
5809: pv = &pg->mdpage.pv_head;
5810:
5811: (void) pv_syncflags4m(pv);
5812: ret = pv->pv_flags & PV_MOD4M;
5813: pv->pv_flags &= ~PV_MOD4M;
5814:
5815: return ret;
5816: }
5817:
5818: /*
5819: * Tell whether the given physical page has been modified.
5820: */
5821: boolean_t
5822: pmap_is_modified4m(struct vm_page *pg)
5823: {
5824: struct pvlist *pv;
5825:
5826: pv = &pg->mdpage.pv_head;
5827:
5828: return (pv->pv_flags & PV_MOD4M || pv_syncflags4m(pv) & PV_MOD4M);
5829: }
5830:
5831: /*
5832: * Clear the reference bit for the given physical page.
5833: */
5834: boolean_t
5835: pmap_clear_reference4m(struct vm_page *pg)
5836: {
5837: struct pvlist *pv;
5838: boolean_t ret;
5839:
5840: pv = &pg->mdpage.pv_head;
5841:
5842: (void) pv_syncflags4m(pv);
5843: ret = pv->pv_flags & PV_REF4M;
5844: pv->pv_flags &= ~PV_REF4M;
5845:
5846: return ret;
5847: }
5848:
5849: /*
5850: * Tell whether the given physical page has been referenced.
5851: */
5852: boolean_t
5853: pmap_is_referenced4m(struct vm_page *pg)
5854: {
5855: struct pvlist *pv;
5856:
5857: pv = &pg->mdpage.pv_head;
5858:
5859: return (pv->pv_flags & PV_REF4M || pv_syncflags4m(pv) & PV_REF4M);
5860: }
5861: #endif /* 4m */
5862:
5863: /*
5864: * Fill the given MI physical page with zero bytes.
5865: *
5866: * We avoid stomping on the cache.
5867: * XXX might be faster to use destination's context and allow cache to fill?
5868: */
5869:
5870: #if defined(SUN4) || defined(SUN4C)
5871:
5872: void
5873: pmap_zero_page4_4c(struct vm_page *pg)
5874: {
5875: paddr_t pa = VM_PAGE_TO_PHYS(pg);
5876: caddr_t va;
5877: int pte;
5878:
5879: /*
5880: * The following might not be necessary since the page
5881: * is being cleared because it is about to be allocated,
5882: * i.e., is in use by no one.
5883: */
5884: pg_flushcache(pg);
5885:
5886: pte = PG_V | PG_S | PG_W | PG_NC | (atop(pa) & PG_PFNUM);
5887:
5888: va = vpage[0];
5889: setpte4(va, pte);
5890: qzero(va, NBPG);
5891: setpte4(va, 0);
5892: }
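
/*
 * [Editor's sketch.]  pmap_zero_page4_4c() above works by composing a
 * sun4/4c PTE by hand (valid + supervisor + writable + no-cache, plus
 * the page frame number) and pointing the reserved kernel window
 * vpage[0] at the target page.  The general temporary-window pattern,
 * with a hypothetical helper:
 */
#if 0
void
with_page_window(caddr_t window, paddr_t pa, void (*op)(caddr_t, size_t))
{
	/* PG_NC keeps the operation from polluting the virtual cache. */
	setpte4(window, PG_V | PG_S | PG_W | PG_NC |
	    (atop(pa) & PG_PFNUM));
	(*op)(window, NBPG);
	setpte4(window, 0);		/* tear the mapping down again */
}
#endif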
5893:
5894: /*
5895: * Copy the given MI physical source page to its destination.
5896: *
5897: * We avoid stomping on the cache as above (with same `XXX' note).
5898: * We must first flush any write-back cache for the source page.
5899: * We go ahead and stomp on the kernel's virtual cache for the
5900: * source page, since cache line fills fetch memory much faster
5901: * than individual processor loads would.
5902: */
5903: void
5904: pmap_copy_page4_4c(struct vm_page *srcpg, struct vm_page *dstpg)
5905: {
5906: paddr_t src = VM_PAGE_TO_PHYS(srcpg);
5907: paddr_t dst = VM_PAGE_TO_PHYS(dstpg);
5908: caddr_t sva, dva;
5909: int spte, dpte;
5910:
5911: if (CACHEINFO.c_vactype == VAC_WRITEBACK)
5912: pg_flushcache(srcpg);
5913:
5914: spte = PG_V | PG_S | (atop(src) & PG_PFNUM);
5915:
5916: if (CACHEINFO.c_vactype != VAC_NONE)
5917: pg_flushcache(dstpg);
5918:
5919: dpte = PG_V | PG_S | PG_W | PG_NC | (atop(dst) & PG_PFNUM);
5920:
5921: sva = vpage[0];
5922: dva = vpage[1];
5923: setpte4(sva, spte);
5924: setpte4(dva, dpte);
5925: qcopy(sva, dva, NBPG); /* loads cache, so we must ... */
5926: cache_flush_page((int)sva);
5927: setpte4(sva, 0);
5928: setpte4(dva, 0);
5929: }
5930: #endif /* 4, 4c */
5931:
5932: #if defined(SUN4M) /* Sun4M version of copy/zero routines */
5933: /*
5934: * Fill the given MI physical page with zero bytes.
5935: *
5936: * We avoid stomping on the cache.
5937: * XXX might be faster to use destination's context and allow cache to fill?
5938: */
5939: void
5940: pmap_zero_page4m(struct vm_page *pg)
5941: {
5942: paddr_t pa = VM_PAGE_TO_PHYS(pg);
5943: static vaddr_t va;
5944: static int *ptep;
5945: int pte;
5946:
5947: if (ptep == NULL)
5948: ptep = getptep4m(pmap_kernel(), (va = (vaddr_t)vpage[0]));
5949:
5950: if (CACHEINFO.c_vactype != VAC_NONE) {
5951: /*
5952: * The following might not be necessary since the page
5953: * is being cleared because it is about to be allocated,
5954: * i.e., is in use by no one.
5955: */
5956: pg_flushcache(pg);
5957: }
5958:
5959: pte = (SRMMU_TEPTE | (atop(pa) << SRMMU_PPNSHIFT) | PPROT_N_RWX);
5960:
5961: if (cpuinfo.flags & CPUFLG_CACHE_MANDATORY)
5962: pte |= SRMMU_PG_C;
5963: else
5964: pte &= ~SRMMU_PG_C;
5965:
5966: tlb_flush_page(va);
5967: setpgt4m(ptep, pte);
5968: qzero((caddr_t)va, PAGE_SIZE);
5969: tlb_flush_page(va);
5970: setpgt4m(ptep, SRMMU_TEINVALID);
5971: }
5972:
5973: /*
5974: * Copy the given MI physical source page to its destination.
5975: *
5976: * We avoid stomping on the cache as above (with same `XXX' note).
5977: * We must first flush any write-back cache for the source page.
5978: * We go ahead and stomp on the kernel's virtual cache for the
5979: * source page, since cache line fills fetch memory much faster
5980: * than individual processor loads would.
5981: */
5982: void
5983: pmap_copy_page4m(struct vm_page *srcpg, struct vm_page *dstpg)
5984: {
5985: paddr_t src = VM_PAGE_TO_PHYS(srcpg);
5986: paddr_t dst = VM_PAGE_TO_PHYS(dstpg);
5987: static int *sptep, *dptep;
5988: static vaddr_t sva, dva;
5989: int spte, dpte;
5990:
5991: if (sptep == NULL) {
5992: sptep = getptep4m(pmap_kernel(), (sva = (vaddr_t)vpage[0]));
5993: dptep = getptep4m(pmap_kernel(), (dva = (vaddr_t)vpage[1]));
5994: }
5995:
5996: if (CACHEINFO.c_vactype == VAC_WRITEBACK)
5997: pg_flushcache(srcpg);
5998:
5999: spte = SRMMU_TEPTE | SRMMU_PG_C | (atop(src) << SRMMU_PPNSHIFT) |
6000: PPROT_N_RX;
6001:
6002: if (CACHEINFO.c_vactype != VAC_NONE)
6003: pg_flushcache(dstpg);
6004:
6005: dpte = (SRMMU_TEPTE | (atop(dst) << SRMMU_PPNSHIFT) | PPROT_N_RWX);
6006: if (cpuinfo.flags & CPUFLG_CACHE_MANDATORY)
6007: dpte |= SRMMU_PG_C;
6008: else
6009: dpte &= ~SRMMU_PG_C;
6010:
6011: tlb_flush_page(sva);
6012: setpgt4m(sptep, spte);
6013: tlb_flush_page(dva);
6014: setpgt4m(dptep, dpte);
6015: qcopy((caddr_t)sva, (caddr_t)dva, PAGE_SIZE);
6016: cache_flush_page((int)sva);
6017: tlb_flush_page(sva);
6018: setpgt4m(sptep, SRMMU_TEINVALID);
6019: tlb_flush_page(dva);
6020: setpgt4m(dptep, SRMMU_TEINVALID);
6021: }
6022: #endif /* Sun4M */
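
/*
 * [Editor's sketch.]  On the SRMMU a translation may be cached in the
 * TLB, which is why each page-table update above is preceded by a
 * tlb_flush_page().  The update discipline, as a hypothetical helper:
 */
#if 0
void
remap_window(int *ptep, vaddr_t va, paddr_t pa, int prot)
{
	int pte = SRMMU_TEPTE | (atop(pa) << SRMMU_PPNSHIFT) | prot;

	tlb_flush_page(va);	/* discard any stale cached translation */
	setpgt4m(ptep, pte);	/* then install the new entry */
}
#endif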
6023:
6024: /*
6025: * Turn the cache on or off for a given (va, number of pages) range.
6026: *
6027: * We set or clear the no-cache state of each PTE as requested; the
6028: * addresses must reside in locked kernel space. A cache flush is also done.
6029: */
6030: void
6031: kvm_setcache(va, npages, cached)
6032: caddr_t va;
6033: int npages;
6034: int cached;
6035: {
6036: int pte, ctx;
6037: struct pvlist *pv;
6038:
6039: if (CPU_ISSUN4M) {
6040: #if defined(SUN4M)
6041: ctx = getcontext4m();
6042: setcontext4m(0);
6043: for (; --npages >= 0; va += NBPG) {
6044: int *ptep;
6045:
6046: ptep = getptep4m(pmap_kernel(), (vaddr_t)va);
6047: pte = *ptep;
6048: #ifdef DIAGNOSTIC
6049: if ((pte & SRMMU_TETYPE) != SRMMU_TEPTE)
6050: panic("kvm_setcache: table entry not pte");
6051: #endif
6052: pv = pvhead((pte & SRMMU_PPNMASK) >> SRMMU_PPNSHIFT);
6053: if (pv) {
6054: if (cached)
6055: pv_changepte4m(pv, SRMMU_PG_C, 0);
6056: else
6057: pv_changepte4m(pv, 0, SRMMU_PG_C);
6058: }
6059: if (cached)
6060: pte |= SRMMU_PG_C;
6061: else
6062: pte &= ~SRMMU_PG_C;
6063: tlb_flush_page((vaddr_t)va);
6064: setpgt4m(ptep, pte);
6065:
6066: if ((pte & SRMMU_PGTYPE) == PG_SUN4M_OBMEM)
6067: cache_flush_page((int)va);
6068:
6069: }
6070: setcontext4m(ctx);
6071:
6072: #endif
6073: } else {
6074: #if defined(SUN4) || defined(SUN4C)
6075: ctx = getcontext4();
6076: setcontext4(0);
6077: for (; --npages >= 0; va += NBPG) {
6078: pte = getpte4(va);
6079: if ((pte & PG_V) == 0)
6080: panic("kvm_setcache !pg_v");
6081:
6082: pv = pvhead(pte & PG_PFNUM);
6083: /* XXX - we probably don't need to check for OBMEM */
6084: if ((pte & PG_TYPE) == PG_OBMEM && pv) {
6085: if (cached)
6086: pv_changepte4_4c(pv, 0, PG_NC);
6087: else
6088: pv_changepte4_4c(pv, PG_NC, 0);
6089: }
6090: if (cached)
6091: pte &= ~PG_NC;
6092: else
6093: pte |= PG_NC;
6094: setpte4(va, pte);
6095:
6096: if ((pte & PG_TYPE) == PG_OBMEM)
6097: cache_flush_page((int)va);
6098: }
6099: setcontext4(ctx);
6100: #endif
6101: }
6102: }
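
/*
 * [Editor's example.]  A typical caller of kvm_setcache() disables
 * caching on a wired kernel buffer before handing it to non-coherent
 * hardware; the buffer name and size below are hypothetical:
 */
#if 0
	/* From some driver attach routine: */
	extern caddr_t dma_buf;		/* wired kernel memory */

	kvm_setcache(dma_buf, atop(round_page(DMA_BUFSIZE)), 0);
#endif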
6103:
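/*
 * Recompute the resident page count of the given pmap from its
 * segment maps, cache it in pm_stats.resident_count, and return it.
 */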
6104: int
6105: pmap_count_ptes(pm)
6106: struct pmap *pm;
6107: {
6108: int idx, total;
6109: struct regmap *rp;
6110: struct segmap *sp;
6111:
6112: if (pm == pmap_kernel()) {
6113: rp = &pm->pm_regmap[NUREG];
6114: idx = NKREG;
6115: } else {
6116: rp = pm->pm_regmap;
6117: idx = NUREG;
6118: }
6119: for (total = 0; idx;)
6120: if ((sp = rp[--idx].rg_segmap) != NULL)
6121: total += sp->sg_npte;
6122: pm->pm_stats.resident_count = total;
6123: return (total);
6124: }
6125:
6126: /*
6127: * Find first virtual address >= *va that is
6128: * least likely to cause cache aliases.
6129: * (This will just seg-align mappings.)
6130: */
6131: void
6132: pmap_prefer(foff, vap)
6133: vaddr_t foff;
6134: vaddr_t *vap;
6135: {
6136: vaddr_t va = *vap;
6137: long d, m;
6138:
6139: #if defined(SUN4) || defined(SUN4C)
6140: if (VA_INHOLE(va))
6141: va = MMU_HOLE_END;
6142: #endif
6143:
6144: m = CACHE_ALIAS_DIST;
6145: if (m == 0) /* m=0 => no cache aliasing */
6146: return;
6147:
6148: d = foff - va;
6149: d &= (m - 1);
6150: *vap = va + d;
6151: }
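
/*
 * [Editor's worked example; the alias distance is hypothetical.]
 * Suppose CACHE_ALIAS_DIST m = 0x20000, foff = 0x5000, *vap = 0x31000:
 *
 *	d    = (0x5000 - 0x31000) & 0x1ffff = 0x14000
 *	*vap = 0x31000 + 0x14000            = 0x45000
 *
 * and 0x45000 % 0x20000 == 0x5000 == foff % m, so the file offset and
 * the chosen virtual address fall into the same cache alias bucket.
 */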
6152:
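/*
 * Establish the kernel red zone: invalidate the mapping at KERNBASE
 * so that stray accesses to the first kernel page fault instead of
 * silently corrupting memory.
 */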
6153: void
6154: pmap_redzone()
6155: {
6156: #if defined(SUN4M)
6157: if (CPU_ISSUN4M) {
6158: setpte4m(KERNBASE, 0);
6159: return;
6160: }
6161: #endif
6162: #if defined(SUN4) || defined(SUN4C)
6163: if (CPU_ISSUN4OR4C) {
6164: setpte4(KERNBASE, 0);
6165: return;
6166: }
6167: #endif
6168: }
6169:
6170: /*
6171: * Activate the address space for the specified process. If the
6172: * process is the current process, load the new MMU context.
6173: */
6174: void
6175: pmap_activate(p)
6176: struct proc *p;
6177: {
6178: pmap_t pmap = p->p_vmspace->vm_map.pmap;
6179: int s;
6180:
6181: /*
6182: * This is essentially the same thing that happens in cpu_switch()
6183: * when the newly selected process is about to run, except that we
6184: * have to make sure to clean the windows before we set
6185: * the new context.
6186: */
6187:
6188: s = splvm();
6189: if (p == curproc) {
6190: write_user_windows();
6191: if (pmap->pm_ctx == NULL) {
6192: ctx_alloc(pmap); /* performs setcontext() */
6193: } else {
6194: /* Do any cache flush needed on context switch */
6195: (*cpuinfo.pure_vcache_flush)();
6196: setcontext(pmap->pm_ctxnum);
6197: }
6198: }
6199: splx(s);
6200: }
6201:
6202: #ifdef DEBUG
6203: /*
6204: * Check consistency of a pmap (time consuming!).
6205: */
6206: void
6207: pm_check(s, pm)
6208: char *s;
6209: struct pmap *pm;
6210: {
6211: if (pm == pmap_kernel())
6212: pm_check_k(s, pm);
6213: else
6214: pm_check_u(s, pm);
6215: }
6216:
6217: void
6218: pm_check_u(s, pm)
6219: char *s;
6220: struct pmap *pm;
6221: {
6222: struct regmap *rp;
6223: struct segmap *sp;
6224: int n, vs, vr, j, m, *pte;
6225:
6226: if (pm->pm_regmap == NULL)
6227: panic("%s: CHK(pmap %p): no region mapping", s, pm);
6228:
6229: #if defined(SUN4M)
6230: if (CPU_ISSUN4M &&
6231: (pm->pm_reg_ptps == NULL ||
6232: pm->pm_reg_ptps_pa != VA2PA((caddr_t)pm->pm_reg_ptps)))
6233: panic("%s: CHK(pmap %p): no SRMMU region table or bad pa: "
6234: "tblva=%p, tblpa=0x%x",
6235: s, pm, pm->pm_reg_ptps, pm->pm_reg_ptps_pa);
6236:
6237: if (CPU_ISSUN4M && pm->pm_ctx != NULL &&
6238: (cpuinfo.ctx_tbl[pm->pm_ctxnum] != ((VA2PA((caddr_t)pm->pm_reg_ptps)
6239: >> SRMMU_PPNPASHIFT) |
6240: SRMMU_TEPTD)))
6241: panic("%s: CHK(pmap %p): SRMMU region table at 0x%x not installed "
6242: "for context %d", s, pm, pm->pm_reg_ptps_pa, pm->pm_ctxnum);
6243: #endif
6244:
6245: for (vr = 0; vr < NUREG; vr++) {
6246: rp = &pm->pm_regmap[vr];
6247: if (rp->rg_nsegmap == 0)
6248: continue;
6249: if (rp->rg_segmap == NULL)
6250: panic("%s: CHK(vr %d): nsegmap = %d; sp==NULL",
6251: s, vr, rp->rg_nsegmap);
6252: #if defined(SUN4M)
6253: if (CPU_ISSUN4M && rp->rg_seg_ptps == NULL)
6254: panic("%s: CHK(vr %d): nsegmap=%d; no SRMMU segment table",
6255: s, vr, rp->rg_nsegmap);
6256: if (CPU_ISSUN4M &&
6257: pm->pm_reg_ptps[vr] != ((VA2PA((caddr_t)rp->rg_seg_ptps) >>
6258: SRMMU_PPNPASHIFT) | SRMMU_TEPTD))
6259: panic("%s: CHK(vr %d): SRMMU segtbl not installed",s,vr);
6260: #endif
6261: if ((unsigned int)rp < VM_MIN_KERNEL_ADDRESS)
6262: panic("%s: rp=%p", s, rp);
6263: n = 0;
6264: for (vs = 0; vs < NSEGRG; vs++) {
6265: sp = &rp->rg_segmap[vs];
6266: if ((unsigned int)sp < VM_MIN_KERNEL_ADDRESS)
6267: panic("%s: sp=%p", s, sp);
6268: if (sp->sg_npte != 0) {
6269: n++;
6270: if (sp->sg_pte == NULL)
6271: panic("%s: CHK(vr %d, vs %d): npte=%d, "
6272: "pte=NULL", s, vr, vs, sp->sg_npte);
6273: #if defined(SUN4M)
6274: if (CPU_ISSUN4M &&
6275: rp->rg_seg_ptps[vs] !=
6276: ((VA2PA((caddr_t)sp->sg_pte)
6277: >> SRMMU_PPNPASHIFT) |
6278: SRMMU_TEPTD))
6279: panic("%s: CHK(vr %d, vs %d): SRMMU page "
6280: "table not installed correctly",s,vr,
6281: vs);
6282: #endif
6283: pte=sp->sg_pte;
6284: m = 0;
6285: for (j=0; j<NPTESG; j++,pte++)
6286: if ((CPU_ISSUN4M
6287: ?((*pte & SRMMU_TETYPE) == SRMMU_TEPTE)
6288: :(*pte & PG_V)))
6289: m++;
6290: if (m != sp->sg_npte)
6291: /*if (pmapdebug & 0x10000)*/
6292: printf("%s: user CHK(vr %d, vs %d): "
6293: "npte(%d) != # valid(%d)\n",
6294: s, vr, vs, sp->sg_npte, m);
6295: }
6296: }
6297: if (n != rp->rg_nsegmap)
6298: panic("%s: CHK(vr %d): inconsistent "
6299: "# of segmaps: %d, should be %d",
6300: s, vr, rp->rg_nsegmap, n);
6301: }
6302: return;
6303: }
6304:
6305: void
6306: pm_check_k(s, pm) /* Note: not as extensive as pm_check_u. */
6307: char *s;
6308: struct pmap *pm;
6309: {
6310: struct regmap *rp;
6311: int vr, vs, n;
6312:
6313: if (pm->pm_regmap == NULL)
6314: panic("%s: CHK(pmap %p): no region mapping", s, pm);
6315:
6316: #if defined(SUN4M)
6317: if (CPU_ISSUN4M &&
6318: (pm->pm_reg_ptps == NULL ||
6319: pm->pm_reg_ptps_pa != VA2PA((caddr_t)pm->pm_reg_ptps)))
6320: panic("%s: CHK(pmap %p): no SRMMU region table or bad pa: tblva=%p, tblpa=0x%x",
6321: s, pm, pm->pm_reg_ptps, pm->pm_reg_ptps_pa);
6322:
6323: if (CPU_ISSUN4M &&
6324: (cpuinfo.ctx_tbl[0] != ((VA2PA((caddr_t)pm->pm_reg_ptps) >>
6325: SRMMU_PPNPASHIFT) | SRMMU_TEPTD)))
6326: panic("%s: CHK(pmap %p): SRMMU region table at 0x%x not installed "
6327: "for context %d", s, pm, pm->pm_reg_ptps_pa, 0);
6328: #endif
6329: for (vr = NUREG; vr < NUREG+NKREG; vr++) {
6330: rp = &pm->pm_regmap[vr];
6331: if (rp->rg_segmap == NULL)
6332: panic("%s: CHK(vr %d): nsegmap = %d; sp==NULL",
6333: s, vr, rp->rg_nsegmap);
6334: if (rp->rg_nsegmap == 0)
6335: continue;
6336: #if defined(SUN4M)
6337: if (CPU_ISSUN4M && rp->rg_seg_ptps == NULL)
6338: panic("%s: CHK(vr %d): nsegmap=%d; no SRMMU segment table",
6339: s, vr, rp->rg_nsegmap);
6340: if (CPU_ISSUN4M &&
6341: pm->pm_reg_ptps[vr] != ((VA2PA((caddr_t)rp->rg_seg_ptps) >>
6342: SRMMU_PPNPASHIFT) | SRMMU_TEPTD))
6343: panic("%s: CHK(vr %d): SRMMU segtbl not installed",s,vr);
6344: #endif
6345: if (CPU_ISSUN4M) {
6346: n = NSEGRG;
6347: } else {
6348: for (n = 0, vs = 0; vs < NSEGRG; vs++) {
6349: if (rp->rg_segmap[vs].sg_npte)
6350: n++;
6351: }
6352: }
6353: if (n != rp->rg_nsegmap)
6354: printf("%s: kernel CHK(vr %d): inconsistent "
6355: "# of segmaps: %d, should be %d\n",
6356: s, vr, rp->rg_nsegmap, n);
6357: }
6358: return;
6359: }
6360: #endif
6361:
6362: /*
6363: * Return the number of bytes that pmap_dumpmmu() will dump.
6364: * For each pmeg in the MMU, we'll write NPTESG PTEs.
6365: * The last page or two contain header data so libkvm can bootstrap.
6366: */
6367: int
6368: pmap_dumpsize()
6369: {
6370: long sz;
6371:
6372: sz = ALIGN(sizeof(kcore_seg_t)) + ALIGN(sizeof(cpu_kcore_hdr_t));
6373: sz += npmemarr * sizeof(phys_ram_seg_t);
6374:
6375: if (CPU_ISSUN4OR4C)
6376: sz += (seginval + 1) * NPTESG * sizeof(int);
6377:
6378: return (btoc(sz));
6379: }
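
/*
 * [Editor's worked example; all figures hypothetical.]  On a sun4c
 * with npmemarr = 2 and seginval = 127 (i.e. 128 pmegs of NPTESG = 64
 * PTEs each), the computation above comes to
 *
 *	sz = ALIGN(sizeof(kcore_seg_t)) + ALIGN(sizeof(cpu_kcore_hdr_t))
 *	   + 2 * sizeof(phys_ram_seg_t)
 *	   + 128 * 64 * sizeof(int)		(32768 bytes of PTEs)
 *
 * which btoc() then rounds up to whole pages.
 */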
6380:
6381: /*
6382: * Write the mmu contents to the dump device.
6383: * This gets appended to the end of a crash dump since
6384: * there is no in-core copy of kernel memory mappings on a 4/4c machine.
6385: */
6386: int
6387: pmap_dumpmmu(dump, blkno)
6388: daddr64_t blkno;
6389: int (*dump)(dev_t, daddr64_t, caddr_t, size_t);
6390: {
6391: kcore_seg_t *ksegp;
6392: cpu_kcore_hdr_t *kcpup;
6393: phys_ram_seg_t memseg;
6394: int error = 0;
6395: int i, memsegoffset, pmegoffset;
6396: int buffer[dbtob(1) / sizeof(int)];
6397: int *bp, *ep;
6398: #if defined(SUN4C) || defined(SUN4)
6399: int pmeg;
6400: #endif
6401:
6402: #define EXPEDITE(p,n) do { \
6403: int *sp = (int *)(p); \
6404: int sz = (n); \
6405: while (sz > 0) { \
6406: *bp++ = *sp++; \
6407: if (bp >= ep) { \
6408: error = (*dump)(dumpdev, blkno, \
6409: (caddr_t)buffer, dbtob(1)); \
6410: if (error != 0) \
6411: return (error); \
6412: ++blkno; \
6413: bp = buffer; \
6414: } \
6415: sz -= 4; \
6416: } \
6417: } while (0)
6418:
6419: setcontext(0);
6420:
6421: /* Setup bookkeeping pointers */
6422: bp = buffer;
6423: ep = &buffer[sizeof(buffer) / sizeof(buffer[0])];
6424:
6425: /* Fill in MI segment header */
6426: ksegp = (kcore_seg_t *)bp;
6427: CORE_SETMAGIC(*ksegp, KCORE_MAGIC, MID_MACHINE, CORE_CPU);
6428: ksegp->c_size = ctob(pmap_dumpsize()) - ALIGN(sizeof(kcore_seg_t));
6429:
6430: /* Fill in MD segment header (interpreted by MD part of libkvm) */
6431: kcpup = (cpu_kcore_hdr_t *)((int)bp + ALIGN(sizeof(kcore_seg_t)));
6432: kcpup->cputype = cputyp;
6433: kcpup->nmemseg = npmemarr;
6434: kcpup->memsegoffset = memsegoffset = ALIGN(sizeof(cpu_kcore_hdr_t));
6435: kcpup->npmeg = (CPU_ISSUN4OR4C) ? seginval + 1 : 0;
6436: kcpup->pmegoffset = pmegoffset =
6437: memsegoffset + npmemarr * sizeof(phys_ram_seg_t);
6438:
6439: /* Note: we have assumed everything fits in buffer[] so far... */
6440: bp = (int *)&kcpup->segmap_store;
6441: EXPEDITE(&kernel_segmap_store, sizeof(kernel_segmap_store));
6442:
6443: /* Align storage for upcoming quad-aligned segment array */
6444: while (bp != (int *)ALIGN(bp)) {
6445: int dummy = 0;
6446: EXPEDITE(&dummy, 4);
6447: }
6448: for (i = 0; i < npmemarr; i++) {
6449: memseg.start = pmemarr[i].addr;
6450: memseg.size = pmemarr[i].len;
6451: EXPEDITE(&memseg, sizeof(phys_ram_seg_t));
6452: }
6453:
6454: if (CPU_ISSUN4M)
6455: goto out;
6456:
6457: #if defined(SUN4C) || defined(SUN4)
6458: /*
6459: * dump page table entries
6460: *
6461: * We dump each pmeg in order (by segment number). Since the MMU
6462: * automatically maps the given virtual segment to a pmeg we must
6463: * iterate over the segments by incrementing an unused segment slot
6464: * in the MMU. This fixed segment number is used in the virtual
6465: * address argument to getpte().
6466: */
6467:
6468: /*
6469: * Go through the pmegs and dump each one.
6470: */
6471: for (pmeg = 0; pmeg <= seginval; ++pmeg) {
6472: int va = 0;
6473:
6474: setsegmap(va, pmeg);
6475: i = NPTESG;
6476: do {
6477: int pte = getpte4(va);
6478: EXPEDITE(&pte, sizeof(pte));
6479: va += NBPG;
6480: } while (--i > 0);
6481: }
6482: setsegmap(0, seginval);
6483: #endif
6484:
6485: out:
6486: if (bp != buffer)
6487: error = (*dump)(dumpdev, blkno++, (caddr_t)buffer, dbtob(1));
6488:
6489: return (error);
6490: }
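
/*
 * [Editor's sketch.]  EXPEDITE() above implements a fixed-buffer
 * writer: words are staged into buffer[] and flushed to the dump
 * device one disk block at a time whenever the buffer fills.  The
 * same pattern written out as a function (names hypothetical):
 */
#if 0
int
stage_words(int (*dump)(dev_t, daddr64_t, caddr_t, size_t),
    daddr64_t *blkno, int *buf, int **bpp, int *ep,
    const int *src, int nbytes)
{
	int error;

	while (nbytes > 0) {
		*(*bpp)++ = *src++;
		if (*bpp >= ep) {	/* staging block full: flush it */
			error = (*dump)(dumpdev, (*blkno)++,
			    (caddr_t)buf, dbtob(1));
			if (error != 0)
				return (error);
			*bpp = buf;	/* start refilling the buffer */
		}
		nbytes -= sizeof(int);
	}
	return (0);
}
#endif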
6491:
6492: /*
6493: * Helper function for debuggers.
6494: */
6495: void
6496: pmap_writetext(dst, ch)
6497: unsigned char *dst;
6498: int ch;
6499: {
6500: int s, pte0, pte, ctx;
6501: vaddr_t va;
6502:
6503: s = splvm();
6504: va = (unsigned long)dst & (~PGOFSET);
6505: cpuinfo.cache_flush(dst, 1);
6506:
6507: ctx = getcontext();
6508: setcontext(0);
6509:
6510: #if defined(SUN4M)
6511: if (CPU_ISSUN4M) {
6512: int *ptep;
6513:
6514: ptep = getptep4m(pmap_kernel(), va);
6515: pte0 = *ptep;
6516: if ((pte0 & SRMMU_TETYPE) != SRMMU_TEPTE) {
6517: splx(s);
6518: return;
6519: }
6520: pte = pte0 | PPROT_WRITE;
6521: tlb_flush_page((vaddr_t)va);
6522: setpgt4m(ptep, pte);
6523: *dst = (unsigned char)ch;
6524: tlb_flush_page((vaddr_t)va);
6525: setpgt4m(ptep, pte0);
6526: }
6527: #endif
6528: #if defined(SUN4) || defined(SUN4C)
6529: if (CPU_ISSUN4C || CPU_ISSUN4) {
6530: pte0 = getpte4(va);
6531: if ((pte0 & PG_V) == 0) {
6532: splx(s);
6533: return;
6534: }
6535: pte = pte0 | PG_W;
6536: setpte4(va, pte);
6537: *dst = (unsigned char)ch;
6538: setpte4(va, pte0);
6539: }
6540: #endif
6541: cpuinfo.cache_flush(dst, 1);
6542: setcontext(ctx);
6543: splx(s);
6544: }
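
/*
 * [Editor's example.]  A debugger could use pmap_writetext() to plant
 * a SPARC breakpoint (`ta 1', opcode 0x91d02001) in kernel text one
 * byte at a time; the address below is hypothetical:
 */
#if 0
	unsigned char *ip = (unsigned char *)0xf0001234;
	unsigned int bp = 0x91d02001;		/* ta 1 */
	int i;

	/* SPARC is big-endian: write the most significant byte first. */
	for (i = 0; i < 4; i++)
		pmap_writetext(ip + i, (bp >> (24 - 8 * i)) & 0xff);
#endif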
6545:
6546: #ifdef EXTREME_DEBUG
6547:
6548: static void test_region(int, int, int);
6549:
6550: void
6551: debug_pagetables()
6552: {
6553: int i;
6554: int *regtbl;
6555: int te;
6556:
6557: printf("\nncontext=%d. ",ncontext);
6558: printf("Context table is at va 0x%x. Level 0 PTP: 0x%x\n",
6559: cpuinfo.ctx_tbl, cpuinfo.ctx_tbl[0]);
6560: printf("Context 0 region table is at va 0x%x, pa 0x%x. Contents:\n",
6561: pmap_kernel()->pm_reg_ptps, pmap_kernel()->pm_reg_ptps_pa);
6562:
6563: regtbl = pmap_kernel()->pm_reg_ptps;
6564:
6565: printf("PROM vector is at 0x%x\n",promvec);
6566: printf("PROM reboot routine is at 0x%x\n",promvec->pv_reboot);
6567: printf("PROM abort routine is at 0x%x\n",promvec->pv_abort);
6568: printf("PROM halt routine is at 0x%x\n",promvec->pv_halt);
6569: