/*	$OpenBSD: pmap.h,v 1.41 2007/06/06 17:15:12 deraadt Exp $	*/
/*	$NetBSD: pmap.h,v 1.30 1997/08/04 20:00:47 pk Exp $ */

/*
 * Copyright (c) 1996
 *	The President and Fellows of Harvard College. All rights reserved.
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California. All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by Aaron Brown and
 *	Harvard University.
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Aaron Brown and
 *	Harvard University.
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap.h	8.1 (Berkeley) 6/11/93
 */

#ifndef _SPARC_PMAP_H_
#define _SPARC_PMAP_H_

#include <machine/pte.h>

/*
 * Pmap structure.
 *
 * The pmap structure really comes in two variants, one---a single
 * instance---for kernel virtual memory and the other---up to nproc
 * instances---for user virtual memory. Unfortunately, we have to mash
 * both into the same structure. Fortunately, they are almost the same.
 *
 * The kernel begins at 0xf8000000 and runs to 0xffffffff (although
 * some of this is not actually used). Kernel space, including DVMA
 * space (for now?), is mapped identically into all user contexts.
 * There is no point in duplicating this mapping in each user process
 * so they do not appear in the user structures.
 *
 * User space begins at 0x00000000 and runs through 0x1fffffff,
 * then has a `hole', then resumes at 0xe0000000 and runs until it
 * hits the kernel space at 0xf8000000. This can be mapped
 * contiguously by ignoring the top two bits and pretending the
 * space goes from 0 to 0x37ffffff. Typically the lower range is
 * used for text+data and the upper for stack, but the code here
 * makes no such distinction.
 *
 * Since each virtual segment covers 256 kbytes, the user space
 * requires 3584 segments, while the kernel (including DVMA) requires
 * only 512 segments (a worked derivation follows the NKREG/NUREG
 * definitions below).
 *
 *
 ** FOR THE SUN4/SUN4C
 *
 * The segment map entry for virtual segment vseg is offset in
 * pmap->pm_rsegmap by 0 if pmap is not the kernel pmap, or by
 * NUSEG if it is. We keep a pointer called pmap->pm_segmap
 * pre-offset by this value. pmap->pm_segmap thus contains the
 * values to be loaded into the user portion of the hardware segment
 * map so as to reach the proper PMEGs within the MMU. The kernel
 * mappings are `set early' and are always valid in every context
 * (every change is always propagated immediately).
 *
 * The PMEGs within the MMU are loaded `on demand'; when a PMEG is
 * taken away from context `c', the pmap for context c has its
 * corresponding pm_segmap[vseg] entry marked invalid (the MMU segment
 * map entry is also made invalid at the same time). Thus
 * pm_segmap[vseg] is the `invalid pmeg' number (127 or 511) whenever
 * the corresponding PTEs are not actually in the MMU. On the other
 * hand, pm_pte[vseg] is NULL only if no pages in that virtual segment
 * are in core; otherwise it points to a copy of the 32 or 64 PTEs that
 * must be loaded in the MMU in order to reach those pages.
 * pm_npte[vseg] counts the number of valid pages in each vseg.
 *
 * XXX performance: faster to count valid bits?
 *
 * The kernel pmap cannot malloc() PTEs since malloc() will sometimes
 * allocate a new virtual segment. Since kernel mappings are never
 * `stolen' out of the MMU, we just keep all its PTEs there, and
 * have no software copies. Its mmu entries are nonetheless kept on lists
 * so that the code that fiddles with mmu lists has something to fiddle.
 *
 ** FOR THE SUN4M
 *
 * On this architecture, the virtual-to-physical translation (page) tables
 * are *not* stored within the MMU as they are in the earlier Sun architect-
 * ures; instead, they are maintained entirely within physical memory (there
 * is a TLB cache to prevent the high performance hit from keeping all page
 * tables in core). Thus there is no need to dynamically allocate PMEGs or
 * SMEGs; only contexts must be shared.
 *
 * We maintain two parallel sets of tables: one is the actual MMU-edible
 * hierarchy of page tables in allocated kernel memory; these tables refer
 * to each other by physical address pointers in SRMMU format (thus they
 * are not very useful to the kernel's management routines). The other set
 * of tables is similar to those used for the Sun4/400's 3-level MMU; it
 * is a hierarchy of regmap and segmap structures which contain kernel virtual
 * pointers to each other. These must (unfortunately) be kept in sync.
 *
 */
#define NKREG	((int)((-(unsigned)VM_MIN_KERNEL_ADDRESS) / NBPRG))	/* 8 */
#define NUREG	(256 - NKREG)						/* 248 */
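
/*
 * Worked derivation of the numbers above (illustrative only; it assumes
 * the usual geometry from pte.h of 16MB per region (NBPRG) and 256KB per
 * segment, as described in the layout comment):
 *
 *	kernel:	-(unsigned)0xf8000000 = 0x08000000 (128MB)
 *		0x08000000 / 16MB  = 8 regions		-> NKREG = 8
 *		0x08000000 / 256KB = 512 kernel segments
 *	user:	0x20000000 + 0x18000000 = 0x38000000 (896MB)
 *		0x38000000 / 256KB = 3584 user segments
 *	NUREG = 256 - NKREG = 248 region-table slots left over for user.
 */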

TAILQ_HEAD(mmuhd,mmuentry);

/*
 * data appearing in both user and kernel pmaps
 *
 * note: if we want the same binaries to work on the 4/4c and 4m, we have to
 *       include the fields for both to make sure that the struct kproc
 *       is the same size.
 */
struct pmap {
	union ctxinfo *pm_ctx;		/* current context, if any */
	int	pm_ctxnum;		/* current context's number */
	struct simplelock pm_lock;	/* spinlock */
	int	pm_refcount;		/* just what it says */

	struct mmuhd	pm_reglist;	/* MMU regions on this pmap (4/4c) */
	struct mmuhd	pm_seglist;	/* MMU segments on this pmap (4/4c) */

	void	*pm_regstore;
	struct regmap	*pm_regmap;

	int	*pm_reg_ptps;		/* SRMMU-edible region table for 4m */
	int	pm_reg_ptps_pa;		/* _Physical_ address of pm_reg_ptps */

	int	pm_gap_start;		/* Starting with this vreg there's */
	int	pm_gap_end;		/* no valid mapping until here */

	struct pmap_statistics	pm_stats;	/* pmap statistics */
};

struct regmap {
	struct segmap *rg_segmap;	/* points to NSEGRG segmaps (PMEGs) */
	int	*rg_seg_ptps;		/* SRMMU-edible segment tables (NULL
					 * indicates invalid region) (4m) */
	smeg_t	rg_smeg;		/* the MMU region number (4c) */
	u_char	rg_nsegmap;		/* number of valid PMEGs */
};

struct segmap {
	int	*sg_pte;		/* points to NPTESG PTEs */
	pmeg_t	sg_pmeg;		/* the MMU segment number (4c) */
	u_char	sg_npte;		/* number of valid PTEs per seg */
};

typedef struct pmap *pmap_t;
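
/*
 * Illustrative sketch (not part of the interface): how the software tables
 * above would be walked to find the PTE copy for a user virtual address.
 * The helper name and the explicit shift/mask constants are assumptions
 * made for illustration only (256KB segments, 4KB pages); the real pmap
 * code derives these indices with macros from pte.h and handles the
 * kernel pmap and the sun4's 8KB pages differently.
 */
#if 0
static __inline int *
pmap_sw_pte_lookup(struct pmap *pm, vaddr_t va)	/* hypothetical helper */
{
	int vr = (va >> 24) & 0xff;	/* which 16MB region */
	int vs = (va >> 18) & 0x3f;	/* which 256KB segment in the region */
	int vp = (va >> 12) & 0x3f;	/* which 4KB page in the segment */
	struct regmap *rp = &pm->pm_regmap[vr];
	struct segmap *sp;

	if (rp->rg_segmap == NULL)
		return (NULL);		/* whole region invalid */
	sp = &rp->rg_segmap[vs];
	if (sp->sg_pte == NULL)
		return (NULL);		/* no pages of this vseg are in core */
	return (&sp->sg_pte[vp]);	/* software copy; MMU copy may differ */
}
#endif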

/*
 * For each managed physical page, there is a list of all currently
 * valid virtual mappings of that page. Since there is usually one
 * (or zero) mapping per page, the table begins with an initial entry,
 * rather than a pointer; this head entry is empty iff its pv_pmap
 * field is NULL.
 *
 * Note that these are per machine independent page (so there may be
 * only one for every two hardware pages, e.g.). Since the virtual
 * address is aligned on a page boundary, the low order bits are free
 * for storing flags. Only the head of each list has flags.
 *
 * THIS SHOULD BE PART OF THE CORE MAP
 */
/* XXX - struct pvlist moved to vmparam.h because of include ordering issues */

/*
 * Flags in pv_flags. Note that PV_MOD must be 1 and PV_REF must be 2
 * since they must line up with the bits in the hardware PTEs (see pte.h).
 * SUN4M bits are at a slightly different location in the PTE.
 * Note: the REF, MOD and ANC flag bits occur only in the head of a pvlist.
 * The cacheable bit (either PV_NC or PV_C4M) is meaningful in each
 * individual pv entry.
 */
#define PV_MOD		1	/* page modified */
#define PV_REF		2	/* page referenced */
#define PV_NC		4	/* page cannot be cached */
#define PV_REF4M	1	/* page referenced (SRMMU) */
#define PV_MOD4M	2	/* page modified (SRMMU) */
#define PV_C4M		4	/* page _can_ be cached (SRMMU) */
#define PV_ANC		0x10	/* page has incongruent aliases */
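
/*
 * Illustrative use of the flags above (a sketch only; struct pvlist and its
 * field names come from <machine/vmparam.h>, and the helper below is
 * hypothetical, not part of the pmap interface). The 4/4c and SRMMU layouts
 * use different bit assignments, so the right flavour must be chosen at
 * run time.
 */
#if 0
static __inline int
pv_is_modified(struct pvlist *pv)
{
	if (pv->pv_pmap == NULL)
		return (0);	/* head entry empty: page has no mappings */
	return ((pv->pv_flags & (CPU_ISSUN4M ? PV_MOD4M : PV_MOD)) != 0);
}
#endif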

#if 0
struct kvm_cpustate {
	int		kvm_npmemarr;
	struct memarr	kvm_pmemarr[MA_SIZE];
	int		kvm_seginval;			/* [4,4c] */
	struct segmap	kvm_segmap_store[NKREG*NSEGRG];	/* [4,4c] */
}/*not yet used*/;
#endif

#ifdef _KERNEL

#define PMAP_NULL	((pmap_t)0)

extern struct pmap	kernel_pmap_store;

/*
 * Since PTEs also contain type bits, we have to have some way
 * to tell pmap_enter `this is an IO page' or `this is not to
 * be cached'. Since physical addresses are always aligned, we
 * can do this with the low order bits.
 *
 * The ordering below is important: the PMAP_* values, masked with
 * PMAP_TNC_4 and shifted left by PG_TNC_SHIFT (which is what
 * PMAP_T2PTE_4() does), must give exactly the PG_NC and PG_TYPE bits.
 */
#define PMAP_OBIO	1	/* tells pmap_enter to use PG_OBIO */
#define PMAP_VME16	2	/* etc */
#define PMAP_VME32	3	/* etc */
#define PMAP_NC		4	/* tells pmap_enter to set PG_NC */
#define PMAP_TNC_4	7	/* mask to get PG_TYPE & PG_NC */

#define PMAP_T2PTE_4(x)		(((x) & PMAP_TNC_4) << PG_TNC_SHIFT)
#define PMAP_IOENC_4(io)	(io)

/*
 * On a SRMMU machine, the iospace is encoded in bits [3-6] of the
 * physical address passed to pmap_enter().
 */
#define PMAP_TYPE_SRMMU		0x78	/* mask to get 4m page type */
#define PMAP_PTESHFT_SRMMU	25	/* shift to move type from pa into pte */
#define PMAP_SHFT_SRMMU		3	/* shift to encode iospace in pa */
#define PMAP_TNC_SRMMU		127	/* mask to get PG_TYPE & PG_NC */
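
/*
 * Worked example of the SRMMU encoding (illustrative): an iospace value is
 * placed in pa bits [3-6] by PMAP_IOENC_SRMMU(), and PMAP_T2PTE_SRMMU()
 * then shifts those bits up by 25, so they land in PTE bits [28-31], the
 * top of the SRMMU physical page number, which corresponds to physical
 * address bits [32-35] on the 36-bit physical bus.
 */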

/*#define PMAP_IOC	0x00800000	-* IO cacheable, NOT shifted */

#define PMAP_T2PTE_SRMMU(x)	(((x) & PMAP_TYPE_SRMMU) << PMAP_PTESHFT_SRMMU)
#define PMAP_IOENC_SRMMU(io)	((io) << PMAP_SHFT_SRMMU)

/* Encode IO space for pmap_enter() */
#define PMAP_IOENC(io)	(CPU_ISSUN4M ? PMAP_IOENC_SRMMU(io) : PMAP_IOENC_4(io))
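
/*
 * Example (a sketch only, not taken from the tree; `va' and `pa' stand for
 * a hypothetical kernel virtual address and device physical address): a
 * caller mapping an on-board I/O page uncached would OR the encodings into
 * the physical address and let pmap_enter() pick them apart:
 *
 *	pmap_enter(pmap_kernel(), va, pa | PMAP_IOENC(PMAP_OBIO) | PMAP_NC,
 *	    VM_PROT_READ | VM_PROT_WRITE, 0);
 */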

int	pmap_dumpsize(void);
int	pmap_dumpmmu(int (*)(dev_t, daddr64_t, caddr_t, size_t), daddr64_t);

#define	pmap_kernel()	(&kernel_pmap_store)
#define pmap_resident_count(pmap)	pmap_count_ptes(pmap)

#define PMAP_PREFER(fo, ap)	pmap_prefer((fo), (ap))

#define PMAP_EXCLUDE_DECLS	/* tells MI pmap.h *not* to include decls */

/* FUNCTION DECLARATIONS FOR COMMON PMAP MODULE */

struct proc;
void		pmap_activate(struct proc *);
void		pmap_bootstrap(int nmmu, int nctx, int nregion);
int		pmap_count_ptes(struct pmap *);
void		pmap_prefer(vaddr_t, vaddr_t *);
int		pmap_pa_exists(paddr_t);
void		pmap_unwire(pmap_t, vaddr_t);
void		pmap_copy(pmap_t, pmap_t, vaddr_t, vsize_t, vaddr_t);
pmap_t		pmap_create(void);
void		pmap_destroy(pmap_t);
void		pmap_init(void);
void		pmap_kremove(vaddr_t, vsize_t);
vaddr_t		pmap_map(vaddr_t, paddr_t, paddr_t, int);
void		pmap_reference(pmap_t);
void		pmap_release(pmap_t);
void		pmap_remove(pmap_t, vaddr_t, vaddr_t);
int		pmap_page_index(paddr_t);
void		pmap_virtual_space(vaddr_t *, vaddr_t *);
void		pmap_redzone(void);
void		kvm_setcache(caddr_t, int, int);
#define		kvm_uncache(addr, npages) kvm_setcache(addr, npages, 0)
#define		kvm_recache(addr, npages) kvm_setcache(addr, npages, 1)
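
/*
 * For example (illustrative; `dvma_va' and `npg' are hypothetical), a driver
 * wanting an npg-page kernel mapping made uncacheable before handing it to
 * a device would do:
 *
 *	kvm_uncache((caddr_t)dvma_va, npg);
 */
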
void		pmap_cache_enable(void);
struct user;
void		switchexit(struct proc *);
int		mmu_pagein(struct pmap *pm, vaddr_t, int);
void		pmap_writetext(unsigned char *, int);

#define	pmap_collect(pm)		do { /* nothing */ } while (0)
#define	pmap_copy(DP,SP,D,L,S)		do { /* nothing */ } while (0)
#define	pmap_deactivate(p)		do { /* nothing */ } while (0)
#define	pmap_phys_address(frame)	(frame)
#define	pmap_proc_iflush(p,va,len)	do { /* nothing */ } while (0)
#define	pmap_update(pm)			do { /* nothing */ } while (0)

/* SUN4/SUN4C SPECIFIC DECLARATIONS */

#if defined(SUN4) || defined(SUN4C)
boolean_t	pmap_clear_modify4_4c(struct vm_page *);
boolean_t	pmap_clear_reference4_4c(struct vm_page *);
int		pmap_enter4_4c(pmap_t, vaddr_t, paddr_t, vm_prot_t, int);
boolean_t	pmap_extract4_4c(pmap_t, vaddr_t, paddr_t *);
boolean_t	pmap_is_modified4_4c(struct vm_page *);
boolean_t	pmap_is_referenced4_4c(struct vm_page *);
void		pmap_kenter_pa4_4c(vaddr_t, paddr_t, vm_prot_t);
void		pmap_page_protect4_4c(struct vm_page *, vm_prot_t);
void		pmap_protect4_4c(pmap_t, vaddr_t, vaddr_t, vm_prot_t);
void		pmap_copy_page4_4c(struct vm_page *, struct vm_page *);
void		pmap_zero_page4_4c(struct vm_page *);
void		pmap_changeprot4_4c(pmap_t, vaddr_t, vm_prot_t, int);
#endif

/* SIMILAR DECLARATIONS FOR SUN4M MODULE */

#if defined(SUN4M)
boolean_t	pmap_clear_modify4m(struct vm_page *);
boolean_t	pmap_clear_reference4m(struct vm_page *);
int		pmap_enter4m(pmap_t, vaddr_t, paddr_t, vm_prot_t, int);
boolean_t	pmap_extract4m(pmap_t, vaddr_t, paddr_t *);
boolean_t	pmap_is_modified4m(struct vm_page *);
boolean_t	pmap_is_referenced4m(struct vm_page *);
void		pmap_kenter_pa4m(vaddr_t, paddr_t, vm_prot_t);
void		pmap_page_protect4m(struct vm_page *, vm_prot_t);
void		pmap_protect4m(pmap_t, vaddr_t, vaddr_t, vm_prot_t);
void		pmap_copy_page4m(struct vm_page *, struct vm_page *);
void		pmap_zero_page4m(struct vm_page *);
void		pmap_changeprot4m(pmap_t, vaddr_t, vm_prot_t, int);
#endif /* defined SUN4M */

#if !defined(SUN4M) && (defined(SUN4) || defined(SUN4C))

#define		pmap_clear_modify	pmap_clear_modify4_4c
#define		pmap_clear_reference	pmap_clear_reference4_4c
#define		pmap_copy_page		pmap_copy_page4_4c
#define		pmap_enter		pmap_enter4_4c
#define		pmap_extract		pmap_extract4_4c
#define		pmap_is_modified	pmap_is_modified4_4c
#define		pmap_is_referenced	pmap_is_referenced4_4c
#define		pmap_kenter_pa		pmap_kenter_pa4_4c
#define		pmap_page_protect	pmap_page_protect4_4c
#define		pmap_protect		pmap_protect4_4c
#define		pmap_zero_page		pmap_zero_page4_4c
#define		pmap_changeprot		pmap_changeprot4_4c

#elif defined(SUN4M) && !(defined(SUN4) || defined(SUN4C))

#define		pmap_clear_modify	pmap_clear_modify4m
#define		pmap_clear_reference	pmap_clear_reference4m
#define		pmap_copy_page		pmap_copy_page4m
#define		pmap_enter		pmap_enter4m
#define		pmap_extract		pmap_extract4m
#define		pmap_is_modified	pmap_is_modified4m
#define		pmap_is_referenced	pmap_is_referenced4m
#define		pmap_kenter_pa		pmap_kenter_pa4m
#define		pmap_page_protect	pmap_page_protect4m
#define		pmap_protect		pmap_protect4m
#define		pmap_zero_page		pmap_zero_page4m
#define		pmap_changeprot		pmap_changeprot4m

#else	/* must use function pointers */

extern boolean_t	(*pmap_clear_modify_p)(struct vm_page *);
extern boolean_t	(*pmap_clear_reference_p)(struct vm_page *);
extern int		(*pmap_enter_p)(pmap_t, vaddr_t, paddr_t,
					vm_prot_t, int);
extern boolean_t	(*pmap_extract_p)(pmap_t, vaddr_t, paddr_t *);
extern boolean_t	(*pmap_is_modified_p)(struct vm_page *);
extern boolean_t	(*pmap_is_referenced_p)(struct vm_page *);
extern void		(*pmap_kenter_pa_p)(vaddr_t, paddr_t, vm_prot_t);
extern void		(*pmap_page_protect_p)(struct vm_page *,
					vm_prot_t);
extern void		(*pmap_protect_p)(pmap_t, vaddr_t, vaddr_t,
					vm_prot_t);
extern void		(*pmap_copy_page_p)(struct vm_page *, struct vm_page *);
extern void		(*pmap_zero_page_p)(struct vm_page *);
extern void		(*pmap_changeprot_p)(pmap_t, vaddr_t,
					vm_prot_t, int);

#define		pmap_clear_modify	(*pmap_clear_modify_p)
#define		pmap_clear_reference	(*pmap_clear_reference_p)
#define		pmap_copy_page		(*pmap_copy_page_p)
#define		pmap_enter		(*pmap_enter_p)
#define		pmap_extract		(*pmap_extract_p)
#define		pmap_is_modified	(*pmap_is_modified_p)
#define		pmap_is_referenced	(*pmap_is_referenced_p)
#define		pmap_kenter_pa		(*pmap_kenter_pa_p)
#define		pmap_page_protect	(*pmap_page_protect_p)
#define		pmap_protect		(*pmap_protect_p)
#define		pmap_zero_page		(*pmap_zero_page_p)
#define		pmap_changeprot		(*pmap_changeprot_p)

#endif
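
/*
 * When both MMU families are configured, the entry points above are reached
 * through the function pointers. A sketch of how they would be filled in
 * once the MMU type is known (illustrative only; the helper below is
 * hypothetical, and the real assignments would be made early in bootstrap,
 * once the kernel knows which MMU it is running on):
 */
#if 0
static void
pmap_choose_funcs(void)
{
	if (CPU_ISSUN4M) {
		pmap_enter_p = pmap_enter4m;
		pmap_extract_p = pmap_extract4m;
		pmap_page_protect_p = pmap_page_protect4m;
		/* ...one assignment per pointer declared above... */
	} else {
		pmap_enter_p = pmap_enter4_4c;
		pmap_extract_p = pmap_extract4_4c;
		pmap_page_protect_p = pmap_page_protect4_4c;
	}
}
#endif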

#endif /* _KERNEL */

#endif /* _SPARC_PMAP_H_ */