/*	$OpenBSD: uvm_map.h,v 1.38 2007/04/11 12:10:42 art Exp $	*/
/*	$NetBSD: uvm_map.h,v 1.24 2001/02/18 21:19:08 chs Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_map.h	8.3 (Berkeley) 3/15/94
 * from: Id: uvm_map.h,v 1.1.2.3 1998/02/07 01:16:55 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#ifndef _UVM_UVM_MAP_H_
#define _UVM_UVM_MAP_H_

#include <sys/rwlock.h>

#ifdef _KERNEL

/*
 * UVM_MAP_CLIP_START: ensure that the entry begins at or after
 * the starting address; if it doesn't, we split the entry.
 *
 * => map must be locked by caller
 */

#define UVM_MAP_CLIP_START(MAP,ENTRY,VA) { \
	if ((VA) > (ENTRY)->start) uvm_map_clip_start(MAP,ENTRY,VA); }

/*
 * UVM_MAP_CLIP_END: ensure that the entry ends at or before
 * the ending address; if it doesn't, we split the entry.
 *
 * => map must be locked by caller
 */

#define UVM_MAP_CLIP_END(MAP,ENTRY,VA) { \
	if ((VA) < (ENTRY)->end) uvm_map_clip_end(MAP,ENTRY,VA); }
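
/*
 * Illustrative sketch (not part of the original interface): a caller that
 * operates on a page-aligned range clips the entries that straddle the
 * range boundaries before walking them.  The variables "map", "start" and
 * "end" are hypothetical; only the clip macros, the entry list fields and
 * the lock macros come from this header.
 */
#if 0
	vm_map_entry_t entry;

	vm_map_lock(map);
	if (uvm_map_lookup_entry(map, start, &entry)) {
		UVM_MAP_CLIP_START(map, entry, start);
		while (entry != &map->header && entry->start < end) {
			UVM_MAP_CLIP_END(map, entry, end);
			/* ... operate on [entry->start, entry->end) ... */
			entry = entry->next;
		}
	}
	vm_map_unlock(map);
#endif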

/*
 * extract flags
 */
#define UVM_EXTRACT_REMOVE	0x1	/* remove mapping from old map */
#define UVM_EXTRACT_CONTIG	0x2	/* try to keep it contig */
#define UVM_EXTRACT_QREF	0x4	/* use quick refs */
#define UVM_EXTRACT_FIXPROT	0x8	/* set prot to maxprot as we go */

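/*
 * Illustrative sketch: extracting a chunk of one map into another with
 * uvm_map_extract() (prototyped below) typically combines these flags.
 * "srcmap", "dstmap", "srcaddr" and "len" are hypothetical placeholders.
 */
#if 0
	vaddr_t dstaddr;
	int error;

	/* copy [srcaddr, srcaddr + len) into dstmap, kept contiguous */
	error = uvm_map_extract(srcmap, srcaddr, len, dstmap, &dstaddr,
	    UVM_EXTRACT_CONTIG | UVM_EXTRACT_FIXPROT);
#endif
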
#endif /* _KERNEL */

#include <uvm/uvm_anon.h>

/*
 * types defined:
 *
 *	vm_map_t		the high-level address map data structure.
 *	vm_map_entry_t		an entry in an address map.
 *	vm_map_version_t	a timestamp of a map, for use with vm_map_lookup
 */

/*
 * Objects which live in maps may be either VM objects, or another map
 * (called a "sharing map") which denotes read-write sharing with other maps.
 *
 * XXXCDC: private pager data goes here now
 */

union vm_map_object {
	struct uvm_object	*uvm_obj;	/* UVM OBJECT */
	struct vm_map		*sub_map;	/* belongs to another map */
};

/*
 * Address map entries consist of start and end addresses,
 * a VM object (or sharing map) and offset into that object,
 * and user-exported inheritance and protection information.
 * Also included is control information for virtual copy operations.
 */
struct vm_map_entry {
	RB_ENTRY(vm_map_entry)	rb_entry;	/* tree information */
	vaddr_t			ownspace;	/* free space after */
	vaddr_t			space;		/* space in subtree */
	struct vm_map_entry	*prev;		/* previous entry */
	struct vm_map_entry	*next;		/* next entry */
	vaddr_t			start;		/* start address */
	vaddr_t			end;		/* end address */
	union vm_map_object	object;		/* object I point to */
	voff_t			offset;		/* offset into object */
	int			etype;		/* entry type */
	vm_prot_t		protection;	/* protection code */
	vm_prot_t		max_protection;	/* maximum protection */
	vm_inherit_t		inheritance;	/* inheritance */
	int			wired_count;	/* can be paged if == 0 */
	struct vm_aref		aref;		/* anonymous overlay */
	int			advice;		/* madvise advice */
#define uvm_map_entry_stop_copy flags
	u_int8_t		flags;		/* flags */

#define	UVM_MAP_STATIC		0x01		/* static map entry */
#define	UVM_MAP_KMEM		0x02		/* from kmem entry pool */

};

#define	VM_MAPENT_ISWIRED(entry)	((entry)->wired_count != 0)
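
/*
 * Illustrative sketch: the "header" entry embedded in struct vm_map (below)
 * acts as a list sentinel, so the whole entry list can be walked via the
 * prev/next links.  The map must be at least read-locked by the caller;
 * "map" is a hypothetical struct vm_map pointer.
 */
#if 0
	vm_map_entry_t entry;
	int wired_entries = 0;

	for (entry = map->header.next; entry != &map->header;
	    entry = entry->next) {
		if (VM_MAPENT_ISWIRED(entry))
			wired_entries++;
	}
#endif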

/*
 *	Maps are doubly-linked lists of map entries, kept sorted
 *	by address.  A single hint is provided to start
 *	searches again from the last successful search,
 *	insertion, or removal.
 *
 *	LOCKING PROTOCOL NOTES:
 *	-----------------------
 *
 *	VM map locking is a little complicated.  There are both shared
 *	and exclusive locks on maps.  However, it is sometimes required
 *	to downgrade an exclusive lock to a shared lock, and upgrade to
 *	an exclusive lock again (to perform error recovery).  During that
 *	window, another thread *must not* queue itself to receive an
 *	exclusive lock before we upgrade back to exclusive; otherwise the
 *	error recovery becomes extremely difficult, if not impossible.
 *
 *	In order to prevent this scenario, we introduce the notion of
 *	a `busy' map.  A `busy' map is read-locked, but other threads
 *	attempting to write-lock wait for this flag to clear before
 *	entering the lock manager.  A map may only be marked busy
 *	when the map is write-locked (and then the map must be downgraded
 *	to read-locked), and may only be marked unbusy by the thread
 *	which marked it busy (holding *either* a read-lock or a
 *	write-lock, the latter being gained by an upgrade).  An
 *	illustrative busy/downgrade/upgrade/unbusy sequence is sketched
 *	after the vm_map_unbusy() macro below.
 *
 *	Access to the map `flags' member is controlled by the `flags_lock'
 *	simple lock.  Note that some flags are static (set once at map
 *	creation time, and never changed), and thus require no locking
 *	to check.  All flags which are r/w must be set or
 *	cleared while the `flags_lock' is asserted.  Additional locking
 *	requirements are:
 *
 *		VM_MAP_PAGEABLE		r/o static flag; no locking required
 *
 *		VM_MAP_INTRSAFE		r/o static flag; no locking required
 *
 *		VM_MAP_WIREFUTURE	r/w; may only be set or cleared when
 *					map is write-locked.  may be tested
 *					without asserting `flags_lock'.
 *
 *		VM_MAP_BUSY		r/w; may only be set when map is
 *					write-locked, may only be cleared by
 *					thread which set it, map read-locked
 *					or write-locked.  must be tested
 *					while `flags_lock' is asserted.
 *
 *		VM_MAP_WANTLOCK		r/w; may only be set when the map
 *					is busy, and thread is attempting
 *					to write-lock.  must be tested
 *					while `flags_lock' is asserted.
 */
struct vm_map {
	struct pmap *		pmap;		/* Physical map */
	struct rwlock		lock;		/* Lock for map data */
	RB_HEAD(uvm_tree, vm_map_entry) rbhead;	/* Tree for entries */
	struct vm_map_entry	header;		/* List of entries */
	int			nentries;	/* Number of entries */
	vsize_t			size;		/* virtual size */
	int			ref_count;	/* Reference count */
	simple_lock_data_t	ref_lock;	/* Lock for ref_count field */
	vm_map_entry_t		hint;		/* hint for quick lookups */
	simple_lock_data_t	hint_lock;	/* lock for hint storage */
	vm_map_entry_t		first_free;	/* First free space hint */
	int			flags;		/* flags */
	unsigned int		timestamp;	/* Version number */
#define	min_offset		header.start
#define	max_offset		header.end
};

/* vm_map flags */
#define	VM_MAP_PAGEABLE		0x01		/* ro: entries are pageable */
#define	VM_MAP_INTRSAFE		0x02		/* ro: interrupt safe map */
#define	VM_MAP_WIREFUTURE	0x04		/* rw: wire future mappings */
#define	VM_MAP_BUSY		0x08		/* rw: map is busy */
#define	VM_MAP_WANTLOCK		0x10		/* rw: want to write-lock */

/* XXX: number of kernel maps and entries to statically allocate */

#if !defined(MAX_KMAPENT)
#define	MAX_KMAPENT	1024	/* XXXCDC: no crash */
#endif	/* !defined MAX_KMAPENT */

#ifdef _KERNEL
#define	vm_map_modflags(map, set, clear)				\
do {									\
	(map)->flags = ((map)->flags | (set)) & ~(clear);		\
} while (0)
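
/*
 * Illustrative sketch: per the locking notes above, VM_MAP_WIREFUTURE may
 * only be set or cleared while the map is write-locked; vm_map_modflags()
 * does the set-and-clear in one step.  "map" is a hypothetical map pointer.
 */
#if 0
	vm_map_lock(map);
	vm_map_modflags(map, VM_MAP_WIREFUTURE, 0);	/* wire future mappings */
	vm_map_unlock(map);
#endif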
#endif /* _KERNEL */

/*
 * Interrupt-safe maps must also be kept on a special list,
 * to assist uvm_fault() in avoiding locking problems.
 */
struct vm_map_intrsafe {
	struct vm_map	vmi_map;
	LIST_ENTRY(vm_map_intrsafe) vmi_list;
};

/*
 * handle inline options
 */

#ifdef UVM_MAP_INLINE
#define MAP_INLINE static __inline
#else
#define MAP_INLINE /* nothing */
#endif /* UVM_MAP_INLINE */

/*
 * globals:
 */

#ifdef _KERNEL

#ifdef PMAP_GROWKERNEL
extern vaddr_t	uvm_maxkaddr;
#endif

/*
 * protos: the following prototypes define the interface to vm_map
 */

MAP_INLINE
void		uvm_map_deallocate(vm_map_t);

int		uvm_map_clean(vm_map_t, vaddr_t, vaddr_t, int);
void		uvm_map_clip_start(vm_map_t, vm_map_entry_t, vaddr_t);
void		uvm_map_clip_end(vm_map_t, vm_map_entry_t, vaddr_t);
MAP_INLINE
vm_map_t	uvm_map_create(pmap_t, vaddr_t, vaddr_t, int);
int		uvm_map_extract(vm_map_t, vaddr_t, vsize_t,
		    vm_map_t, vaddr_t *, int);
vm_map_entry_t	uvm_map_findspace(vm_map_t, vaddr_t, vsize_t, vaddr_t *,
		    struct uvm_object *, voff_t, vsize_t, int);
vaddr_t		uvm_map_hint(struct proc *, vm_prot_t);
int		uvm_map_inherit(vm_map_t, vaddr_t, vaddr_t, vm_inherit_t);
int		uvm_map_advice(vm_map_t, vaddr_t, vaddr_t, int);
void		uvm_map_init(void);
boolean_t	uvm_map_lookup_entry(vm_map_t, vaddr_t, vm_map_entry_t *);
MAP_INLINE
void		uvm_map_reference(vm_map_t);
int		uvm_map_replace(vm_map_t, vaddr_t, vaddr_t,
		    vm_map_entry_t, int);
int		uvm_map_reserve(vm_map_t, vsize_t, vaddr_t, vsize_t,
		    vaddr_t *);
void		uvm_map_setup(vm_map_t, vaddr_t, vaddr_t, int);
int		uvm_map_submap(vm_map_t, vaddr_t, vaddr_t, vm_map_t);
#define	uvm_unmap(_m, _s, _e) uvm_unmap_p(_m, _s, _e, 0)
MAP_INLINE
void		uvm_unmap_p(vm_map_t, vaddr_t, vaddr_t, struct proc *);
void		uvm_unmap_detach(vm_map_entry_t,int);
void		uvm_unmap_remove(vm_map_t, vaddr_t, vaddr_t,
		    vm_map_entry_t *, struct proc *);

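/*
 * Illustrative sketch: uvm_unmap() is the common entry point for removing
 * an address range and simply calls uvm_unmap_p() with no process pointer.
 * "map", "va" and "size" are hypothetical; in the usual pattern the _p
 * variant handles the map locking itself.
 */
#if 0
	/* remove [va, va + size); expands to uvm_unmap_p(map, va, va + size, 0) */
	uvm_unmap(map, va, va + size);
#endif
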
#endif /* _KERNEL */

/*
 * VM map locking operations:
 *
 *	These operations perform locking on the data portion of the
 *	map.
 *
 *	vm_map_lock_try: try to lock a map, failing if it is already locked.
 *
 *	vm_map_lock: acquire an exclusive (write) lock on a map.
 *
 *	vm_map_lock_read: acquire a shared (read) lock on a map.
 *
 *	vm_map_unlock: release an exclusive lock on a map.
 *
 *	vm_map_unlock_read: release a shared lock on a map.
 *
 *	vm_map_downgrade: downgrade an exclusive lock to a shared lock.
 *
 *	vm_map_upgrade: upgrade a shared lock to an exclusive lock.
 *
 *	vm_map_busy: mark a map as busy.
 *
 *	vm_map_unbusy: clear busy status on a map.
 *
 *	Note that "intrsafe" maps use only exclusive, spin locks.  We simply
 *	use the sleep lock's interlock for this.
 */

#ifdef _KERNEL
/* XXX: clean up later */
#include <sys/time.h>
#include <sys/proc.h>	/* for tsleep(), wakeup() */
#include <sys/systm.h>	/* for panic() */

static __inline boolean_t	vm_map_lock_try(vm_map_t);
static __inline void		vm_map_lock(vm_map_t);
extern const char vmmapbsy[];

static __inline boolean_t
vm_map_lock_try(struct vm_map *map)
{
	boolean_t rv;

	if (map->flags & VM_MAP_INTRSAFE) {
		rv = TRUE;
	} else {
		/* never queue behind a busy map; just fail */
		if (map->flags & VM_MAP_BUSY) {
			return (FALSE);
		}
		rv = (rw_enter(&map->lock, RW_WRITE|RW_NOSLEEP) == 0);
	}

	/* a successful write-lock bumps the map's version number */
	if (rv)
		map->timestamp++;

	return (rv);
}

static __inline void
vm_map_lock(struct vm_map *map)
{
	if (map->flags & VM_MAP_INTRSAFE)
		return;

	do {
		/* wait for a busy map to become unbusy before queueing */
		while (map->flags & VM_MAP_BUSY) {
			map->flags |= VM_MAP_WANTLOCK;
			tsleep(&map->flags, PVM, (char *)vmmapbsy, 0);
		}
	} while (rw_enter(&map->lock, RW_WRITE|RW_SLEEPFAIL) != 0);

	map->timestamp++;
}

#define	vm_map_lock_read(map)	rw_enter_read(&(map)->lock)

#define	vm_map_unlock(map)						\
do {									\
	if (((map)->flags & VM_MAP_INTRSAFE) == 0)			\
		rw_exit(&(map)->lock);					\
} while (0)

#define	vm_map_unlock_read(map)	rw_exit_read(&(map)->lock)

#define	vm_map_downgrade(map)	rw_enter(&(map)->lock, RW_DOWNGRADE)

#define	vm_map_upgrade(map)						\
do {									\
	rw_exit_read(&(map)->lock);					\
	rw_enter_write(&(map)->lock);					\
} while (0)

#define	vm_map_busy(map)						\
do {									\
	(map)->flags |= VM_MAP_BUSY;					\
} while (0)

#define	vm_map_unbusy(map)						\
do {									\
	int oflags;							\
									\
	oflags = (map)->flags;						\
	(map)->flags &= ~(VM_MAP_BUSY|VM_MAP_WANTLOCK);			\
	if (oflags & VM_MAP_WANTLOCK)					\
		wakeup(&(map)->flags);					\
} while (0)
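
/*
 * Illustrative sketch: the busy/unbusy sequence described in the locking
 * protocol notes above.  A thread that must temporarily give up its
 * exclusive lock marks the map busy first, so no other thread can queue
 * for the write lock while it performs error recovery.  "map" is a
 * hypothetical map pointer.
 */
#if 0
	vm_map_lock(map);		/* exclusive lock */
	vm_map_busy(map);		/* only legal while write-locked */
	vm_map_downgrade(map);		/* shared lock, map still busy */

	/* ... recovery work that only needs read access ... */

	vm_map_upgrade(map);		/* back to an exclusive lock */
	vm_map_unbusy(map);		/* wakes any VM_MAP_WANTLOCK waiters */
	vm_map_unlock(map);
#endif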
#endif /* _KERNEL */

/*
 * Functions implemented as macros
 */
#define		vm_map_min(map)		((map)->min_offset)
#define		vm_map_max(map)		((map)->max_offset)
#define		vm_map_pmap(map)	((map)->pmap)

#endif /* _UVM_UVM_MAP_H_ */