Annotation of sys/arch/arm/arm/vm_machdep.c, Revision 1.1.1.1
1.1 nbrk 1: /* $OpenBSD: vm_machdep.c,v 1.6 2007/05/27 20:59:25 miod Exp $ */
2: /* $NetBSD: vm_machdep.c,v 1.31 2004/01/04 11:33:29 jdolecek Exp $ */
3:
4: /*
5: * Copyright (c) 1994-1998 Mark Brinicombe.
6: * Copyright (c) 1994 Brini.
7: * All rights reserved.
8: *
9: * This code is derived from software written for Brini by Mark Brinicombe
10: *
11: * Redistribution and use in source and binary forms, with or without
12: * modification, are permitted provided that the following conditions
13: * are met:
14: * 1. Redistributions of source code must retain the above copyright
15: * notice, this list of conditions and the following disclaimer.
16: * 2. Redistributions in binary form must reproduce the above copyright
17: * notice, this list of conditions and the following disclaimer in the
18: * documentation and/or other materials provided with the distribution.
19: * 3. All advertising materials mentioning features or use of this software
20: * must display the following acknowledgement:
21: * This product includes software developed by Brini.
22: * 4. The name of the company nor the name of the author may be used to
23: * endorse or promote products derived from this software without specific
24: * prior written permission.
25: *
26: * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
27: * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
28: * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
29: * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
30: * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
31: * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
32: * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33: * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34: * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35: * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36: * SUCH DAMAGE.
37: *
38: * RiscBSD kernel project
39: *
40: * vm_machdep.h
41: *
42: * vm machine specific bits
43: *
44: * Created : 08/10/94
45: */
46:
47: #include <sys/param.h>
48: #include <sys/systm.h>
49: #include <sys/proc.h>
50: #include <sys/malloc.h>
51: #include <sys/vnode.h>
52: #include <sys/buf.h>
53: #if 0
54: #include <sys/pmc.h>
55: #endif
56: #include <sys/user.h>
57: #include <sys/exec.h>
58: #include <sys/syslog.h>
59:
60: #include <uvm/uvm_extern.h>
61:
62: #include <machine/cpu.h>
63: #include <machine/pmap.h>
64: #include <machine/reg.h>
65: #include <machine/vmparam.h>
66:
67: #ifdef ARMFPE
68: #include <arm/fpe-arm/armfpe.h>
69: #endif
70:
71: extern pv_addr_t systempage;
72:
73: int process_read_regs (struct proc *p, struct reg *regs);
74: int process_read_fpregs (struct proc *p, struct fpreg *regs);
75:
76: void switch_exit (struct proc *p, struct proc *p0,
77: void (*)(struct proc *));
78: extern void proc_trampoline (void);
79:
80: /*
81: * Special compilation symbols:
82: *
83: * STACKCHECKS - Fill undefined and supervisor stacks with a known pattern
84: * on forking and check the pattern on exit, reporting
85: * the amount of stack used.
86: */
87:
#if 0
/*
 * Machine-dependent hook run at fork time to copy MD process state
 * from parent p1 to child p2.  Currently compiled out; the only state
 * it would copy is the optional performance-counter (PMC) context.
 */
void
cpu_proc_fork(struct proc *p1, struct proc *p2)
{
#if defined(PERFCTRS)
	/* Inherit the parent's PMC context, or start the child clean. */
	if (PMC_ENABLED(p1))
		pmc_md_fork(p1, p2);
	else {
		p2->p_md.pmc_enabled = 0;
		p2->p_md.pmc_state = NULL;
	}
#endif
}
#endif
104:
105: /*
106: * Finish a fork operation, with process p2 nearly set up.
107: * Copy and update the pcb and trap frame, making the child ready to run.
108: *
109: * Rig the child's kernel stack so that it will start out in
110: * proc_trampoline() and call child_return() with p2 as an
111: * argument. This causes the newly-created child process to go
112: * directly to user level with an apparent return value of 0 from
113: * fork(), while the parent process returns normally.
114: *
115: * p1 is the process being forked; if p1 == &proc0, we are creating
116: * a kernel thread, and the return path and argument are specified with
117: * `func' and `arg'.
118: *
119: * If an alternate user-level stack is requested (with non-zero values
120: * in both the stack and stacksize args), set up the user stack pointer
121: * accordingly.
122: */
123: void
124: cpu_fork(p1, p2, stack, stacksize, func, arg)
125: struct proc *p1;
126: struct proc *p2;
127: void *stack;
128: size_t stacksize;
129: void (*func) (void *);
130: void *arg;
131: {
132: struct pcb *pcb = (struct pcb *)&p2->p_addr->u_pcb;
133: struct trapframe *tf;
134: struct switchframe *sf;
135:
136: #ifdef PMAP_DEBUG
137: if (pmap_debug_level >= 0)
138: printf("cpu_fork: %p %p %p %p\n", p1, p2, curlwp, &proc0);
139: #endif /* PMAP_DEBUG */
140:
141: #if 0 /* XXX */
142: if (l1 == curlwp) {
143: /* Sync the PCB before we copy it. */
144: savectx(curpcb);
145: }
146: #endif
147:
148: /* Copy the pcb */
149: *pcb = p1->p_addr->u_pcb;
150:
151: /*
152: * Set up the undefined stack for the process.
153: * Note: this stack is not in use if we are forking from p1
154: */
155: pcb->pcb_un.un_32.pcb32_und_sp = (u_int)p2->p_addr +
156: USPACE_UNDEF_STACK_TOP;
157: pcb->pcb_un.un_32.pcb32_sp = (u_int)p2->p_addr + USPACE_SVC_STACK_TOP;
158:
159: #ifdef STACKCHECKS
160: /* Fill the undefined stack with a known pattern */
161: memset(((u_char *)p2->p_addr) + USPACE_UNDEF_STACK_BOTTOM, 0xdd,
162: (USPACE_UNDEF_STACK_TOP - USPACE_UNDEF_STACK_BOTTOM));
163: /* Fill the kernel stack with a known pattern */
164: memset(((u_char *)p2->p_addr) + USPACE_SVC_STACK_BOTTOM, 0xdd,
165: (USPACE_SVC_STACK_TOP - USPACE_SVC_STACK_BOTTOM));
166: #endif /* STACKCHECKS */
167:
168: #ifdef PMAP_DEBUG
169: if (pmap_debug_level >= 0) {
170: printf("p1->procaddr=%p p1->procaddr->u_pcb=%p pid=%d pmap=%p\n",
171: p1->p_addr, &p1->p_addr->u_pcb, p1->p_lid,
172: p1->p_proc->p_vmspace->vm_map.pmap);
173: printf("p2->procaddr=%p p2->procaddr->u_pcb=%p pid=%d pmap=%p\n",
174: p2->p_addr, &p2->p_addr->u_pcb, p2->p_lid,
175: p2->p_proc->p_vmspace->vm_map.pmap);
176: }
177: #endif /* PMAP_DEBUG */
178:
179: pmap_activate(p2);
180:
181: #ifdef ARMFPE
182: /* Initialise a new FP context for p2 and copy the context from p1 */
183: arm_fpe_core_initcontext(FP_CONTEXT(p2));
184: arm_fpe_copycontext(FP_CONTEXT(p1), FP_CONTEXT(p2));
185: #endif /* ARMFPE */
186:
187: p2->p_addr->u_pcb.pcb_tf = tf =
188: (struct trapframe *)pcb->pcb_un.un_32.pcb32_sp - 1;
189: *tf = *p1->p_addr->u_pcb.pcb_tf;
190:
191: /*
192: * If specified, give the child a different stack.
193: */
194: if (stack != NULL)
195: tf->tf_usr_sp = (u_int)stack + stacksize;
196:
197: sf = (struct switchframe *)tf - 1;
198: sf->sf_r4 = (u_int)func;
199: sf->sf_r5 = (u_int)arg;
200: sf->sf_pc = (u_int)proc_trampoline;
201: pcb->pcb_un.un_32.pcb32_sp = (u_int)sf;
202: }
203:
#if 0
/*
 * Re-aim an already set up process so that, when next switched to, it
 * starts in proc_trampoline() and calls func(arg).  Compiled out.
 */
void
cpu_setfunc(struct proc *p, void (*func)(void *), void *arg)
{
	struct pcb *pcb = &p->p_addr->u_pcb;
	struct switchframe *sf;

	/* The switch frame sits immediately below the saved trap frame. */
	sf = (struct switchframe *)pcb->pcb_tf - 1;
	sf->sf_pc = (u_int)proc_trampoline;
	sf->sf_r4 = (u_int)func;
	sf->sf_r5 = (u_int)arg;
	pcb->pcb_un.un_32.pcb32_sp = (u_int)sf;
}
#endif
218:
219:
220: void
221: cpu_exit(struct proc *p)
222: {
223: pmap_update(p->p_vmspace->vm_map.pmap); /* XXX DSR help stability */
224: switch_exit(p, &proc0, exit2);
225: }
226:
227: /*
228: * Map a user I/O request into kernel virtual address space.
229: * Note: the pages are already locked by uvm_vslock(), so we
230: * do not need to pass an access_type to pmap_enter().
231: */
232: void
233: vmapbuf(bp, len)
234: struct buf *bp;
235: vsize_t len;
236: {
237: vaddr_t faddr, taddr, off;
238: paddr_t fpa;
239:
240:
241: #ifdef PMAP_DEBUG
242: if (pmap_debug_level >= 0)
243: printf("vmapbuf: bp=%08x buf=%08x len=%08x\n", (u_int)bp,
244: (u_int)bp->b_data, (u_int)len);
245: #endif /* PMAP_DEBUG */
246:
247: if ((bp->b_flags & B_PHYS) == 0)
248: panic("vmapbuf");
249:
250: faddr = trunc_page((vaddr_t)bp->b_saveaddr = bp->b_data);
251: off = (vaddr_t)bp->b_data - faddr;
252: len = round_page(off + len);
253: taddr = uvm_km_valloc_wait(phys_map, len);
254: bp->b_data = (caddr_t)(taddr + off);
255:
256: /*
257: * The region is locked, so we expect that pmap_pte() will return
258: * non-NULL.
259: */
260: while (len) {
261: (void) pmap_extract(vm_map_pmap(&bp->b_proc->p_vmspace->vm_map),
262: faddr, &fpa);
263: pmap_enter(pmap_kernel(), taddr, fpa,
264: VM_PROT_READ|VM_PROT_WRITE, VM_PROT_READ|VM_PROT_WRITE|PMAP_WIRED);
265: faddr += PAGE_SIZE;
266: taddr += PAGE_SIZE;
267: len -= PAGE_SIZE;
268: }
269: pmap_update(pmap_kernel());
270: }
271:
272: /*
273: * Unmap a previously-mapped user I/O request.
274: */
275: void
276: vunmapbuf(bp, len)
277: struct buf *bp;
278: vsize_t len;
279: {
280: vaddr_t addr, off;
281:
282: #ifdef PMAP_DEBUG
283: if (pmap_debug_level >= 0)
284: printf("vunmapbuf: bp=%08x buf=%08x len=%08x\n",
285: (u_int)bp, (u_int)bp->b_data, (u_int)len);
286: #endif /* PMAP_DEBUG */
287:
288: if ((bp->b_flags & B_PHYS) == 0)
289: panic("vunmapbuf");
290:
291: /*
292: * Make sure the cache does not have dirty data for the
293: * pages we had mapped.
294: */
295: addr = trunc_page((vaddr_t)bp->b_data);
296: off = (vaddr_t)bp->b_data - addr;
297: len = round_page(off + len);
298:
299: pmap_remove(pmap_kernel(), addr, addr + len);
300: pmap_update(pmap_kernel());
301: uvm_km_free_wakeup(phys_map, addr, len);
302: bp->b_data = bp->b_saveaddr;
303: bp->b_saveaddr = 0;
304: }
305:
306: /* End of vm_machdep.c */
CVSweb