/* Annotation of sys/arch/hppa/hppa/vm_machdep.c, Revision 1.1.1.1 */
1.1 nbrk 1: /* $OpenBSD: vm_machdep.c,v 1.61 2007/06/20 17:29:34 miod Exp $ */
2:
3: /*
4: * Copyright (c) 1999-2004 Michael Shalayeff
5: * All rights reserved.
6: *
7: * Redistribution and use in source and binary forms, with or without
8: * modification, are permitted provided that the following conditions
9: * are met:
10: * 1. Redistributions of source code must retain the above copyright
11: * notice, this list of conditions and the following disclaimer.
12: * 2. Redistributions in binary form must reproduce the above copyright
13: * notice, this list of conditions and the following disclaimer in the
14: * documentation and/or other materials provided with the distribution.
15: *
16: * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17: * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18: * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19: * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
20: * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21: * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22: * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23: * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
24: * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
25: * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
26: * THE POSSIBILITY OF SUCH DAMAGE.
27: */
28:
29:
30: #include <sys/param.h>
31: #include <sys/systm.h>
32: #include <sys/proc.h>
33: #include <sys/signalvar.h>
34: #include <sys/malloc.h>
35: #include <sys/buf.h>
36: #include <sys/vnode.h>
37: #include <sys/user.h>
38: #include <sys/ptrace.h>
39: #include <sys/exec.h>
40: #include <sys/core.h>
41:
42: #include <machine/cpufunc.h>
43: #include <machine/pmap.h>
44: #include <machine/pcb.h>
45:
46: #include <uvm/uvm.h>
47:
48:
49: /*
50: * Dump the machine specific header information at the start of a core dump.
51: */
52: int
53: cpu_coredump(p, vp, cred, core)
54: struct proc *p;
55: struct vnode *vp;
56: struct ucred *cred;
57: struct core *core;
58: {
59: struct md_coredump md_core;
60: struct coreseg cseg;
61: off_t off;
62: int error;
63:
64: CORE_SETMAGIC(*core, COREMAGIC, MID_HPPA, 0);
65: core->c_hdrsize = ALIGN(sizeof(*core));
66: core->c_seghdrsize = ALIGN(sizeof(cseg));
67: core->c_cpusize = sizeof(md_core);
68:
69: process_read_regs(p, &md_core.md_reg);
70: process_read_fpregs(p, &md_core.md_fpreg);
71:
72: CORE_SETMAGIC(cseg, CORESEGMAGIC, MID_HPPA, CORE_CPU);
73: cseg.c_addr = 0;
74: cseg.c_size = core->c_cpusize;
75:
76: #define write(vp, addr, n) \
77: vn_rdwr(UIO_WRITE, (vp), (caddr_t)(addr), (n), off, \
78: UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, cred, NULL, p)
79:
80: off = core->c_hdrsize;
81: if ((error = write(vp, &cseg, core->c_seghdrsize)))
82: return error;
83: off += core->c_seghdrsize;
84: if ((error = write(vp, &md_core, sizeof md_core)))
85: return error;
86:
87: #undef write
88: core->c_nseg++;
89:
90: return error;
91: }
92:
93: void
94: cpu_fork(p1, p2, stack, stacksize, func, arg)
95: struct proc *p1, *p2;
96: void *stack;
97: size_t stacksize;
98: void (*func)(void *);
99: void *arg;
100: {
101: extern paddr_t fpu_curpcb; /* from locore.S */
102: extern u_int fpu_enable;
103: struct pcb *pcbp;
104: struct trapframe *tf;
105: register_t sp, osp;
106: paddr_t pa;
107:
108: #ifdef DIAGNOSTIC
109: if (round_page(sizeof(struct user)) > NBPG)
110: panic("USPACE too small for user");
111: #endif
112: if (p1->p_md.md_regs->tf_cr30 == fpu_curpcb) {
113: mtctl(fpu_enable, CR_CCR);
114: fpu_save(fpu_curpcb);
115: mtctl(0, CR_CCR);
116: }
117:
118: pcbp = &p2->p_addr->u_pcb;
119: bcopy(&p1->p_addr->u_pcb, pcbp, sizeof(*pcbp));
120: /* space is cached for the copy{in,out}'s pleasure */
121: pcbp->pcb_space = p2->p_vmspace->vm_map.pmap->pm_space;
122: pcbp->pcb_uva = (vaddr_t)p2->p_addr;
123: /* reset any of the pending FPU exceptions from parent */
124: pcbp->pcb_fpregs[0] = HPPA_FPU_FORK(pcbp->pcb_fpregs[0]);
125: pcbp->pcb_fpregs[1] = 0;
126: pcbp->pcb_fpregs[2] = 0;
127: pcbp->pcb_fpregs[3] = 0;
128:
129: sp = (register_t)p2->p_addr + NBPG;
130: p2->p_md.md_regs = tf = (struct trapframe *)sp;
131: sp += sizeof(struct trapframe);
132: bcopy(p1->p_md.md_regs, tf, sizeof(*tf));
133:
134: /*
135: * Stash the physical for the pcb of U for later perusal
136: */
137: if (!pmap_extract(pmap_kernel(), (vaddr_t)p2->p_addr, &pa))
138: panic("pmap_extract(%p) failed", p2->p_addr);
139:
140: tf->tf_cr30 = pa;
141:
142: tf->tf_sr0 = tf->tf_sr1 = tf->tf_sr2 = tf->tf_sr3 =
143: tf->tf_sr4 = tf->tf_sr5 = tf->tf_sr6 =
144: tf->tf_iisq_head = tf->tf_iisq_tail =
145: p2->p_vmspace->vm_map.pmap->pm_space;
146: tf->tf_pidr1 = tf->tf_pidr2 = pmap_sid2pid(tf->tf_sr0);
147:
148: /*
149: * theoretically these could be inherited from the father,
150: * but just in case.
151: */
152: tf->tf_sr7 = HPPA_SID_KERNEL;
153: mfctl(CR_EIEM, tf->tf_eiem);
154: tf->tf_ipsw = PSL_C | PSL_Q | PSL_P | PSL_D | PSL_I /* | PSL_L */;
155:
156: /*
157: * If specified, give the child a different stack.
158: */
159: if (stack != NULL)
160: tf->tf_sp = (register_t)stack;
161:
162: /*
163: * Build stack frames for the cpu_switch & co.
164: */
165: osp = sp + HPPA_FRAME_SIZE;
166: *(register_t*)(osp - HPPA_FRAME_SIZE) = 0;
167: *(register_t*)(osp + HPPA_FRAME_CRP) = (register_t)&switch_trampoline;
168: *(register_t*)(osp + HPPA_FRAME_SL) = 0; /* cpl */
169: *(register_t*)(osp) = (osp - HPPA_FRAME_SIZE);
170:
171: sp = osp + HPPA_FRAME_SIZE + 20*4; /* frame + calee-save registers */
172: *HPPA_FRAME_CARG(0, sp) = (register_t)arg;
173: *HPPA_FRAME_CARG(1, sp) = KERNMODE(func);
174: pcbp->pcb_ksp = sp;
175: fdcache(HPPA_SID_KERNEL, (vaddr_t)p2->p_addr, sp - (vaddr_t)p2->p_addr);
176: }
177:
178: void
179: cpu_exit(p)
180: struct proc *p;
181: {
182: extern paddr_t fpu_curpcb; /* from locore.S */
183: struct trapframe *tf = p->p_md.md_regs;
184:
185: if (fpu_curpcb == tf->tf_cr30) {
186: fpu_exit();
187: fpu_curpcb = 0;
188: }
189:
190: exit2(p);
191: cpu_switch(p);
192: }
193:
194: void
195: cpu_wait(p)
196: struct proc *p;
197: {
198: }
199:
200: /*
201: * Map an IO request into kernel virtual address space.
202: */
203: void
204: vmapbuf(bp, len)
205: struct buf *bp;
206: vsize_t len;
207: {
208: struct pmap *pm = vm_map_pmap(&bp->b_proc->p_vmspace->vm_map);
209: vaddr_t kva, uva;
210: vsize_t size, off;
211:
212: #ifdef DIAGNOSTIC
213: if ((bp->b_flags & B_PHYS) == 0)
214: panic("vmapbuf");
215: #endif
216: bp->b_saveaddr = bp->b_data;
217: uva = trunc_page((vaddr_t)bp->b_data);
218: off = (vaddr_t)bp->b_data - uva;
219: size = round_page(off + len);
220:
221: /*
222: * We do it on our own here to be able to specify an offset to uvm_map
223: * so that we can get all benefits of PMAP_PREFER.
224: * - art@
225: */
226: kva = uvm_km_valloc_prefer_wait(phys_map, size, uva);
227: fdcache(pm->pm_space, uva, size);
228: bp->b_data = (caddr_t)(kva + off);
229: while (size > 0) {
230: paddr_t pa;
231:
232: if (pmap_extract(pm, uva, &pa) == FALSE)
233: panic("vmapbuf: null page frame");
234: else
235: pmap_kenter_pa(kva, pa, UVM_PROT_RW);
236: uva += PAGE_SIZE;
237: kva += PAGE_SIZE;
238: size -= PAGE_SIZE;
239: }
240: pmap_update(pmap_kernel());
241: }
242:
243: /*
244: * Unmap IO request from the kernel virtual address space.
245: */
246: void
247: vunmapbuf(bp, len)
248: struct buf *bp;
249: vsize_t len;
250: {
251: vaddr_t addr, off;
252:
253: #ifdef DIAGNOSTIC
254: if ((bp->b_flags & B_PHYS) == 0)
255: panic("vunmapbuf");
256: #endif
257: addr = trunc_page((vaddr_t)bp->b_data);
258: off = (vaddr_t)bp->b_data - addr;
259: len = round_page(off + len);
260: pmap_kremove(addr, len);
261: pmap_update(pmap_kernel());
262: uvm_km_free_wakeup(phys_map, addr, len);
263: bp->b_data = bp->b_saveaddr;
264: bp->b_saveaddr = NULL;
265: }
/* CVSweb */