Annotation of sys/arch/sparc/sparc/trap.c, Revision 1.1.1.1
1.1 nbrk 1: /* $OpenBSD: trap.c,v 1.52 2007/05/08 07:23:18 art Exp $ */
2: /* $NetBSD: trap.c,v 1.58 1997/09/12 08:55:01 pk Exp $ */
3:
4: /*
5: * Copyright (c) 1996
6: * The President and Fellows of Harvard College. All rights reserved.
7: * Copyright (c) 1992, 1993
8: * The Regents of the University of California. All rights reserved.
9: *
10: * This software was developed by the Computer Systems Engineering group
11: * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
12: * contributed to Berkeley.
13: *
14: * All advertising materials mentioning features or use of this software
15: * must display the following acknowledgement:
16: * This product includes software developed by the University of
17: * California, Lawrence Berkeley Laboratory.
18: * This product includes software developed by Harvard University.
19: *
20: * Redistribution and use in source and binary forms, with or without
21: * modification, are permitted provided that the following conditions
22: * are met:
23: * 1. Redistributions of source code must retain the above copyright
24: * notice, this list of conditions and the following disclaimer.
25: * 2. Redistributions in binary form must reproduce the above copyright
26: * notice, this list of conditions and the following disclaimer in the
27: * documentation and/or other materials provided with the distribution.
28: * 3. All advertising materials mentioning features or use of this software
29: * must display the following acknowledgement:
30: * This product includes software developed by the University of
31: * California, Berkeley and its contributors.
32: * This product includes software developed by Harvard University.
33: * 4. Neither the name of the University nor the names of its contributors
34: * may be used to endorse or promote products derived from this software
35: * without specific prior written permission.
36: *
37: * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
38: * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
39: * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
40: * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
41: * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
42: * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
43: * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
44: * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
45: * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
46: * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
47: * SUCH DAMAGE.
48: *
49: * @(#)trap.c 8.4 (Berkeley) 9/23/93
50: */
51:
52: #include <sys/param.h>
53: #include <sys/systm.h>
54: #include <sys/proc.h>
55: #include <sys/signalvar.h>
56: #include <sys/user.h>
57: #include <sys/kernel.h>
58: #include <sys/malloc.h>
59: #include <sys/resource.h>
60: #include <sys/signal.h>
61: #include <sys/wait.h>
62: #include <sys/syscall.h>
63: #include <sys/syslog.h>
64: #ifdef KTRACE
65: #include <sys/ktrace.h>
66: #endif
67:
68: #include "systrace.h"
69: #include <dev/systrace.h>
70:
71: #include <uvm/uvm_extern.h>
72:
73: #include <sparc/sparc/asm.h>
74: #include <machine/cpu.h>
75: #include <machine/ctlreg.h>
76: #include <machine/trap.h>
77: #include <machine/instr.h>
78: #include <machine/pmap.h>
79:
80: #ifdef DDB
81: #include <machine/db_machdep.h>
82: #else
83: #include <machine/frame.h>
84: #endif
85: #ifdef COMPAT_SVR4
86: #include <machine/svr4_machdep.h>
87: #endif
88:
89: #include <sparc/fpu/fpu_extern.h>
90: #include <sparc/sparc/memreg.h>
91: #include <sparc/sparc/cpuvar.h>
92:
93: #ifdef DEBUG
94: int rwindow_debug = 0;
95: #endif
96:
97: /*
98: * Initial FPU state is all registers == all 1s, everything else == all 0s.
99: * This makes every floating point register a signalling NaN, with sign bit
100: * set, no matter how it is interpreted. Appendix N of the Sparc V8 document
101: * seems to imply that we should do this, and it does make sense.
102: */
struct fpstate initfpstate = {
	/*
	 * The 32 %f registers, each all 1s (a signalling NaN with sign bit
	 * set in any interpretation); the remaining members (%fsr, queue)
	 * are implicitly zero-initialized.
	 */
	{ ~0, ~0, ~0, ~0, ~0, ~0, ~0, ~0, ~0, ~0, ~0, ~0, ~0, ~0, ~0, ~0,
	  ~0, ~0, ~0, ~0, ~0, ~0, ~0, ~0, ~0, ~0, ~0, ~0, ~0, ~0, ~0, ~0 }
};
107:
108: /*
109: * There are more than 100 trap types, but most are unused.
110: *
111: * Trap type 0 is taken over as an `Asynchronous System Trap'.
112: * This is left-over Vax emulation crap that should be fixed.
113: *
114: * Note that some of the Sparc v8 traps are actually handled by
115: * the corresponding v7 routine, but listed here for completeness.
116: * The Fujitsu Turbo-Sparc Guide also alludes to several more
117: * unimplemented trap types, but doesn't give the nominal coding.
118: */
/* Shared fallback name for every trap slot without a specific description. */
static const char T[] = "trap";
/*
 * Printable name for each trap type, indexed by trap number; used by
 * trap() when panicking on an unexpected trap.  Entries past the end
 * of this table fall back to the generic name T (see N_TRAP_TYPES).
 */
const char *trap_type[] = {
	/* non-user vectors */
	"ast",			/* 0 */
	"text fault",		/* 1 */
	"illegal instruction",	/* 2 */
	"privileged instruction",/*3 */
	"fp disabled",		/* 4 */
	"window overflow",	/* 5 */
	"window underflow",	/* 6 */
	"alignment fault",	/* 7 */
	"fp exception",		/* 8 */
	"data fault",		/* 9 */
	"tag overflow",		/* 0a */
	"watchpoint",		/* 0b */
	T, T, T, T, T,		/* 0c..10 */
	"level 1 int",		/* 11 */
	"level 2 int",		/* 12 */
	"level 3 int",		/* 13 */
	"level 4 int",		/* 14 */
	"level 5 int",		/* 15 */
	"level 6 int",		/* 16 */
	"level 7 int",		/* 17 */
	"level 8 int",		/* 18 */
	"level 9 int",		/* 19 */
	"level 10 int",		/* 1a */
	"level 11 int",		/* 1b */
	"level 12 int",		/* 1c */
	"level 13 int",		/* 1d */
	"level 14 int",		/* 1e */
	"level 15 int",		/* 1f */
	"v8 r-reg error",	/* 20 */
	"v8 text error",	/* 21 */
	T, T,			/* 22..23 */
	"v8 cp disabled",	/* 24 */
	"v8 unimp flush",	/* 25 */
	T, T,			/* 26..27 */
	"v8 cp exception",	/* 28 */
	"v8 data error",	/* 29 */
	"v8 idiv by zero",	/* 2a */
	"v8 store error",	/* 2b */
	"v8 data access MMU miss",/* 2c */
	T, T, T,		/* 2d..2f */
	T, T, T, T, T, T, T, T,	/* 30..37 */
	T, T, T, T,		/* 38..3b */
	"v8 insn access MMU miss",/* 3c */
	T, T, T,		/* 3d..3f */
	T, T, T, T, T, T, T, T,	/* 40..47 */
	T, T, T, T, T, T, T, T,	/* 48..4f */
	T, T, T, T, T, T, T, T,	/* 50..57 */
	T, T, T, T, T, T, T, T,	/* 58..5f */
	T, T, T, T, T, T, T, T,	/* 60..67 */
	T, T, T, T, T, T, T, T,	/* 68..6f */
	T, T, T, T, T, T, T, T,	/* 70..77 */
	T, T, T, T, T, T, T, T,	/* 78..7f */

	/* user (software trap) vectors */
	"syscall",		/* 80 */
	"breakpoint",		/* 81 */
	"zero divide",		/* 82 */
	"flush windows",	/* 83 */
	"clean windows",	/* 84 */
	"range check",		/* 85 */
	"fix align",		/* 86 */
	"integer overflow",	/* 87 */
	"svr4 syscall",		/* 88 */
	"4.4 syscall",		/* 89 */
	"kgdb exec",		/* 8a */
	T, T, T, T, T,		/* 8b..8f */
	T, T, T, T, T, T, T, T,	/* 90..97 */
	T, T, T, T, T, T, T, T,	/* 98..9f */
	"svr4 getcc",		/* a0 */
	"svr4 setcc",		/* a1 */
	"svr4 getpsr",		/* a2 */
	"svr4 setpsr",		/* a3 */
	"svr4 gethrtime",	/* a4 */
	"svr4 gethrvtime",	/* a5 */
	T,			/* a6 */
	"svr4 gethrestime",	/* a7 */
};
199:
/* Number of named entries in trap_type[]; larger types panic as plain T. */
#define N_TRAP_TYPES (sizeof trap_type / sizeof *trap_type)

/* Forward declarations for the entry points called from locore.s. */
static __inline void userret(struct proc *);
void trap(unsigned, int, int, struct trapframe *);
static __inline void share_fpu(struct proc *, struct trapframe *);
void mem_access_fault(unsigned, int, u_int, int, int, struct trapframe *);
void mem_access_fault4m(unsigned, u_int, u_int, struct trapframe *);
void syscall(register_t, struct trapframe *, register_t);

/* When nonzero, unknown hardware traps raise SIGILL instead of panicking. */
int ignore_bogus_traps = 0;

/* AST-pending flag; set elsewhere, cleared in trap() on T_AST. */
int want_ast = 0;
212: /*
213: * Define the code needed before returning to user mode, for
214: * trap, mem_access_fault, and syscall.
215: */
216: static __inline void
217: userret(struct proc *p)
218: {
219: int sig;
220:
221: /* take pending signals */
222: while ((sig = CURSIG(p)) != 0)
223: postsig(sig);
224:
225: p->p_cpu->ci_schedstate.spc_curpriority = p->p_priority = p->p_usrpri;
226: }
227:
228: /*
229: * If someone stole the FPU while we were away, do not enable it
230: * on return. This is not done in userret() above as it must follow
231: * the ktrsysret() in syscall(). Actually, it is likely that the
232: * ktrsysret should occur before the call to userret.
233: */
234: static __inline void share_fpu(p, tf)
235: struct proc *p;
236: struct trapframe *tf;
237: {
238: if ((tf->tf_psr & PSR_EF) != 0 && cpuinfo.fpproc != p)
239: tf->tf_psr &= ~PSR_EF;
240: }
241:
/*
 * Called from locore.s trap handling, for non-MMU-related traps.
 * (MMU-related traps go through mem_access_fault, below.)
 *
 * type is the hardware trap number, psr/pc are copies of the trapped
 * %psr and %pc, and tf is the saved trapframe.  Kernel-mode traps
 * (PSR_PS set) panic except for a few whitelisted cases handled early.
 */
void
trap(type, psr, pc, tf)
	unsigned type;
	int psr, pc;
	struct trapframe *tf;
{
	struct proc *p;
	struct pcb *pcb;
	int n;
	union sigval sv;

	sv.sival_int = pc;	/* XXX fix for parm five of trapsignal() */

	/* This steps the PC over the trap. */
#define ADVANCE (n = tf->tf_npc, tf->tf_pc = n, tf->tf_npc = n + 4)

	uvmexp.traps++;
	/*
	 * Generally, kernel traps cause a panic.  Any exceptions are
	 * handled early here.
	 */
	if (psr & PSR_PS) {
#ifdef DDB
		if (type == T_BREAKPOINT) {
			write_all_windows();
			if (kdb_trap(type, tf)) {
				return;
			}
		}
#endif
#ifdef DIAGNOSTIC
		/*
		 * Currently, we allow DIAGNOSTIC kernel code to
		 * flush the windows to record stack traces.
		 */
		if (type == T_FLUSHWIN) {
			write_all_windows();
			ADVANCE;
			return;
		}
#endif
		/*
		 * Storing %fsr in cpu_attach will cause this trap
		 * even though the fpu has been enabled, if and only
		 * if there is no FPU.
		 */
		if (type == T_FPDISABLED && cold) {
			ADVANCE;
			return;
		}
	dopanic:
		printf("trap type 0x%x: pc=0x%x npc=0x%x psr=%b\n",
		    type, pc, tf->tf_npc, psr, PSR_BITS);
		panic(type < N_TRAP_TYPES ? trap_type[type] : T);
		/* NOTREACHED */
	}
	if ((p = curproc) == NULL)
		p = &proc0;
	pcb = &p->p_addr->u_pcb;
	p->p_md.md_tf = tf;	/* for ptrace/signals */

	switch (type) {

	default:
		/* Unknown hardware trap (< 0x80) or software trap (>= 0x80). */
		if (type < 0x80) {
			if (!ignore_bogus_traps)
				goto dopanic;
			printf("trap type 0x%x: pc=0x%x npc=0x%x psr=%b\n",
			    type, pc, tf->tf_npc, psr, PSR_BITS);
			trapsignal(p, SIGILL, type, ILL_ILLOPC, sv);
			break;
		}
#if defined(COMPAT_SVR4)
	badtrap:
#endif
		/* the following message is gratuitous */
		/* ... but leave it in until we find anything */
		printf("%s[%d]: unimplemented software trap 0x%x\n",
		    p->p_comm, p->p_pid, type);
		trapsignal(p, SIGILL, type, ILL_ILLOPC, sv);
		break;

#ifdef COMPAT_SVR4
	case T_SVR4_GETCC:
	case T_SVR4_SETCC:
	case T_SVR4_GETPSR:
	case T_SVR4_SETPSR:
	case T_SVR4_GETHRTIME:
	case T_SVR4_GETHRVTIME:
	case T_SVR4_GETHRESTIME:
		/* SVR4 emulation handles these; fall back to SIGILL if not. */
		if (!svr4_trap(type, p))
			goto badtrap;
		break;
#endif

	case T_AST:
		/* Asynchronous system trap: profiling tick and/or reschedule. */
		want_ast = 0;
		if (p->p_flag & P_OWEUPC) {
			ADDUPROF(p);
		}
		if (want_resched)
			preempt(NULL);
		break;

	case T_ILLINST:
		/* Try to emulate the instruction first (e.g. unimplemented ops). */
		if ((n = emulinstr(pc, tf)) == 0) {
			ADVANCE;
			break;
		}
		trapsignal(p, SIGILL, 0, ILL_ILLOPC, sv);
		break;

	case T_PRIVINST:
		trapsignal(p, SIGILL, 0, ILL_PRVOPC, sv);
		break;

	case T_FPDISABLED: {
		struct fpstate *fs = p->p_md.md_fpstate;

		/* First FPU use by this process: allocate initial FP state. */
		if (fs == NULL) {
			fs = malloc(sizeof *fs, M_SUBPROC, M_WAITOK);
			*fs = initfpstate;
			p->p_md.md_fpstate = fs;
		}
		/*
		 * If we have not found an FPU, we have to emulate it.
		 */
		if (!foundfpu) {
#ifdef notyet
			fpu_emulate(p, tf, fs);
			break;
#else
			trapsignal(p, SIGFPE, 0, FPE_FLTINV, sv);
			break;
#endif
		}
		/*
		 * We may have more FPEs stored up and/or ops queued.
		 * If they exist, handle them and get out.  Otherwise,
		 * resolve the FPU state, turn it on, and try again.
		 */
		if (fs->fs_qsize) {
			fpu_cleanup(p, fs);
			break;
		}
		if (cpuinfo.fpproc != p) {		/* we do not have it */
			if (cpuinfo.fpproc != NULL)	/* someone else had it */
				savefpstate(cpuinfo.fpproc->p_md.md_fpstate);
			loadfpstate(fs);
			cpuinfo.fpproc = p;		/* now we do have it */
			uvmexp.fpswtch++;
		}
		tf->tf_psr |= PSR_EF;
		break;
	}

	case T_WINOF:
		/* Window overflow: spill saved windows from pcb to user stack. */
		if (rwindow_save(p))
			sigexit(p, SIGILL);
		break;

/* Copy one register window from a user stack address into the pcb. */
#define read_rw(src, dst) \
	copyin((caddr_t)(src), (caddr_t)(dst), sizeof(struct rwindow))

	case T_RWRET:
		/*
		 * T_RWRET is a window load needed in order to rett.
		 * It simply needs the window to which tf->tf_out[6]
		 * (%sp) points.  There are no user or saved windows now.
		 * Copy the one from %sp into pcb->pcb_rw[0] and set
		 * nsaved to -1.  If we decide to deliver a signal on
		 * our way out, we will clear nsaved.
		 */
		if (pcb->pcb_uw || pcb->pcb_nsaved)
			panic("trap T_RWRET 1");
#ifdef DEBUG
		if (rwindow_debug)
			printf("%s[%d]: rwindow: pcb<-stack: 0x%x\n",
			    p->p_comm, p->p_pid, tf->tf_out[6]);
#endif
		if (read_rw(tf->tf_out[6], &pcb->pcb_rw[0]))
			sigexit(p, SIGILL);
		if (pcb->pcb_nsaved)
			panic("trap T_RWRET 2");
		pcb->pcb_nsaved = -1;		/* mark success */
		break;

	case T_WINUF:
		/*
		 * T_WINUF is a real window underflow, from a restore
		 * instruction.  It needs to have the contents of two
		 * windows---the one belonging to the restore instruction
		 * itself, which is at its %sp, and the one belonging to
		 * the window above, which is at its %fp or %i6---both
		 * in the pcb.  The restore's window may still be in
		 * the cpu; we need to force it out to the stack.
		 */
#ifdef DEBUG
		if (rwindow_debug)
			printf("%s[%d]: rwindow: T_WINUF 0: pcb<-stack: 0x%x\n",
			    p->p_comm, p->p_pid, tf->tf_out[6]);
#endif
		write_user_windows();
		if (rwindow_save(p) || read_rw(tf->tf_out[6], &pcb->pcb_rw[0]))
			sigexit(p, SIGILL);
#ifdef DEBUG
		if (rwindow_debug)
			printf("%s[%d]: rwindow: T_WINUF 1: pcb<-stack: 0x%x\n",
			    p->p_comm, p->p_pid, pcb->pcb_rw[0].rw_in[6]);
#endif
		if (read_rw(pcb->pcb_rw[0].rw_in[6], &pcb->pcb_rw[1]))
			sigexit(p, SIGILL);
		if (pcb->pcb_nsaved)
			panic("trap T_WINUF");
		pcb->pcb_nsaved = -1;		/* mark success */
		break;

	case T_ALIGN:
		/* Fix the access in software if the process opted in (T_FIXALIGN). */
		if ((p->p_md.md_flags & MDP_FIXALIGN) != 0 &&
		    fixalign(p, tf) == 0) {
			ADVANCE;
			break;
		}
		trapsignal(p, SIGBUS, 0, BUS_ADRALN, sv);
		break;

	case T_FPE:
		/*
		 * Clean up after a floating point exception.
		 * fpu_cleanup can (and usually does) modify the
		 * state we save here, so we must `give up' the FPU
		 * chip context.  (The software and hardware states
		 * will not match once fpu_cleanup does its job, so
		 * we must not save again later.)
		 */
		if (p != cpuinfo.fpproc)
			panic("fpe without being the FP user");
		savefpstate(p->p_md.md_fpstate);
		cpuinfo.fpproc = NULL;
		/* tf->tf_psr &= ~PSR_EF; */	/* share_fpu will do this */
		fpu_cleanup(p, p->p_md.md_fpstate);
		/* fpu_cleanup posts signals if needed */
#if 0		/* ??? really never??? */
		ADVANCE;
#endif
		break;

	case T_TAGOF:
		trapsignal(p, SIGEMT, 0, EMT_TAGOVF, sv);
		break;

	case T_CPDISABLED:
		uprintf("coprocessor instruction\n");	/* XXX */
		trapsignal(p, SIGILL, 0, ILL_COPROC, sv);
		break;

	case T_BREAKPOINT:
		trapsignal(p, SIGTRAP, 0, TRAP_BRKPT, sv);
		break;

	case T_DIV0:
	case T_IDIV0:
		/* Step over the divide so the signal handler can resume. */
		ADVANCE;
		trapsignal(p, SIGFPE, 0, FPE_INTDIV, sv);
		break;

	case T_FLUSHWIN:
		write_user_windows();
#ifdef probably_slower_since_this_is_usually_false
		if (pcb->pcb_nsaved && rwindow_save(p))
			sigexit(p, SIGILL);
#endif
		ADVANCE;
		break;

	case T_CLEANWIN:
		uprintf("T_CLEANWIN\n");	/* XXX */
		ADVANCE;
		break;

	case T_RANGECHECK:
		uprintf("T_RANGECHECK\n");	/* XXX */
		ADVANCE;
		trapsignal(p, SIGILL, 0, ILL_ILLOPN, sv);
		break;

	case T_FIXALIGN:
#ifdef DEBUG_ALIGN
		uprintf("T_FIXALIGN\n");
#endif
		/* User wants us to fix alignment faults */
		p->p_md.md_flags |= MDP_FIXALIGN;
		ADVANCE;
		break;

	case T_INTOF:
		uprintf("T_INTOF\n");		/* XXX */
		ADVANCE;
		trapsignal(p, SIGFPE, FPE_INTOVF_TRAP, FPE_INTOVF, sv);
		break;
	}
	userret(p);
	share_fpu(p, tf);
#undef ADVANCE
}
551:
552: /*
553: * Save windows from PCB into user stack, and return 0. This is used on
554: * window overflow pseudo-traps (from locore.s, just before returning to
555: * user mode) and when ptrace or sendsig needs a consistent state.
556: * As a side effect, rwindow_save() always sets pcb_nsaved to 0,
557: * clobbering the `underflow restore' indicator if it was -1.
558: *
559: * If the windows cannot be saved, pcb_nsaved is restored and we return -1.
560: */
561: int
562: rwindow_save(p)
563: struct proc *p;
564: {
565: struct pcb *pcb = &p->p_addr->u_pcb;
566: struct rwindow *rw = &pcb->pcb_rw[0];
567: int i;
568:
569: i = pcb->pcb_nsaved;
570: if (i < 0) {
571: pcb->pcb_nsaved = 0;
572: return (0);
573: }
574: if (i == 0)
575: return (0);
576: #ifdef DEBUG
577: if (rwindow_debug)
578: printf("%s[%d]: rwindow: pcb->stack:", p->p_comm, p->p_pid);
579: #endif
580: do {
581: #ifdef DEBUG
582: if (rwindow_debug)
583: printf(" 0x%x", rw[1].rw_in[6]);
584: #endif
585: if (copyout((caddr_t)rw, (caddr_t)rw[1].rw_in[6],
586: sizeof *rw))
587: return (-1);
588: rw++;
589: } while (--i > 0);
590: #ifdef DEBUG
591: if (rwindow_debug)
592: printf("\n");
593: #endif
594: pcb->pcb_nsaved = 0;
595: return (0);
596: }
597:
598: /*
599: * Kill user windows (before exec) by writing back to stack or pcb
600: * and then erasing any pcb tracks. Otherwise we might try to write
601: * the registers into the new process after the exec.
602: */
603: void
604: pmap_unuse_final(p)
605: struct proc *p;
606: {
607:
608: write_user_windows();
609: p->p_addr->u_pcb.pcb_nsaved = 0;
610: }
611:
/*
 * Called from locore.s trap handling, for synchronous memory faults
 * (sun4/sun4c only).
 *
 * This duplicates a lot of logic in trap() and perhaps should be
 * moved there; but the bus-error-register parameters are unique to
 * this routine.
 *
 * Since synchronous errors accumulate during prefetch, we can have
 * more than one `cause'.  But we do not care what the cause, here;
 * we just want to page in the page and try again.
 *
 * ser is the synchronous error register, v the fault address,
 * pc/psr copies of the trapped %pc/%psr.
 */
void
mem_access_fault(type, ser, v, pc, psr, tf)
	unsigned type;
	int ser;
	u_int v;
	int pc, psr;
	struct trapframe *tf;
{
#if defined(SUN4) || defined(SUN4C)
	struct proc *p;
	struct vmspace *vm;
	vaddr_t va;
	int rv;
	vm_prot_t ftype;
	int onfault;
	union sigval sv;

	uvmexp.traps++;
	if ((p = curproc) == NULL)	/* safety check */
		p = &proc0;

	/*
	 * Figure out what to pass the VM code, and ignore the sva register
	 * value in v on text faults (text faults are always at pc).
	 * Kernel faults are somewhat different: text faults are always
	 * illegal, and data faults are extra complex.  User faults must
	 * set p->p_md.md_tf, in case we decide to deliver a signal.  Check
	 * for illegal virtual addresses early since those can induce more
	 * faults.
	 */
	if (type == T_TEXTFAULT)
		v = pc;
	if (VA_INHOLE(v))
		goto fault;
	ftype = ser & SER_WRITE ? VM_PROT_WRITE : VM_PROT_READ;
	va = trunc_page(v);
	if (psr & PSR_PS) {
		extern char Lfsbail[];
		if (type == T_TEXTFAULT) {
			(void) splhigh();
			printf("text fault: pc=0x%x ser=%b\n", pc,
			    ser, SER_BITS);
			panic("kernel fault");
			/* NOTREACHED */
		}
		/*
		 * If this was an access that we shouldn't try to page in,
		 * resume at the fault handler without any action.
		 */
		if (p->p_addr && p->p_addr->u_pcb.pcb_onfault == Lfsbail)
			goto kfault;

		/*
		 * During autoconfiguration, faults are never OK unless
		 * pcb_onfault is set.  Once running normally we must allow
		 * exec() to cause copy-on-write faults to kernel addresses.
		 */
		if (cold)
			goto kfault;
		if (va >= VM_MIN_KERNEL_ADDRESS) {
			if (uvm_fault(kernel_map, va, 0, ftype) == 0)
				return;
			goto kfault;
		}
	} else
		p->p_md.md_tf = tf;

	/*
	 * mmu_pagein returns -1 if the page is already valid, in which
	 * case we have a hard fault; it returns 1 if it loads a segment
	 * that got bumped out via LRU replacement.
	 */
	vm = p->p_vmspace;
	rv = mmu_pagein(vm->vm_map.pmap, va,
	    ser & SER_WRITE ? VM_PROT_WRITE : VM_PROT_READ);
	if (rv < 0)
		goto fault;
	if (rv > 0)
		goto out;

	/* alas! must call the horrible vm code */
	rv = uvm_fault(&vm->vm_map, (vaddr_t)va, 0, ftype);

	/*
	 * If this was a stack access we keep track of the maximum
	 * accessed stack size.  Also, if vm_fault gets a protection
	 * failure it is due to accessing the stack region outside
	 * the current limit and we need to reflect that as an access
	 * error.
	 */
	if ((caddr_t)va >= vm->vm_maxsaddr) {
		if (rv == 0)
			uvm_grow(p, va);
		else if (rv == EACCES)
			rv = EFAULT;
	}
	if (rv == 0) {
		/*
		 * pmap_enter() does not enter all requests made from
		 * vm_fault into the MMU (as that causes unnecessary
		 * entries for `wired' pages).  Instead, we call
		 * mmu_pagein here to make sure the new PTE gets installed.
		 */
		(void) mmu_pagein(vm->vm_map.pmap, va, VM_PROT_NONE);
	} else {
		/*
		 * Pagein failed.  If doing copyin/out, return to onfault
		 * address.  Any other page fault in kernel, die; if user
		 * fault, deliver SIGSEGV.
		 */
	fault:
		if (psr & PSR_PS) {
		kfault:
			onfault = p->p_addr ?
			    (int)p->p_addr->u_pcb.pcb_onfault : 0;
			if (!onfault) {
				(void) splhigh();
				printf("data fault: pc=0x%x addr=0x%x ser=%b\n",
				    pc, v, ser, SER_BITS);
				panic("kernel fault");
				/* NOTREACHED */
			}
			/* resume at the registered copyin/copyout recovery point */
			tf->tf_pc = onfault;
			tf->tf_npc = onfault + 4;
			return;
		}

		sv.sival_int = v;
		trapsignal(p, SIGSEGV, (ser & SER_WRITE) ? VM_PROT_WRITE :
		    VM_PROT_READ, SEGV_MAPERR, sv);
	}
out:
	if ((psr & PSR_PS) == 0) {
		userret(p);
		share_fpu(p, tf);
	}
#endif /* Sun4/Sun4C */
}
761:
762: #if defined(SUN4M) /* 4m version of mem_access_fault() follows */
763:
/*
 * Last text-fault address for which a translation error was retried;
 * used to break out of an otherwise infinite retry loop.
 */
static int tfaultaddr = (int) 0xdeadbeef;

#ifdef DEBUG
/* Data-fault debug switch (currently unused in this file). */
int dfdebug = 0;
#endif
769:
/*
 * sun4m (SRMMU) version of mem_access_fault().  sfsr/sfva are the
 * synchronous fault status and address registers; pc and psr are
 * recovered from the trapframe.
 */
void
mem_access_fault4m(type, sfsr, sfva, tf)
	unsigned type;
	u_int sfsr;
	u_int sfva;
	struct trapframe *tf;
{
	int pc, psr;
	struct proc *p;
	struct vmspace *vm;
	vaddr_t va;
	int rv;
	vm_prot_t ftype;
	int onfault;
	union sigval sv;

	uvmexp.traps++;
	if ((p = curproc) == NULL)	/* safety check */
		p = &proc0;

	pc = tf->tf_pc;			/* These are needed below */
	psr = tf->tf_psr;

	/*
	 * Our first priority is handling serious faults, such as
	 * parity errors or async faults that might have come through here.
	 * If afsr & AFSR_AFO != 0, then we're on a HyperSPARC and we
	 * got an async fault.  We pass it on to memerr4m.  Similarly, if
	 * the trap was T_STOREBUFFAULT, we pass it on to memerr4m.
	 * If we have a data fault, but SFSR_FAV is not set in the sfsr,
	 * then things are really bizarre, and we treat it as a hard
	 * error and pass it on to memerr4m.  See pg. 9-35 in the SuperSPARC
	 * user's guide for more info, and for a possible solution which we
	 * don't implement here.
	 */
	if (type == T_STOREBUFFAULT ||
	    (type == T_DATAFAULT && !(sfsr & SFSR_FAV))) {
		(*cpuinfo.memerr)(type, sfsr, sfva, tf);
		/*
		 * If we get here, exit the trap handler and wait for the
		 * trap to re-occur.
		 */
		goto out;
	}

	/*
	 * Figure out what to pass the VM code.  We cannot ignore the sfva
	 * register on text faults, since this might be a trap on an
	 * alternate-ASI access to code space.  However, if we're on a
	 * supersparc, we can't help using PC, since we don't get a VA in
	 * sfva.
	 * Kernel faults are somewhat different: text faults are always
	 * illegal, and data faults are extra complex.  User faults must
	 * set p->p_md.md_tf, in case we decide to deliver a signal.  Check
	 * for illegal virtual addresses early since those can induce more
	 * faults.
	 * All translation faults are illegal, and result in a SIGSEGV
	 * being delivered to the running process (or a kernel panic, for
	 * a kernel fault).  We check the translation first to make sure
	 * it is not spurious.
	 * Also, note that in the case where we have an overwritten
	 * text fault (OW==1, AT==2,3), we attempt to service the
	 * second (overwriting) fault, then restart the instruction
	 * (which is from the first fault) and allow the first trap
	 * to reappear.  XXX is this right?  It will probably change...
	 */
	if ((sfsr & SFSR_FT) == SFSR_FT_NONE)
		goto out;	/* No fault. Why were we called? */

	if ((sfsr & SFSR_AT_STORE)) {
		/* stores are never text faults. */
		ftype = VM_PROT_WRITE;
	} else {
		ftype = VM_PROT_READ;
		if ((sfsr & SFSR_AT_TEXT) || (type == T_TEXTFAULT)) {
			ftype |= VM_PROT_EXECUTE;
		}
	}

	/*
	 * NOTE: the per-CPU fault status register readers (in locore)
	 * may already have decided to pass `pc' in `sfva', so we avoid
	 * testing CPU types here.
	 * Q: test SFSR_FAV in the locore stubs too?
	 */
	if ((sfsr & SFSR_FAV) == 0) {
		if (type == T_TEXTFAULT)
			sfva = pc;
		else
			goto fault;
	}

	if ((sfsr & SFSR_FT) == SFSR_FT_TRANSERR) {
		/* Translation errors are always fatal, as they indicate
		 * a corrupt translation (page) table hierarchy.
		 */
		if (tfaultaddr == sfva)	/* Prevent infinite loops w/a static */
			goto fault;
		tfaultaddr = sfva;
		/* probe the page table entry; anything but a PTE is fatal */
		if ((lda((sfva & 0xFFFFF000) | ASI_SRMMUFP_LN, ASI_SRMMUFP) &
		    SRMMU_TETYPE) != SRMMU_TEPTE)
			goto fault;	/* Translation bad */
		lda(SRMMU_SFSR, ASI_SRMMU);	/* clear the fault status */
		goto out;	/* Translation OK, retry operation */
	}

	va = trunc_page(sfva);

	if (((sfsr & SFSR_AT_TEXT) || type == T_TEXTFAULT) &&
	    !(sfsr & SFSR_AT_STORE) && (sfsr & SFSR_OW)) {
		if (psr & PSR_PS)	/* never allow in kernel */
			goto kfault;
#if 0
		/*
		 * Double text fault.  The evil "case 5" from the HS manual...
		 * Attempt to handle early fault.  Ignores ASI 8,9 issue...may
		 * do a useless VM read.
		 * XXX: Is this really necessary?
		 */
		if (mmumod == SUN4M_MMU_HS) { /* On HS, we have va for both */
			if (vm_fault(kernel_map, trunc_page(pc),
			    VM_PROT_READ, 0))
#ifdef DEBUG
				printf("mem_access_fault: "
				    "can't pagein 1st text fault.\n")
#endif
			;
		}
#endif
	}

	/* Now munch on protections... */

	if (psr & PSR_PS) {
		if (sfsr & SFSR_AT_TEXT || type == T_TEXTFAULT) {
			(void) splhigh();
			printf("text fault: pc=0x%x sfsr=%b sfva=0x%x\n", pc,
			    sfsr, SFSR_BITS, sfva);
			panic("kernel fault");
			/* NOTREACHED */
		}

		/*
		 * During autoconfiguration, faults are never OK unless
		 * pcb_onfault is set.  Once running normally we must allow
		 * exec() to cause copy-on-write faults to kernel addresses.
		 */
		if (cold)
			goto kfault;
		if (va >= VM_MIN_KERNEL_ADDRESS) {
			if (uvm_fault(kernel_map, va, 0, ftype) == 0)
				return;
			goto kfault;
		}
	} else
		p->p_md.md_tf = tf;

	vm = p->p_vmspace;

	/* alas! must call the horrible vm code */
	rv = uvm_fault(&vm->vm_map, (vaddr_t)va, 0, ftype);
	/*
	 * If this was a stack access we keep track of the maximum
	 * accessed stack size.  Also, if vm_fault gets a protection
	 * failure it is due to accessing the stack region outside
	 * the current limit and we need to reflect that as an access
	 * error.
	 */
	if ((caddr_t)va >= vm->vm_maxsaddr) {
		if (rv == 0)
			uvm_grow(p, va);
		else if (rv == EACCES)
			rv = EFAULT;
	}
	if (rv != 0) {
		/*
		 * Pagein failed.  If doing copyin/out, return to onfault
		 * address.  Any other page fault in kernel, die; if user
		 * fault, deliver SIGSEGV.
		 */
	fault:
		if (psr & PSR_PS) {
		kfault:
			onfault = p->p_addr ?
			    (int)p->p_addr->u_pcb.pcb_onfault : 0;
			if (!onfault) {
				(void) splhigh();
				printf("data fault: pc=0x%x sfva=0x%x sfsr=%b\n",
				    pc, sfva, sfsr, SFSR_BITS);
				panic("kernel fault");
				/* NOTREACHED */
			}
			/* resume at the registered copyin/copyout recovery point */
			tf->tf_pc = onfault;
			tf->tf_npc = onfault + 4;
			return;
		}

		sv.sival_int = sfva;
		trapsignal(p, SIGSEGV, ftype, SEGV_MAPERR, sv);
	}
out:
	if ((psr & PSR_PS) == 0) {
		userret(p);
		share_fpu(p, tf);
	}
}
976: #endif
977:
/*
 * System calls.  `pc' is just a copy of tf->tf_pc.
 *
 * Note that the things labelled `out' registers in the trapframe were the
 * `in' registers within the syscall trap code (because of the automatic
 * `save' effect of each trap).  They are, however, the %o registers of the
 * thing that made the system call, and are named that way here.
 *
 * code may carry SYSCALL_G2RFLAG/SYSCALL_G7RFLAG, which request the
 * newer `jump to %g2/%g7 on success' return convention.
 */
void
syscall(code, tf, pc)
	register_t code;
	struct trapframe *tf;
	register_t pc;
{
	int i, nsys, *ap, nap;
	struct sysent *callp;
	struct proc *p;
	int error, new;
	struct args {
		register_t i[8];
	} args;
	register_t rval[2];
#ifdef DIAGNOSTIC
	extern struct pcb *cpcb;
#endif

	uvmexp.syscalls++;
	p = curproc;
#ifdef DIAGNOSTIC
	if (tf->tf_psr & PSR_PS)
		panic("syscall");
	if (cpcb != &p->p_addr->u_pcb)
		panic("syscall cpcb/ppcb");
	if (tf != (struct trapframe *)((caddr_t)cpcb + USPACE) - 1)
		panic("syscall trapframe");
#endif
	p->p_md.md_tf = tf;
	/* strip and remember the return-convention flag bits */
	new = code & (SYSCALL_G7RFLAG | SYSCALL_G2RFLAG);
	code &= ~(SYSCALL_G7RFLAG | SYSCALL_G2RFLAG);

	callp = p->p_emul->e_sysent;
	nsys = p->p_emul->e_nsysent;

	/*
	 * The first six system call arguments are in the six %o registers.
	 * Any arguments beyond that are in the `argument extension' area
	 * of the user's stack frame (see <machine/frame.h>).
	 *
	 * Check for ``special'' codes that alter this, namely syscall and
	 * __syscall.  The latter takes a quad syscall number, so that other
	 * arguments are at their natural alignments.  Adjust the number
	 * of ``easy'' arguments as appropriate; we will copy the hard
	 * ones later as needed.
	 */
	ap = &tf->tf_out[0];
	nap = 6;

	switch (code) {
	case SYS_syscall:
		code = *ap++;
		nap--;
		break;
	case SYS___syscall:
		if (callp != sysent)
			break;
		code = ap[_QUAD_LOWWORD];
		ap += 2;
		nap -= 2;
		break;
	}

	if (code < 0 || code >= nsys)
		callp += p->p_emul->e_nosys;	/* illegal syscall number */
	else {
		callp += code;
		i = callp->sy_argsize / sizeof(register_t);
		if (i > nap) {	/* usually false */
			if (i > 8)
				panic("syscall nargs");
			/* fetch the overflow arguments from the user stack */
			error = copyin((caddr_t)tf->tf_out[6] +
			    offsetof(struct frame, fr_argx),
			    (caddr_t)&args.i[nap], (i - nap) * sizeof(register_t));
			if (error) {
#ifdef KTRACE
				if (KTRPOINT(p, KTR_SYSCALL))
					ktrsyscall(p, code,
					    callp->sy_argsize, args.i);
#endif
				goto bad;
			}
			i = nap;
		}
		copywords(ap, args.i, i * sizeof(register_t));
	}
#ifdef KTRACE
	if (KTRPOINT(p, KTR_SYSCALL))
		ktrsyscall(p, code, callp->sy_argsize, args.i);
#endif
	rval[0] = 0;
	rval[1] = tf->tf_out[1];
#if NSYSTRACE > 0
	if (ISSET(p->p_flag, P_SYSTRACE))
		error = systrace_redirect(code, p, &args, rval);
	else
#endif
		error = (*callp->sy_call)(p, &args, rval);

	switch (error) {
	case 0:
		/* Note: fork() does not return here in the child */
		tf->tf_out[0] = rval[0];
		tf->tf_out[1] = rval[1];
		if (new) {
			/* jmp %g2 (or %g7, deprecated) on success */
			i = tf->tf_global[new & SYSCALL_G2RFLAG ? 2 : 7];
			if (i & 3) {
				/* misaligned return address */
				error = EINVAL;
				goto bad;
			}
		} else {
			/* old system call convention: clear C on success */
			tf->tf_psr &= ~PSR_C;	/* success */
			i = tf->tf_npc;
		}
		tf->tf_pc = i;
		tf->tf_npc = i + 4;
		break;

	case ERESTART:
	case EJUSTRETURN:
		/* nothing to do */
		break;

	default:
	bad:
		/* translate the errno for emulated binaries, set carry */
		if (p->p_emul->e_errno)
			error = p->p_emul->e_errno[error];
		tf->tf_out[0] = error;
		tf->tf_psr |= PSR_C;	/* fail */
		i = tf->tf_npc;
		tf->tf_pc = i;
		tf->tf_npc = i + 4;
		break;
	}

	userret(p);
#ifdef KTRACE
	if (KTRPOINT(p, KTR_SYSRET))
		ktrsysret(p, code, error, rval[0]);
#endif
	share_fpu(p, tf);
}
1130:
/*
 * Process the tail end of a fork() for the child.
 * Runs on the child's kernel stack after cpu_fork(); makes the
 * child's syscall appear to return 0 successfully.
 */
void
child_return(arg)
	void *arg;
{
	struct proc *p = arg;
	struct trapframe *tf = p->p_md.md_tf;

	/*
	 * Return values in the frame set by cpu_fork().
	 */
	tf->tf_out[0] = 0;	/* child's fork() return value */
	tf->tf_out[1] = 0;
	tf->tf_psr &= ~PSR_C;	/* clear carry: success (old convention) */

	userret(p);
#ifdef KTRACE
	if (KTRPOINT(p, KTR_SYSRET))
		ktrsysret(p,
		    (p->p_flag & P_PPWAIT) ? SYS_vfork : SYS_fork, 0, 0);
#endif
}
CVSweb