Annotation of sys/arch/sparc64/sparc64/trap.c, Revision 1.1.1.1
1.1 nbrk 1: /* $OpenBSD: trap.c,v 1.51 2007/07/05 22:16:30 kettenis Exp $ */
2: /* $NetBSD: trap.c,v 1.73 2001/08/09 01:03:01 eeh Exp $ */
3:
4: /*
5: * Copyright (c) 1996
6: * The President and Fellows of Harvard College. All rights reserved.
7: * Copyright (c) 1992, 1993
8: * The Regents of the University of California. All rights reserved.
9: *
10: * This software was developed by the Computer Systems Engineering group
11: * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
12: * contributed to Berkeley.
13: *
14: * All advertising materials mentioning features or use of this software
15: * must display the following acknowledgement:
16: * This product includes software developed by the University of
17: * California, Lawrence Berkeley Laboratory.
18: * This product includes software developed by Harvard University.
19: *
20: * Redistribution and use in source and binary forms, with or without
21: * modification, are permitted provided that the following conditions
22: * are met:
23: * 1. Redistributions of source code must retain the above copyright
24: * notice, this list of conditions and the following disclaimer.
25: * 2. Redistributions in binary form must reproduce the above copyright
26: * notice, this list of conditions and the following disclaimer in the
27: * documentation and/or other materials provided with the distribution.
28: * 3. All advertising materials mentioning features or use of this software
29: * must display the following acknowledgement:
30: * This product includes software developed by the University of
31: * California, Berkeley and its contributors.
32: * This product includes software developed by Harvard University.
33: * 4. Neither the name of the University nor the names of its contributors
34: * may be used to endorse or promote products derived from this software
35: * without specific prior written permission.
36: *
37: * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
38: * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
39: * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
40: * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
41: * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
42: * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
43: * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
44: * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
45: * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
46: * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
47: * SUCH DAMAGE.
48: *
49: * @(#)trap.c 8.4 (Berkeley) 9/23/93
50: */
51:
52: #include <sys/param.h>
53: #include <sys/systm.h>
54: #include <sys/proc.h>
55: #include <sys/signalvar.h>
56: #include <sys/user.h>
57: #include <sys/kernel.h>
58: #include <sys/malloc.h>
59: #include <sys/resource.h>
60: #include <sys/signal.h>
61: #include <sys/wait.h>
62: #include <sys/syscall.h>
63: #include <sys/syslog.h>
64: #ifdef KTRACE
65: #include <sys/ktrace.h>
66: #endif
67:
68: #include "systrace.h"
69: #include <dev/systrace.h>
70:
71: #include <uvm/uvm_extern.h>
72:
73: #include <machine/cpu.h>
74: #include <machine/ctlreg.h>
75: #include <machine/trap.h>
76: #include <machine/instr.h>
77: #include <machine/pmap.h>
78:
79: #ifdef DDB
80: #include <machine/db_machdep.h>
81: #else
82: #include <machine/frame.h>
83: #endif
84: #ifdef COMPAT_SVR4
85: #include <machine/svr4_machdep.h>
86: #endif
87: #ifdef COMPAT_SVR4_32
88: #include <machine/svr4_32_machdep.h>
89: #endif
90:
91: #include <sparc64/fpu/fpu_extern.h>
92: #include <sparc64/sparc64/cache.h>
93:
#ifndef offsetof
/*
 * Offset of member f within structure type s, in bytes.
 * Cast through unsigned long rather than int: pointers are 64 bits on
 * sparc64, so narrowing the address computation to int could truncate
 * the offset for very large structures and is formally a pointer
 * truncation either way.
 */
#define	offsetof(s, f) ((unsigned long)&((s *)0)->f)
#endif
97:
/*
 * trapstats: event counters kept while `trapstats' is nonzero.
 * NOTE(review): these appear to be incremented from the locore trap
 * vectors (not visible in this file) — confirm against locore.s.
 */
int trapstats = 0;
int protfix = 0;
int udmiss = 0;	/* Number of normal/nucleus data/text miss/protection faults */
int udhit = 0;
int udprot = 0;
int utmiss = 0;
int kdmiss = 0;
int kdhit = 0;
int kdprot = 0;
int ktmiss = 0;
int iveccnt = 0; /* number of normal/nucleus interrupt/interrupt vector faults */
int uintrcnt = 0;
int kiveccnt = 0;
int kintrcnt = 0;
int intristk = 0; /* interrupts when already on intrstack */
int intrpoll = 0; /* interrupts not using vector lists */
int wfill = 0;
int kwfill = 0;
int wspill = 0;
int wspillskip = 0;
int rftucnt = 0;
int rftuld = 0;
int rftudone = 0;
int rftkcnt[5] = { 0, 0, 0, 0, 0 };
123:
/*
 * Initial FPU state is all registers == all 1s, everything else == all 0s.
 * This makes every floating point register a signalling NaN, with sign bit
 * set, no matter how it is interpreted.  Appendix N of the Sparc V8 document
 * seems to imply that we should do this, and it does make sense.
 */
/*
 * The .align 64 forces cache-line alignment of initfpstate.
 * NOTE(review): trap() below copies this by assignment and the comment
 * there only requires 64-bit alignment; block load/store alignment is
 * presumably the reason for the stronger 64-byte alignment — confirm.
 */
__asm(".align 64");
struct fpstate64 initfpstate = {
	{ ~0, ~0, ~0, ~0, ~0, ~0, ~0, ~0, ~0, ~0, ~0, ~0, ~0, ~0, ~0, ~0,
	  ~0, ~0, ~0, ~0, ~0, ~0, ~0, ~0, ~0, ~0, ~0, ~0, ~0, ~0, ~0, ~0 }
};
135:
/*
 * There are more than 100 trap types, but most are unused.
 *
 * Trap type 0 is taken over as an `Asynchronous System Trap'.
 * This is left-over Vax emulation crap that should be fixed.
 *
 * Traps not supported on the spitfire are marked with `*',
 * and additions are marked with `+'
 */
/* Placeholder name for unused/unknown trap vectors. */
static const char T[] = "*trap";
/* Human-readable trap names, indexed by trap type (see panic in trap()). */
const char *trap_type[] = {
	/* non-user vectors */
	"ast",			/* 0 */
	"power on reset",	/* 1 */
	"watchdog reset",	/* 2 */
	"externally initiated reset",/* 3 */
	"software initiated reset",/* 4 */
	"RED state exception",	/* 5 */
	T, T,			/* 6..7 */
	"instruction access exception",	/* 8 */
	"*instruction MMU miss",/* 9 */
	"instruction access error",/* 0a */
	T, T, T, T, T,		/* 0b..0f */
	"illegal instruction",	/* 10 */
	"privileged opcode",	/* 11 */
	"*unimplemented LDD",	/* 12 */
	"*unimplemented STD",	/* 13 */
	T, T, T, T,		/* 14..17 */
	T, T, T, T, T, T, T, T, /* 18..1f */
	"fp disabled",		/* 20 */
	"fp exception ieee 754",/* 21 */
	"fp exception other",	/* 22 */
	"tag overflow",		/* 23 */
	"clean window",		/* 24 */
	T, T, T,		/* 25..27 -- trap continues */
	"division by zero",	/* 28 */
	"*internal processor error",/* 29 */
	T, T, T, T, T, T,	/* 2a..2f */
	"data access exception",/* 30 */
	"*data access MMU miss",/* 31 */
	"data access error",	/* 32 */
	"*data access protection",/* 33 */
	"mem address not aligned",	/* 34 */
	"LDDF mem address not aligned",/* 35 */
	"STDF mem address not aligned",/* 36 */
	"privileged action",	/* 37 */
	"LDQF mem address not aligned",/* 38 */
	"STQF mem address not aligned",/* 39 */
	T, T, T, T, T, T,	/* 3a..3f */
	"*async data error",	/* 40 */
	"level 1 int",		/* 41 */
	"level 2 int",		/* 42 */
	"level 3 int",		/* 43 */
	"level 4 int",		/* 44 */
	"level 5 int",		/* 45 */
	"level 6 int",		/* 46 */
	"level 7 int",		/* 47 */
	"level 8 int",		/* 48 */
	"level 9 int",		/* 49 */
	"level 10 int",		/* 4a */
	"level 11 int",		/* 4b */
	"level 12 int",		/* 4c */
	"level 13 int",		/* 4d */
	"level 14 int",		/* 4e */
	"level 15 int",		/* 4f */
	T, T, T, T, T, T, T, T, /* 50..57 */
	T, T, T, T, T, T, T, T, /* 58..5f */
	"+interrupt vector",	/* 60 */
	"+PA_watchpoint",	/* 61 */
	"+VA_watchpoint",	/* 62 */
	"+corrected ECC error",	/* 63 */
	"+fast instruction access MMU miss",/* 64 */
	T, T, T,		/* 65..67 -- trap continues */
	"+fast data access MMU miss",/* 68 */
	T, T, T,		/* 69..6b -- trap continues */
	"+fast data access protection",/* 6c */
	T, T, T,		/* 6d..6f -- trap continues */
	T, T, T, T, T, T, T, T, /* 70..77 */
	T, T, T, T, T, T, T, T, /* 78..7f */
	"spill 0 normal",	/* 80 */
	T, T, T,		/* 81..83 -- trap continues */
	"spill 1 normal",	/* 84 */
	T, T, T,		/* 85..87 -- trap continues */
	"spill 2 normal",	/* 88 */
	T, T, T,		/* 89..8b -- trap continues */
	"spill 3 normal",	/* 8c */
	T, T, T,		/* 8d..8f -- trap continues */
	"spill 4 normal",	/* 90 */
	T, T, T,		/* 91..93 -- trap continues */
	"spill 5 normal",	/* 94 */
	T, T, T,		/* 95..97 -- trap continues */
	"spill 6 normal",	/* 98 */
	T, T, T,		/* 99..9b -- trap continues */
	"spill 7 normal",	/* 9c */
	T, T, T,		/* 9d..9f -- trap continues */
	"spill 0 other",	/* a0 */
	T, T, T,		/* a1..a3 -- trap continues */
	"spill 1 other",	/* a4 */
	T, T, T,		/* a5..a7 -- trap continues */
	"spill 2 other",	/* a8 */
	T, T, T,		/* a9..ab -- trap continues */
	"spill 3 other",	/* ac */
	T, T, T,		/* ad..af -- trap continues */
	"spill 4 other",	/* b0 */
	T, T, T,		/* b1..b3 -- trap continues */
	"spill 5 other",	/* b4 */
	T, T, T,		/* b5..b7 -- trap continues */
	"spill 6 other",	/* b8 */
	T, T, T,		/* b9..bb -- trap continues */
	"spill 7 other",	/* bc */
	T, T, T,		/* bd..bf -- trap continues */
	"fill 0 normal",	/* c0 */
	T, T, T,		/* c1..c3 -- trap continues */
	"fill 1 normal",	/* c4 */
	T, T, T,		/* c5..c7 -- trap continues */
	"fill 2 normal",	/* c8 */
	T, T, T,		/* c9..cb -- trap continues */
	"fill 3 normal",	/* cc */
	T, T, T,		/* cd..cf -- trap continues */
	"fill 4 normal",	/* d0 */
	T, T, T,		/* d1..d3 -- trap continues */
	"fill 5 normal",	/* d4 */
	T, T, T,		/* d5..d7 -- trap continues */
	"fill 6 normal",	/* d8 */
	T, T, T,		/* d9..db -- trap continues */
	"fill 7 normal",	/* dc */
	T, T, T,		/* dd..df -- trap continues */
	"fill 0 other",		/* e0 */
	T, T, T,		/* e1..e3 -- trap continues */
	"fill 1 other",		/* e4 */
	T, T, T,		/* e5..e7 -- trap continues */
	"fill 2 other",		/* e8 */
	T, T, T,		/* e9..eb -- trap continues */
	"fill 3 other",		/* ec */
	T, T, T,		/* ed..ef -- trap continues */
	"fill 4 other",		/* f0 */
	T, T, T,		/* f1..f3 -- trap continues */
	"fill 5 other",		/* f4 */
	T, T, T,		/* f5..f7 -- trap continues */
	"fill 6 other",		/* f8 */
	T, T, T,		/* f9..fb -- trap continues */
	"fill 7 other",		/* fc */
	T, T, T,		/* fd..ff -- trap continues */

	/* user (software trap) vectors */
	"syscall",		/* 100 */
	"breakpoint",		/* 101 */
	"zero divide",		/* 102 */
	"flush windows",	/* 103 */
	"clean windows",	/* 104 */
	"range check",		/* 105 */
	"fix align",		/* 106 */
	"integer overflow",	/* 107 */
	"svr4 syscall",		/* 108 */
	"4.4 syscall",		/* 109 */
	"kgdb exec",		/* 10a */
	T, T, T, T, T,		/* 10b..10f */
	T, T, T, T, T, T, T, T,	/* 110..117 */
	T, T, T, T, T, T, T, T,	/* 118..11f */
	"svr4 getcc",		/* 120 */
	"svr4 setcc",		/* 121 */
	"svr4 getpsr",		/* 122 */
	"svr4 setpsr",		/* 123 */
	"svr4 gethrtime",	/* 124 */
	"svr4 gethrvtime",	/* 125 */
	T,			/* 126 */
	"svr4 gethrestime",	/* 127 */
	T, T, T, T, T, T, T, T, /* 128..12f */
	T, T,			/* 130..131 */
	"get condition codes",	/* 132 */
	"set condition codes",	/* 133 */
	T, T, T, T,		/* 134..137 */
	T, T, T, T, T, T, T, T, /* 138..13f */
	T, T, T, T, T, T, T, T, /* 140..147 */
	T, T, T, T, T, T, T, T, /* 148..14f */
	T, T, T, T, T, T, T, T, /* 150..157 */
	T, T, T, T, T, T, T, T, /* 158..15f */
	T, T, T, T,		/* 160..163 */
	"SVID syscall64",	/* 164 */
	"SPARC Intl syscall64",	/* 165 */
	"OS vendor spec syscall",	/* 166 */
	"HW OEM syscall",	/* 167 */
	"ret from deferred trap",	/* 168 */
};

/* Number of entries in trap_type[]; indexes >= this print as T. */
#define	N_TRAP_TYPES	(sizeof trap_type / sizeof *trap_type)
322:
/* Return-to-user housekeeping: deliver pending signals, reset priority. */
static __inline void userret(struct proc *);
/* Drop FPU-enable in the trapframe if this proc no longer owns the FPU. */
static __inline void share_fpu(struct proc *, struct trapframe64 *);

/* Entry points called from the locore.s trap vectors. */
void trap(struct trapframe64 *tf, unsigned type, vaddr_t pc, long tstate);
void data_access_fault(struct trapframe64 *tf, unsigned type, vaddr_t pc,
	vaddr_t va, vaddr_t sfva, u_long sfsr);
void data_access_error(struct trapframe64 *tf, unsigned type,
	vaddr_t afva, u_long afsr, vaddr_t sfva, u_long sfsr);
void text_access_fault(struct trapframe64 *tf, unsigned type,
	vaddr_t pc, u_long sfsr);
void text_access_error(struct trapframe64 *tf, unsigned type,
	vaddr_t pc, u_long sfsr, vaddr_t afva, u_long afsr);
void syscall(struct trapframe64 *, register_t code, register_t pc);
336:
337: /*
338: * Define the code needed before returning to user mode, for
339: * trap, mem_access_fault, and syscall.
340: */
341: static __inline void
342: userret(struct proc *p)
343: {
344: int sig;
345:
346: /* take pending signals */
347: while ((sig = CURSIG(p)) != 0)
348: postsig(sig);
349:
350: curcpu()->ci_schedstate.spc_curpriority = p->p_priority = p->p_usrpri;
351: }
352:
353: /*
354: * If someone stole the FPU while we were away, do not enable it
355: * on return. This is not done in userret() above as it must follow
356: * the ktrsysret() in syscall(). Actually, it is likely that the
357: * ktrsysret should occur before the call to userret.
358: *
359: * Oh, and don't touch the FPU bit if we're returning to the kernel.
360: */
361: static __inline void share_fpu(p, tf)
362: struct proc *p;
363: struct trapframe64 *tf;
364: {
365: if (!(tf->tf_tstate & (PSTATE_PRIV<<TSTATE_PSTATE_SHIFT)) &&
366: (tf->tf_tstate & (PSTATE_PEF<<TSTATE_PSTATE_SHIFT)) && fpproc != p)
367: tf->tf_tstate &= ~(PSTATE_PEF<<TSTATE_PSTATE_SHIFT);
368: }
369:
/*
 * Called from locore.s trap handling, for non-MMU-related traps.
 * (MMU-related traps go through mem_access_fault, below.)
 *
 * tf:     saved trapframe
 * type:   trap type (index into trap_type[] for types < 0x100)
 * pc:     trapping PC (TPC at trap time)
 * tstate: saved TSTATE register; PSTATE field selects kernel/user handling
 */
void
trap(tf, type, pc, tstate)
	struct trapframe64 *tf;
	unsigned type;
	vaddr_t pc;
	long tstate;
{
	struct proc *p;
	struct pcb *pcb;
	int pstate = (tstate>>TSTATE_PSTATE_SHIFT);
	int64_t n;
	union sigval sv;

	sv.sival_ptr = (void *)pc;

	/* This steps the PC over the trap. */
#define	ADVANCE (n = tf->tf_npc, tf->tf_pc = n, tf->tf_npc = n + 4)

	uvmexp.traps++;
	/*
	 * Generally, kernel traps cause a panic.  Any exceptions are
	 * handled early here.
	 */
	if (pstate & PSTATE_PRIV) {
#ifdef DDB
		/* Hand breakpoints and watchpoints to the kernel debugger. */
		if (type == T_BREAKPOINT) {
			write_all_windows();
			if (kdb_trap(type, tf)) {
				/* ADVANCE; */
				return;
			}
		}
		if (type == T_PA_WATCHPT || type == T_VA_WATCHPT) {
			if (kdb_trap(type, tf)) {
				/* DDB must turn off watchpoints or something */
				return;
			}
		}
#endif
		/*
		 * The kernel needs to use FPU registers for block
		 * load/store.  If we trap in privileged code, save
		 * the FPU state if there is any and enable the FPU.
		 *
		 * We rely on the kernel code properly enabling the FPU
		 * in %fprs, otherwise we'll hang here trying to enable
		 * the FPU.
		 */
		if (type == T_FPDISABLED) {
			struct proc *newfpproc;

			/* Attribute FPU use in interrupt context to proc0. */
			if (CLKF_INTR((struct clockframe *)tf) || !curproc)
				newfpproc = &proc0;
			else
				newfpproc = curproc;

			if (fpproc != newfpproc) {
				if (fpproc != NULL) {
					/* someone else had it, maybe? */
					savefpstate(fpproc->p_md.md_fpstate);
					fpproc = NULL;
				}
				/* If we have an allocated fpstate, load it */
				if (newfpproc->p_md.md_fpstate != 0) {
					fpproc = newfpproc;
					loadfpstate(fpproc->p_md.md_fpstate);
				} else
					fpproc = NULL;
			}
			/* Enable the FPU */
			tf->tf_tstate |= (PSTATE_PEF<<TSTATE_PSTATE_SHIFT);
			return;
		}
		/* All other kernel-mode traps are fatal. */
		if (type != T_SPILL_N_NORM && type != T_FILL_N_NORM)
			goto dopanic;
	}
	if ((p = curproc) == NULL)
		p = &proc0;
	/* NOTE(review): pcb is assigned here but not used again below. */
	pcb = &p->p_addr->u_pcb;
	p->p_md.md_tf = tf;	/* for ptrace/signals */

	switch (type) {

	default:
		if (type < 0x100) {
			extern int trap_trace_dis;
dopanic:
			trap_trace_dis = 1;

			panic("trap type 0x%x (%s): pc=%lx npc=%lx pstate=%b\n",
			    type, type < N_TRAP_TYPES ? trap_type[type] : T,
			    pc, (long)tf->tf_npc, pstate, PSTATE_BITS);
			/* NOTREACHED */
		}
#if defined(COMPAT_SVR4) || defined(COMPAT_SVR4_32)
badtrap:
#endif
		/* Unknown user software trap: refuse with SIGILL. */
		trapsignal(p, SIGILL, type, ILL_ILLOPC, sv);
		break;

#if defined(COMPAT_SVR4) || defined(COMPAT_SVR4_32)
	case T_SVR4_GETCC:
	case T_SVR4_SETCC:
	case T_SVR4_GETPSR:
	case T_SVR4_SETPSR:
	case T_SVR4_GETHRTIME:
	case T_SVR4_GETHRVTIME:
	case T_SVR4_GETHRESTIME:
#if defined(COMPAT_SVR4_32)
		if (svr4_32_trap(type, p))
			break;
#endif
#if defined(COMPAT_SVR4)
		if (svr4_trap(type, p))
			break;
#endif
		goto badtrap;
#endif

	case T_AST:
		/* Asynchronous system trap: profiling tick and reschedule. */
		want_ast = 0;
		if (p->p_flag & P_OWEUPC) {
			ADDUPROF(p);
		}
		if (curcpu()->ci_want_resched)
			preempt(NULL);
		break;

	case T_ILLINST:
	{
		union instr ins;

		if (copyin((caddr_t)pc, &ins, sizeof(ins)) != 0) {
			/* XXX Can this happen? */
			trapsignal(p, SIGILL, 0, ILL_ILLOPC, sv);
			break;
		}
		/* Emulate quad FP loads/stores in software. */
		if (ins.i_any.i_op == IOP_mem &&
		    (ins.i_op3.i_op3 == IOP3_LDQF ||
		     ins.i_op3.i_op3 == IOP3_STQF ||
		     ins.i_op3.i_op3 == IOP3_LDQFA ||
		     ins.i_op3.i_op3 == IOP3_STQFA)) {
			if (emul_qf(ins.i_int, p, sv, tf))
				ADVANCE;
			break;
		}
		/* Emulate the POPC instruction in software. */
		if (ins.i_any.i_op == IOP_reg &&
		    ins.i_op3.i_op3 == IOP3_POPC &&
		    ins.i_op3.i_rs1 == 0) {
			if (emul_popc(ins.i_int, p, sv, tf))
				ADVANCE;
			break;
		}
		trapsignal(p, SIGILL, 0, ILL_ILLOPC, sv);	/* XXX code?? */
		break;
	}

	case T_INST_EXCEPT:
	case T_TEXTFAULT:
	case T_PRIVINST:
		trapsignal(p, SIGILL, 0, ILL_ILLOPC, sv);	/* XXX code?? */
		break;

	case T_FPDISABLED: {
		struct fpstate64 *fs = p->p_md.md_fpstate;

		if (fs == NULL) {
			/* NOTE: fpstate must be 64-bit aligned */
			fs = malloc((sizeof *fs), M_SUBPROC, M_WAITOK);
			*fs = initfpstate;
			fs->fs_qsize = 0;
			p->p_md.md_fpstate = fs;
		}
		/*
		 * If we have not found an FPU, we have to emulate it.
		 *
		 * Since all UltraSPARC CPUs have an FPU how can this happen?
		 */
		if (!foundfpu) {
			trapsignal(p, SIGILL, 0, ILL_COPROC, sv);
			break;
		}

		/*
		 * We may have more FPEs stored up and/or ops queued.
		 * If they exist, handle them and get out.  Otherwise,
		 * resolve the FPU state, turn it on, and try again.
		 *
		 * Ultras should never have a FPU queue.
		 */
		if (fs->fs_qsize) {
			printf("trap: Warning fs_qsize is %d\n",fs->fs_qsize);
			fpu_cleanup(p, fs);
			break;
		}
		if (fpproc != p) {		/* we do not have it */
			if (fpproc != NULL)	/* someone else had it */
				savefpstate(fpproc->p_md.md_fpstate);
			loadfpstate(fs);
			fpproc = p;		/* now we do have it */
			uvmexp.fpswtch++;
		}
		tf->tf_tstate |= (PSTATE_PEF<<TSTATE_PSTATE_SHIFT);
		break;
	}

	case T_LDQF_ALIGN:
	case T_STQF_ALIGN:
	{
		union instr ins;

		if (copyin((caddr_t)pc, &ins, sizeof(ins)) != 0) {
			/* XXX Can this happen? */
			trapsignal(p, SIGILL, 0, ILL_ILLOPC, sv);
			break;
		}
		if (ins.i_any.i_op == IOP_mem &&
		    (ins.i_op3.i_op3 == IOP3_LDQF ||
		     ins.i_op3.i_op3 == IOP3_STQF ||
		     ins.i_op3.i_op3 == IOP3_LDQFA ||
		     ins.i_op3.i_op3 == IOP3_STQFA)) {
			if (emul_qf(ins.i_int, p, sv, tf))
				ADVANCE;
		} else
			trapsignal(p, SIGILL, 0, ILL_ILLOPC, sv);
		break;
	}

	case T_SPILL_N_NORM:
	case T_FILL_N_NORM:
		/*
		 * We got an alignment trap in the spill/fill handler.
		 *
		 * XXX We really should generate a bus error here, but
		 * we could be on the interrupt stack, and dumping
		 * core from the interrupt stack is not a good idea.
		 * It causes random crashes.
		 */
		sigexit(p, SIGKILL);
		break;

	case T_ALIGN:
	case T_LDDF_ALIGN:
	case T_STDF_ALIGN:
	{
		/*
		 * NOTE(review): dsfsr/isfsr are read from the MMU but not
		 * otherwise consulted below; presumably kept for debugging.
		 */
		int64_t dsfsr, dsfar=0, isfsr;

		dsfsr = ldxa(SFSR, ASI_DMMU);
		if (dsfsr & SFSR_FV)
			dsfar = ldxa(SFAR, ASI_DMMU);
		isfsr = ldxa(SFSR, ASI_IMMU);
		/*
		 * If we're busy doing copyin/copyout continue
		 */
		if (p->p_addr && p->p_addr->u_pcb.pcb_onfault) {
			tf->tf_pc = (vaddr_t)p->p_addr->u_pcb.pcb_onfault;
			tf->tf_npc = tf->tf_pc + 4;
			break;
		}
	}

		/* Fix the alignment fault in software if the user asked. */
		if ((p->p_md.md_flags & MDP_FIXALIGN) != 0 &&
		    fixalign(p, tf) == 0) {
			ADVANCE;
			break;
		}
		/* XXX sv.sival_ptr should be the fault address! */
		trapsignal(p, SIGBUS, 0, BUS_ADRALN, sv);	/* XXX code?? */
		break;

	case T_FP_IEEE_754:
	case T_FP_OTHER:
		/*
		 * Clean up after a floating point exception.
		 * fpu_cleanup can (and usually does) modify the
		 * state we save here, so we must `give up' the FPU
		 * chip context.  (The software and hardware states
		 * will not match once fpu_cleanup does its job, so
		 * we must not save again later.)
		 */
		if (p != fpproc)
			panic("fpe without being the FP user");
		savefpstate(p->p_md.md_fpstate);
		fpproc = NULL;
		/* tf->tf_psr &= ~PSR_EF; */	/* share_fpu will do this */
		if (type == T_FP_OTHER && p->p_md.md_fpstate->fs_qsize == 0) {
			/*
			 * Push the faulting instruction on the queue;
			 * we might need to emulate it.
			 */
			copyin((caddr_t)pc, &p->p_md.md_fpstate->fs_queue[0].fq_instr, sizeof(int));
			p->p_md.md_fpstate->fs_queue[0].fq_addr = (int *)pc;
			p->p_md.md_fpstate->fs_qsize = 1;
		}
		ADVANCE;
		fpu_cleanup(p, p->p_md.md_fpstate);
		/* fpu_cleanup posts signals if needed */
		break;

	case T_TAGOF:
		trapsignal(p, SIGEMT, 0, EMT_TAGOVF, sv);	/* XXX code?? */
		break;

	case T_BREAKPOINT:
		trapsignal(p, SIGTRAP, 0, TRAP_BRKPT, sv);
		break;

	case T_DIV0:
		ADVANCE;
		trapsignal(p, SIGFPE, 0, FPE_INTDIV, sv);
		break;

	case T_CLEANWIN:
		uprintf("T_CLEANWIN\n");	/* XXX Should not get this */
		ADVANCE;
		break;

	case T_FLUSHWIN:
		/* Software window flush for v8 software */
		write_all_windows();
		ADVANCE;
		break;

	case T_RANGECHECK:
		ADVANCE;
		trapsignal(p, SIGILL, 0, ILL_ILLOPN, sv);	/* XXX code?? */
		break;

	case T_FIXALIGN:
#ifdef DEBUG_ALIGN
		uprintf("T_FIXALIGN\n");
#endif
		/* User wants us to fix alignment faults */
		p->p_md.md_flags |= MDP_FIXALIGN;
		ADVANCE;
		break;

	case T_INTOF:
		uprintf("T_INTOF\n");		/* XXX */
		ADVANCE;
		trapsignal(p, SIGFPE, FPE_INTOVF_TRAP, FPE_INTOVF, sv);
		break;
	}
	userret(p);
	share_fpu(p, tf);
#undef ADVANCE
}
721:
/*
 * Save windows from PCB into user stack, and return 0.  This is used on
 * window overflow pseudo-traps (from locore.s, just before returning to
 * user mode) and when ptrace or sendsig needs a consistent state.
 * As a side effect, rwindow_save() always sets pcb_nsaved to 0.
 *
 * If the windows cannot be saved, pcb_nsaved is restored and we return -1.
 *
 * XXXXXX This cannot work properly.  I need to re-examine this register
 * window thing entirely.
 */
int
rwindow_save(p)
	struct proc *p;
{
	struct pcb *pcb = &p->p_addr->u_pcb;
	struct rwindow64 *rw = &pcb->pcb_rw[0];
	u_int64_t rwdest;
	int i, j;

	i = pcb->pcb_nsaved;
	if (i == 0)
		return (0);
	while (i > 0) {
		/* %i6 of the window holds the stack pointer to save to. */
		rwdest = rw[i--].rw_in[6];
		if (rwdest & 1) {
			/* Low bit set: 64-bit (v9) stack frame. */
			struct rwindow64 rwstack = rw[i];

			rwdest += BIAS;
			/* Undo the stack-protector cookie on the saved %i7. */
			rwstack.rw_in[7] ^= p->p_addr->u_pcb.pcb_wcookie;
			if (copyout((caddr_t)&rwstack, (caddr_t)(u_long)rwdest,
			    sizeof(rwstack))) {
				/*
				 * NOTE(review): header says pcb_nsaved is
				 * restored on failure, but it is simply left
				 * unchanged here — confirm intent.
				 */
				return (-1);
			}
		} else {
			struct rwindow32 rwstack;

			/* 32-bit window */
			for (j = 0; j < 8; j++) {
				rwstack.rw_local[j] = (int)rw[i].rw_local[j];
				rwstack.rw_in[j] = (int)rw[i].rw_in[j];
			}
			/* Must truncate rwdest */
			if (copyout(&rwstack, (caddr_t)(u_long)(u_int)rwdest,
			    sizeof(rwstack))) {
				return (-1);
			}
		}
	}
	pcb->pcb_nsaved = 0;

	return (0);
}
775:
776: /*
777: * Kill user windows (before exec) by writing back to stack or pcb
778: * and then erasing any pcb tracks. Otherwise we might try to write
779: * the registers into the new process after the exec.
780: */
781: void
782: pmap_unuse_final(p)
783: struct proc *p;
784: {
785:
786: write_user_windows();
787: p->p_addr->u_pcb.pcb_nsaved = 0;
788: }
789:
/*
 * This routine handles MMU generated faults.  About half
 * of them could be recoverable through uvm_fault.
 *
 * tf:   saved trapframe
 * type: trap type (e.g. T_FDMMU_MISS)
 * pc:   trapping PC
 * addr: faulting virtual address (TLB tag access value)
 * sfva: synchronous fault virtual address from the MMU
 * sfsr: synchronous fault status register
 */
void
data_access_fault(tf, type, pc, addr, sfva, sfsr)
	struct trapframe64 *tf;
	unsigned type;
	vaddr_t pc;
	vaddr_t addr;
	vaddr_t sfva;
	u_long sfsr;
{
	u_int64_t tstate;
	struct proc *p;
	struct vmspace *vm;
	vaddr_t va;
	int rv;
	vm_prot_t access_type;
	vaddr_t onfault;
	union sigval sv;

	uvmexp.traps++;
	if ((p = curproc) == NULL)	/* safety check */
		p = &proc0;

	tstate = tf->tf_tstate;

	/* Find the faulting va to give to uvm_fault */
	va = trunc_page(addr);

	/*
	 * Now munch on protections.
	 *
	 * If it was a FAST_DATA_ACCESS_MMU_MISS we have no idea what the
	 * access was since the SFSR is not set.  But we should never get
	 * here from there.
	 */
	if (type == T_FDMMU_MISS || (sfsr & SFSR_FV) == 0) {
		/* Punt */
		access_type = VM_PROT_READ;
	} else {
		access_type = (sfsr & SFSR_W) ? VM_PROT_READ|VM_PROT_WRITE
		    : VM_PROT_READ;
	}
	if (tstate & (PSTATE_PRIV<<TSTATE_PSTATE_SHIFT)) {
#ifdef DDB
		extern char Lfsprobe[];
		/*
		 * If this was an access that we shouldn't try to page in,
		 * resume at the fault handler without any action.
		 */
		if (p->p_addr && p->p_addr->u_pcb.pcb_onfault == Lfsprobe)
			goto kfault;
#endif

		/*
		 * During autoconfiguration, faults are never OK unless
		 * pcb_onfault is set.  Once running normally we must allow
		 * exec() to cause copy-on-write faults to kernel addresses.
		 */
		if (cold)
			goto kfault;
		if (!(addr & TLB_TAG_ACCESS_CTX)) {
			/* CTXT == NUCLEUS: fault on a kernel address. */
			rv = uvm_fault(kernel_map, va, 0, access_type);
			if (rv == 0)
				return;
			goto kfault;
		}
	} else
		p->p_md.md_tf = tf;

	vm = p->p_vmspace;
	/*
	 * alas! must call the horrible vm code.  Clear pcb_onfault around
	 * the call so a nested fault inside uvm_fault is not misdirected.
	 */
	onfault = (vaddr_t)p->p_addr->u_pcb.pcb_onfault;
	p->p_addr->u_pcb.pcb_onfault = NULL;
	rv = uvm_fault(&vm->vm_map, (vaddr_t)va, 0, access_type);
	p->p_addr->u_pcb.pcb_onfault = (void *)onfault;

	/*
	 * If this was a stack access we keep track of the maximum
	 * accessed stack size.  Also, if uvm_fault gets a protection
	 * failure it is due to accessing the stack region outside
	 * the current limit and we need to reflect that as an access
	 * error.
	 */
	if ((caddr_t)va >= vm->vm_maxsaddr) {
		if (rv == 0)
			uvm_grow(p, va);
		else if (rv == EACCES)
			rv = EFAULT;
	}
	if (rv != 0) {
		/*
		 * Pagein failed.  If doing copyin/out, return to onfault
		 * address.  Any other page fault in kernel, die; if user
		 * fault, deliver SIGSEGV.
		 */
		if (tstate & (PSTATE_PRIV<<TSTATE_PSTATE_SHIFT)) {
kfault:
			onfault = p->p_addr ?
			    (long)p->p_addr->u_pcb.pcb_onfault : 0;
			if (!onfault) {
				extern int trap_trace_dis;
				trap_trace_dis = 1; /* Disable traptrace for printf */
				(void) splhigh();
				panic("kernel data fault: pc=%lx addr=%lx\n",
				    pc, addr);
				/* NOTREACHED */
			}
			/* Resume at the registered copyin/copyout handler. */
			tf->tf_pc = onfault;
			tf->tf_npc = onfault + 4;
			return;
		}

		/* Report the most precise fault address available. */
		if (type == T_FDMMU_MISS || (sfsr & SFSR_FV) == 0)
			sv.sival_ptr = (void *)va;
		else
			sv.sival_ptr = (void *)sfva;

		if (rv == ENOMEM) {
			printf("UVM: pid %d (%s), uid %u killed: out of swap\n",
			    p->p_pid, p->p_comm,
			    p->p_cred && p->p_ucred ?
			    p->p_ucred->cr_uid : -1);
			trapsignal(p, SIGKILL, access_type, SEGV_MAPERR, sv);
		} else {
			trapsignal(p, SIGSEGV, access_type, SEGV_MAPERR, sv);
		}
	}
	if ((tstate & TSTATE_PRIV) == 0) {
		userret(p);
		share_fpu(p, tf);
	}
}
926:
/*
 * This routine handles deferred errors caused by the memory
 * or I/O bus subsystems.  Most of these are fatal, and even
 * if they are not, recovery is painful.  Also, the TPC and
 * TNPC values are probably not valid if we're not doing a
 * special PEEK/POKE code sequence.
 *
 * Parameter order is (tf, type, afva, afsr, sfva, sfsr) as in the
 * prototype above; the K&R declarations below are listed in a
 * different order, which is harmless in K&R style.
 */
void
data_access_error(tf, type, afva, afsr, sfva, sfsr)
	struct trapframe64 *tf;
	unsigned type;
	vaddr_t sfva;
	u_long sfsr;
	vaddr_t afva;
	u_long afsr;
{
	u_long pc;
	u_int64_t tstate;
	struct proc *p;
	vaddr_t onfault;
	union sigval sv;

	uvmexp.traps++;
	if ((p = curproc) == NULL)	/* safety check */
		p = &proc0;

	pc = tf->tf_pc;
	tstate = tf->tf_tstate;

	sv.sival_ptr = (void *)pc;

	onfault = p->p_addr ? (long)p->p_addr->u_pcb.pcb_onfault : 0;
	printf("data error type %x sfsr=%lx sfva=%lx afsr=%lx afva=%lx tf=%p\n",
	    type, sfsr, sfva, afsr, afva, tf);

	if (afsr == 0) {
		printf("data_access_error: no fault\n");
		goto out;	/* No fault. Why were we called? */
	}

	if (tstate & (PSTATE_PRIV<<TSTATE_PSTATE_SHIFT)) {

		/* Kernel mode with no recovery handler: fatal. */
		if (!onfault) {
			extern int trap_trace_dis;

			trap_trace_dis = 1; /* Disable traptrace for printf */
			(void) splhigh();
			panic("data fault: pc=%lx addr=%lx sfsr=%b\n",
			    (u_long)pc, (long)sfva, sfsr, SFSR_BITS);
			/* NOTREACHED */
		}

		/*
		 * If this was a privileged error but not a probe, we
		 * cannot recover, so panic.
		 */
		if (afsr & ASFR_PRIV) {
			panic("Privileged Async Fault: AFAR %p AFSR %lx\n%b",
			    (void *)afva, afsr, afsr, AFSR_BITS);
			/* NOTREACHED */
		}
		/* Resume at the registered PEEK/POKE recovery handler. */
		tf->tf_pc = onfault;
		tf->tf_npc = onfault + 4;
		return;
	}

	trapsignal(p, SIGSEGV, VM_PROT_READ|VM_PROT_WRITE, SEGV_MAPERR, sv);
out:
	if ((tstate & TSTATE_PRIV) == 0) {
		userret(p);
		share_fpu(p, tf);
	}
}
1000:
/*
 * This routine handles MMU generated faults.  About half
 * of them could be recoverable through uvm_fault.
 *
 * Instruction-fetch flavor: the faulting address is the PC itself.
 * Parameter order is (tf, type, pc, sfsr) as in the prototype above;
 * the K&R declarations below are listed in a different order, which
 * is harmless in K&R style.
 */
void
text_access_fault(tf, type, pc, sfsr)
	unsigned type;
	vaddr_t pc;
	struct trapframe64 *tf;
	u_long sfsr;
{
	u_int64_t tstate;
	struct proc *p;
	struct vmspace *vm;
	vaddr_t va;
	int rv;
	vm_prot_t access_type;
	union sigval sv;

	sv.sival_ptr = (void *)pc;

	uvmexp.traps++;
	if ((p = curproc) == NULL)	/* safety check */
		panic("text_access_fault: no curproc");

	tstate = tf->tf_tstate;

	va = trunc_page(pc);

	/* Now munch on protections... */

	access_type = VM_PROT_EXECUTE;
	if (tstate & (PSTATE_PRIV<<TSTATE_PSTATE_SHIFT)) {
		/* Kernel text should never fault. */
		extern int trap_trace_dis;
		trap_trace_dis = 1; /* Disable traptrace for printf */
		(void) splhigh();
		panic("kernel text_access_fault: pc=%lx va=%lx\n", pc, va);
		/* NOTREACHED */
	} else
		p->p_md.md_tf = tf;

	vm = p->p_vmspace;
	/* alas! must call the horrible vm code */
	rv = uvm_fault(&vm->vm_map, va, 0, access_type);

	/*
	 * If this was a stack access we keep track of the maximum
	 * accessed stack size.  Also, if uvm_fault gets a protection
	 * failure it is due to accessing the stack region outside
	 * the current limit and we need to reflect that as an access
	 * error.
	 */
	if ((caddr_t)va >= vm->vm_maxsaddr) {
		if (rv == 0)
			uvm_grow(p, va);
		else if (rv == EACCES)
			rv = EFAULT;
	}
	if (rv != 0) {
		/*
		 * Pagein failed.  Any other page fault in kernel, die; if user
		 * fault, deliver SIGSEGV.
		 */
		if (tstate & TSTATE_PRIV) {
			extern int trap_trace_dis;
			trap_trace_dis = 1; /* Disable traptrace for printf */
			(void) splhigh();
			panic("kernel text fault: pc=%llx\n", (unsigned long long)pc);
			/* NOTREACHED */
		}
		trapsignal(p, SIGSEGV, access_type, SEGV_MAPERR, sv);
	}
	if ((tstate & TSTATE_PRIV) == 0) {
		userret(p);
		share_fpu(p, tf);
	}
}
1078:
1079:
/*
 * This routine handles deferred errors caused by the memory
 * or I/O bus subsystems.  Most of these are fatal, and even
 * if they are not, recovery is painful.  Also, the TPC and
 * TNPC values are probably not valid if we're not doing a
 * special PEEK/POKE code sequence.
 *
 * tf   - trap frame pushed by the trap entry code
 * type - hardware trap type (diagnostic only here)
 * pc   - faulting program counter
 * sfsr - synchronous fault status register contents
 * afva - asynchronous fault virtual address
 * afsr - asynchronous fault status register contents
 */
void
text_access_error(tf, type, pc, sfsr, afva, afsr)
	struct trapframe64 *tf;
	unsigned type;
	vaddr_t pc;
	u_long sfsr;
	vaddr_t afva;
	u_long afsr;
{
	int64_t tstate;
	struct proc *p;
	struct vmspace *vm;
	vaddr_t va;
	int rv;
	vm_prot_t access_type;
	union sigval sv;

	sv.sival_ptr = (void *)pc;
	uvmexp.traps++;
	if ((p = curproc) == NULL)	/* safety check */
		p = &proc0;

	tstate = tf->tf_tstate;

	/* A nonzero AFSR indicates an asynchronous (deferred) memory error. */
	if ((afsr) != 0) {
		extern int trap_trace_dis;

		trap_trace_dis++;	/* Disable traptrace for printf */
		printf("text_access_error: memory error...\n");
		printf("text memory error type %d sfsr=%lx sfva=%lx afsr=%lx afva=%lx tf=%p\n",
		    type, sfsr, pc, afsr, afva, tf);
		trap_trace_dis--;	/* Reenable traptrace for printf */

		if (tstate & (PSTATE_PRIV<<TSTATE_PSTATE_SHIFT))
			panic("text_access_error: kernel memory error");

		/* User fault -- Berr */
		trapsignal(p, SIGBUS, 0, BUS_ADRALN, sv);
	}

	/* No valid synchronous fault status: nothing further to service. */
	if ((sfsr & SFSR_FV) == 0 || (sfsr & SFSR_FT) == 0)
		goto out;	/* No fault. Why were we called? */

	va = trunc_page(pc);

	/* Now munch on protections... */
	access_type = VM_PROT_EXECUTE;
	if (tstate & (PSTATE_PRIV<<TSTATE_PSTATE_SHIFT)) {
		/* Kernel-mode text error: unrecoverable. */
		extern int trap_trace_dis;
		trap_trace_dis = 1;	/* Disable traptrace for printf */
		(void) splhigh();
		panic("kernel text error: pc=%lx sfsr=%b\n", pc, sfsr, SFSR_BITS);
		/* NOTREACHED */
	} else
		p->p_md.md_tf = tf;	/* remember user trap frame */

	vm = p->p_vmspace;
	/* alas! must call the horrible vm code */
	rv = uvm_fault(&vm->vm_map, va, 0, access_type);

	/*
	 * If this was a stack access we keep track of the maximum
	 * accessed stack size.  Also, if uvm_fault gets a protection
	 * failure it is due to accessing the stack region outside
	 * the current limit and we need to reflect that as an access
	 * error.
	 */
	if ((caddr_t)va >= vm->vm_maxsaddr) {
		if (rv == 0)
			uvm_grow(p, va);
		else if (rv == EACCES)
			rv = EFAULT;
	}
	if (rv != 0) {
		/*
		 * Pagein failed.  If doing copyin/out, return to onfault
		 * address.  Any other page fault in kernel, die; if user
		 * fault, deliver SIGSEGV.
		 */
		if (tstate & TSTATE_PRIV) {
			extern int trap_trace_dis;
			trap_trace_dis = 1;	/* Disable traptrace for printf */
			(void) splhigh();
			panic("kernel text error: pc=%lx sfsr=%b\n", pc,
			    sfsr, SFSR_BITS);
			/* NOTREACHED */
		}
		trapsignal(p, SIGSEGV, access_type, SEGV_MAPERR, sv);
	}
out:
	/* Returning to userland: run AST processing and hand back the FPU. */
	if ((tstate & TSTATE_PRIV) == 0) {
		userret(p);
		share_fpu(p, tf);
	}
}
1182:
1183: /*
1184: * System calls. `pc' is just a copy of tf->tf_pc.
1185: *
1186: * Note that the things labelled `out' registers in the trapframe were the
1187: * `in' registers within the syscall trap code (because of the automatic
1188: * `save' effect of each trap). They are, however, the %o registers of the
1189: * thing that made the system call, and are named that way here.
1190: *
1191: * 32-bit system calls on a 64-bit system are a problem. Each system call
1192: * argument is stored in the smaller of the argument's true size or a
1193: * `register_t'. Now on a 64-bit machine all normal types can be stored in a
1194: * `register_t'. (The only exceptions would be 128-bit `quad's or 128-bit
1195: * extended precision floating point values, which we don't support.) For
1196: * 32-bit syscalls, 64-bit integers like `off_t's, double precision floating
1197: * point values, and several other types cannot fit in a 32-bit `register_t'.
1198: * These will require reading in two `register_t' values for one argument.
1199: *
1200: * In order to calculate the true size of the arguments and therefore whether
1201: * any argument needs to be split into two slots, the system call args
1202: * structure needs to be built with the appropriately sized register_t.
1203: * Otherwise the emul needs to do some magic to split oversized arguments.
1204: *
1205: * We can handle most this stuff for normal syscalls by using either a 32-bit
1206: * or 64-bit array of `register_t' arguments. Unfortunately ktrace always
1207: * expects arguments to be `register_t's, so it loses badly. What's worse,
1208: * ktrace may need to do size translations to massage the argument array
1209: * appropriately according to the emulation that is doing the ktrace.
1210: *
1211: */
/*
 * System call trap handler (see the block comment above for the
 * 32-bit vs 64-bit argument-marshalling discussion).
 *
 * tf   - user trap frame
 * code - system call number, possibly with SYSCALL_G2RFLAG or
 *        SYSCALL_G7RFLAG or'ed in to request "return via %g2/%g7"
 * pc   - copy of tf->tf_pc
 */
void
syscall(tf, code, pc)
	register_t code;
	struct trapframe64 *tf;
	register_t pc;
{
	int i, nsys, nap;
	int64_t *ap;
	const struct sysent *callp;
	struct proc *p;
	int error = 0, new;
	register_t args[8];
	register_t rval[2];

	uvmexp.syscalls++;
	p = curproc;
#ifdef DIAGNOSTIC
	if (tf->tf_tstate & TSTATE_PRIV)
		panic("syscall from kernel");
	if (curpcb != &p->p_addr->u_pcb)
		panic("syscall: cpcb/ppcb mismatch");
	if (tf != (struct trapframe64 *)((caddr_t)curpcb + USPACE) - 1)
		panic("syscall: trapframe");
#endif
	p->p_md.md_tf = tf;
	/* Peel the "jump to %g2/%g7 on success" convention bits off the code. */
	new = code & (SYSCALL_G7RFLAG | SYSCALL_G2RFLAG);
	code &= ~(SYSCALL_G7RFLAG | SYSCALL_G2RFLAG);

	callp = p->p_emul->e_sysent;
	nsys = p->p_emul->e_nsysent;

	/*
	 * The first six system call arguments are in the six %o registers.
	 * Any arguments beyond that are in the `argument extension' area
	 * of the user's stack frame (see <machine/frame.h>).
	 *
	 * Check for ``special'' codes that alter this, namely syscall and
	 * __syscall.  The latter takes a quad syscall number, so that other
	 * arguments are at their natural alignments.  Adjust the number
	 * of ``easy'' arguments as appropriate; we will copy the hard
	 * ones later as needed.
	 */
	ap = &tf->tf_out[0];
	nap = 6;

	switch (code) {
	case SYS_syscall:
		/* Indirect syscall: the real code is the first argument. */
		code = *ap++;
		nap--;
		break;
	case SYS___syscall:
		if (code < nsys &&
		    callp[code].sy_call != callp[p->p_emul->e_nosys].sy_call)
			break; /* valid system call */
		if (tf->tf_out[6] & 1L) {
			/* longs *are* quadwords */
			code = ap[0];
			ap += 1;
			nap -= 1;
		} else {
			/* 32-bit caller passed the code as a 64-bit quad. */
			code = ap[_QUAD_LOWWORD];
			ap += 2;
			nap -= 2;
		}
		break;
	}

	if (code < 0 || code >= nsys)
		callp += p->p_emul->e_nosys;	/* illegal: use the nosys stub */
	else if (tf->tf_out[6] & 1L) {
		/* Stack pointer has the 64-bit tag bit: native LP64 syscall. */
		register_t *argp;

		callp += code;
		i = callp->sy_narg;	/* number of register_t argument slots */
		if (i > nap) {	/* usually false */
			/* Extra args are in the frame's argument extension. */
			if (i > 8)
				panic("syscall nargs");
			/* Read the whole block in */
			error = copyin((caddr_t)(u_long)tf->tf_out[6] + BIAS +
			    offsetof(struct frame64, fr_argx),
			    (caddr_t)&args[nap], (i - nap) * sizeof(register_t));
			i = nap;
		}
		/* It should be faster to do <=6 longword copies than call bcopy */
		for (argp = args; i--;)
			*argp++ = *ap++;

#ifdef KTRACE
		if (KTRPOINT(p, KTR_SYSCALL))
			ktrsyscall(p, code,
			    callp->sy_argsize, args);
#endif
		if (error)
			goto bad;
	} else {
		/* 32-bit stack pointer: not supported here, reject. */
		error = EFAULT;
		goto bad;
	}
#ifdef SYSCALL_DEBUG
	scdebug_call(p, code, args);
#endif
	/* Default return values: rval[1] keeps the caller's %o1. */
	rval[0] = 0;
	rval[1] = tf->tf_out[1];
#if NSYSTRACE > 0
	if (ISSET(p->p_flag, P_SYSTRACE))
		error = systrace_redirect(code, p, args, rval);
	else
#endif
		error = (*callp->sy_call)(p, args, rval);

	switch (error) {
		vaddr_t dest;
	case 0:
		/* Note: fork() does not return here in the child */
		tf->tf_out[0] = rval[0];
		tf->tf_out[1] = rval[1];
		if (new) {
			/* jmp %g2 (or %g7, deprecated) on success */
			dest = tf->tf_global[new & SYSCALL_G2RFLAG ? 2 : 7];
			if (dest & 3) {	/* destination must be word-aligned */
				error = EINVAL;
				goto bad;
			}
		} else {
			/* old system call convention: clear C on success */
			tf->tf_tstate &= ~(((int64_t)(ICC_C|XCC_C))<<TSTATE_CCR_SHIFT);	/* success */
			dest = tf->tf_npc;
		}
		tf->tf_pc = dest;
		tf->tf_npc = dest + 4;
		break;

	case ERESTART:
	case EJUSTRETURN:
		/* nothing to do */
		break;

	default:
	bad:
		/* Map the error through the emulation's errno table, if any. */
		if (p->p_emul->e_errno)
			error = p->p_emul->e_errno[error];
		tf->tf_out[0] = error;
		tf->tf_tstate |= (((int64_t)(ICC_C|XCC_C))<<TSTATE_CCR_SHIFT);	/* fail */
		dest = tf->tf_npc;
		tf->tf_pc = dest;
		tf->tf_npc = dest + 4;
		break;
	}

#ifdef SYSCALL_DEBUG
	scdebug_ret(p, code, error, rval);
#endif
	userret(p);
#ifdef KTRACE
	if (KTRPOINT(p, KTR_SYSRET))
		ktrsysret(p, code, error, rval[0]);
#endif
	share_fpu(p, tf);
}
1371:
1372: /*
1373: * Process the tail end of a fork() for the child.
1374: */
1375: void
1376: child_return(arg)
1377: void *arg;
1378: {
1379: struct proc *p = (struct proc *)arg;
1380: struct trapframe64 *tf = p->p_md.md_tf;
1381:
1382: /*
1383: * Return values in the frame set by cpu_fork().
1384: */
1385: tf->tf_out[0] = 0;
1386: tf->tf_out[1] = 0;
1387: tf->tf_tstate &= ~(((int64_t)(ICC_C|XCC_C))<<TSTATE_CCR_SHIFT);
1388:
1389: userret(p);
1390: #ifdef KTRACE
1391: if (KTRPOINT(p, KTR_SYSRET))
1392: ktrsysret(p,
1393: (p->p_flag & P_PPWAIT) ? SYS_vfork : SYS_fork, 0, 0);
1394: #endif
1395: }
1396:
CVSweb