Annotation of sys/arch/mips64/mips64/exception.S, Revision 1.1.1.1
1.1 nbrk 1: /* $OpenBSD: exception.S,v 1.13 2007/05/25 20:47:19 miod Exp $ */
2:
3: /*
4: * Copyright (c) 2002-2003 Opsycon AB (www.opsycon.se / www.opsycon.com)
5: *
6: * Redistribution and use in source and binary forms, with or without
7: * modification, are permitted provided that the following conditions
8: * are met:
9: * 1. Redistributions of source code must retain the above copyright
10: * notice, this list of conditions and the following disclaimer.
11: * 2. Redistributions in binary form must reproduce the above copyright
12: * notice, this list of conditions and the following disclaimer in the
13: * documentation and/or other materials provided with the distribution.
14: *
15: * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
16: * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
17: * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18: * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
19: * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20: * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21: * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22: * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23: * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24: * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25: * SUCH DAMAGE.
26: *
27: */
28:
29: /*
30: * This code handles exceptions and dispatches to the
31: * correct handler depending on the exception type.
32: *
33: * Exceptions are directed to the following addresses:
34: * 0xffffffffbfc00000 Reset, NMI etc. Not handled by the kernel.
35: * 0xffffffff80000000 TLB refill, not in exception.
36: * 0xffffffff80000080 XTLB refill, not in exception.
37: * 0xffffffffa0000100 Cache errors.
38: * 0xffffffff80000180 Interrupts. Same as next.
39: * 0xffffffff80000180 Everything else...
40: */
41:
42: #include <machine/param.h>
43: #include <machine/psl.h>
44: #include <machine/asm.h>
45: #include <machine/cpu.h>
46: #include <machine/regnum.h>
47: #include <machine/cpustate.h>
48:
49: #include "assym.h"
50:
51: .set mips3
52:
# int_nest_cntr tracks interrupt/exception nesting depth.  It starts at
# -1 and is incremented on entry to k_intr/u_intr and decremented on the
# way out, so it reads 0 while the first (outermost) interrupt is being
# handled.  NOTE(review): presumably -1 therefore means "not in
# interrupt context" — confirm against readers of this symbol.
53: .data
54: .globl int_nest_cntr
55: int_nest_cntr:
56: .word -1
57: .text
58:
# Kernel-mode exception dispatch table, one PTR_VAL entry per Cause
# register ExcCode value (0..31).  The dispatcher in 'exception' below
# indexes it with the masked CR_EXC_CODE bits:
#   code 0        interrupt                      -> k_intr
#   codes 2, 3    TLB miss taken in kernel mode  -> k_tlb_inv
#   everything else                              -> k_general
59: k_exception_table:
60: PTR_VAL k_intr
61: PTR_VAL k_general
62: PTR_VAL k_tlb_inv
63: PTR_VAL k_tlb_inv
64: PTR_VAL k_general
65: PTR_VAL k_general
66: PTR_VAL k_general
67: PTR_VAL k_general
68: PTR_VAL k_general
69: PTR_VAL k_general
70: PTR_VAL k_general
71: PTR_VAL k_general
72: PTR_VAL k_general
73: PTR_VAL k_general
74: PTR_VAL k_general
75: PTR_VAL k_general
76: PTR_VAL k_general
77: PTR_VAL k_general
78: PTR_VAL k_general
79: PTR_VAL k_general
80: PTR_VAL k_general
81: PTR_VAL k_general
82: PTR_VAL k_general
83: PTR_VAL k_general
84: PTR_VAL k_general
85: PTR_VAL k_general
86: PTR_VAL k_general
87: PTR_VAL k_general
88: PTR_VAL k_general
89: PTR_VAL k_general
90: PTR_VAL k_general
91: PTR_VAL k_general
92:
# User-mode exception dispatch table, one PTR_VAL entry per Cause
# register ExcCode value (0..31).  Code 0 (interrupt) goes to u_intr;
# every other code, including syscall, goes to u_general — the
# dedicated u_syscall fast path further down is compiled out
# (#ifdef notyet).
93: u_exception_table:
94: PTR_VAL u_intr
95: PTR_VAL u_general
96: PTR_VAL u_general
97: PTR_VAL u_general
98: PTR_VAL u_general
99: PTR_VAL u_general
100: PTR_VAL u_general
101: PTR_VAL u_general
102: PTR_VAL u_general
103: PTR_VAL u_general
104: PTR_VAL u_general
105: PTR_VAL u_general
106: PTR_VAL u_general
107: PTR_VAL u_general
108: PTR_VAL u_general
109: PTR_VAL u_general
110: PTR_VAL u_general
111: PTR_VAL u_general
112: PTR_VAL u_general
113: PTR_VAL u_general
114: PTR_VAL u_general
115: PTR_VAL u_general
116: PTR_VAL u_general
117: PTR_VAL u_general
118: PTR_VAL u_general
119: PTR_VAL u_general
120: PTR_VAL u_general
121: PTR_VAL u_general
122: PTR_VAL u_general
123: PTR_VAL u_general
124: PTR_VAL u_general
125: PTR_VAL u_general
126:
127: .set noreorder # Noreorder is default style!
128:
129: /*---------------------------------------------------------------- exception
130: * General exception handler dispatcher. This code is copied
131: * to the vector area and must thus be PIC and less than 128
132: * bytes long to fit. Only k0 and k1 may be used at this time.
133: */
134: .globl exception
135: exception:
136: .set noat
# k0 = Status, k1 = Cause.  The KSU bits of Status select which
# dispatch table to use; the masked ExcCode bits of Cause index it.
137: mfc0 k0, COP_0_STATUS_REG
138: mfc0 k1, COP_0_CAUSE_REG
139: and k0, k0, SR_KSU_USER
140: beqz k0, k_exception # Kernel mode (KSU bits clear)
# Branch delay slot: the mask executes on both paths.  CR_EXC_CODE
# extracts ExcCode pre-shifted, so k1 = code * 4 afterwards.
141: and k1, k1, CR_EXC_CODE
142:
143: LA k0, u_exception_table
# k1 is code*4; table entries are pointer-sized, so on LP64 the
# offset is added twice to get code*8.
144: PTR_ADDU k0, k0, k1
145: #ifdef __LP64__
146: PTR_ADDU k0, k0, k1 # yes, twice...
147: #endif
148: PTR_L k0, 0(k0)
149: j k0
150: nop
151:
152: k_exception:
153: LA k0, k_exception_table
154: PTR_ADDU k0, k0, k1
155: #ifdef __LP64__
156: PTR_ADDU k0, k0, k1 # yes, twice...
157: #endif
158: PTR_L k0, 0(k0)
159: j k0
160: nop
161: .set at
# e_exception marks the end of the code copied to the vector area.
162: .globl e_exception
163: e_exception:
164:
165:
166: /*---------------------------------------------------------------- k_intr
167: * Handle an interrupt in kernel mode. This is easy since we
168: * just need to save away the 'save' registers and state.
169: * State is saved on kernel stack.
170: */
171:
172: NNON_LEAF(k_intr, FRAMESZ(KERN_EXC_FRAME_SIZE), ra)
173: .set noat
174: .mask 0x80000000, (CF_RA_OFFS - FRAMESZ(KERN_EXC_FRAME_SIZE))
# Carve an exception frame below the current (kernel) stack pointer
# and save the CPU state into it via k0 — sp itself is untouched
# until the save is complete.
175: PTR_SUB k0, sp, FRAMESZ(KERN_EXC_FRAME_SIZE)
176: SAVE_CPU(k0, CF_RA_OFFS)
177: #if 0
178: cfc0 v1, COP_0_ICR
179: SAVE_REG(v1, IC, k0, CF_RA_OFFS)
180: #endif
181: .set at
182: move sp, k0 # Already on kernel stack
183: LA gp, _gp
# NOTE(review): a1 appears to hold the Status value captured by
# SAVE_CPU — confirm in machine/cpustate.h.  Clear FPU, EXL, interrupt
# enable and KSU so we run in kernel mode with interrupts off.
184: and t0, a1, ~(SR_COP_1_BIT | SR_EXL | SR_INT_ENAB | SR_KSU_MASK)
185: mtc0 t0, COP_0_STATUS_REG
# Bump the interrupt nesting counter.
186: LA t1, int_nest_cntr
187: lw t2, (t1)
188: addiu t2, 1
189: sw t2, (t1)
190: ITLBNOPFIX
# Call interrupt(); the a3 store executes in the jal delay slot.
191: PTR_S a0, 0(sp)
192: jal interrupt
193: PTR_S a3, CF_RA_OFFS + KERN_REG_SIZE(sp)
194:
195: mfc0 t0, COP_0_STATUS_REG # dis int preserve settings.
196: li t1, ~SR_INT_ENAB
197: and t0, t0, t1
198: mtc0 t0, COP_0_STATUS_REG
199:
# Drop the nesting counter back down.
200: LA t1, int_nest_cntr
201: lw t2, (t1)
202: addiu t2, -1
203: sw t2, (t1)
204:
205: PTR_L a0, CF_RA_OFFS + KERN_REG_SIZE(sp)
206: .set noat
207: #if 0
208: RESTORE_REG(t0, IC, sp, CF_RA_OFFS)
209: ctc0 t0, COP_0_ICR
210: #endif
# Restore the interrupted context, pop the frame, and return with eret.
211: RESTORE_CPU(sp, CF_RA_OFFS)
212: PTR_ADDU sp, sp, FRAMESZ(KERN_EXC_FRAME_SIZE)
213: sync
214: eret
215: .set at
216: END(k_intr)
217:
218: /*---------------------------------------------------------------- u_intr
219: * Handle an interrupt in user mode. Save the relevant user
220: * registers into the u.u_pcb struct. This will allow us
221: * to preempt the interrupted process. Full save is held
222: * off though until a switch() really is requiered.
223: */
224: NNON_LEAF(u_intr, FRAMESZ(CF_SZ), ra)
225: .set noat
226: .mask 0x80000000, (CF_RA_OFFS - FRAMESZ(CF_SZ))
# Save the interrupted user context into the PCB (curprocpaddr),
# then switch to the process kernel stack at the top of the USPACE.
227: PTR_L k0, curprocpaddr
228: SAVE_CPU(k0, 0)
229: #if 0
230: cfc0 v1, COP_0_ICR
231: SAVE_REG(v1, IC, k0, 0)
232: #endif
233: PTR_ADDU sp, k0, USPACE-FRAMESZ(CF_SZ)
234: LA gp, _gp
235: .set at
# NOTE(review): a1 appears to hold the Status value captured by
# SAVE_CPU — confirm in machine/cpustate.h.  Enter kernel mode with
# FPU off and interrupts disabled.
236: and t0, a1, ~(SR_COP_1_BIT | SR_EXL | SR_INT_ENAB | SR_KSU_MASK)
237: mtc0 t0, COP_0_STATUS_REG
# Bump the interrupt nesting counter.
238: LA t1, int_nest_cntr
239: lw t2, (t1)
240: addiu t2, 1
241: sw t2, (t1)
242: ITLBNOPFIX
# Call interrupt(); the a3 store executes in the jal delay slot.
243: PTR_S a0, 0(sp)
244: jal interrupt
245: PTR_S a3, CF_RA_OFFS(sp) # for debugging
246:
247: lw v0, astpending # any pending AST?
248: beq v0, zero, 4f
249: nop
250:
# An AST is pending: complete the register save so the process can
# be preempted, then run soft interrupts.
251: PTR_L t0, curprocpaddr
252: SAVE_CPU_SREG(t0, 0)
253:
254: #ifdef PERFCNTRS
255: lw t0, cpu_is_rm7k
256: beqz t0, 1f # not an RM7K. Don't do perf save.
257:
258: mfc0 v0, COP_0_PC_CTRL
259: PTR_L t0, curproc
260: sw v0, P_PC_CTRL(t0)
261: dmfc0 v0, COP_0_WATCH_1
262: dmfc0 v1, COP_0_WATCH_2
263: sd v0, P_WATCH_1(t0)
264: sd v1, P_WATCH_2(t0)
265: mfc0 v0, COP_0_WATCH_M
266: mfc0 v1, COP_0_PC_COUNT
267: sw v0, P_WATCH_M(t0)
268: sw v1, P_PC_COUNT(t0)
269: mtc0 zero, COP_0_PC_CTRL
270: dmtc0 zero, COP_0_WATCH_1
271: dmtc0 zero, COP_0_WATCH_2
272: nop;nop;nop;nop
273: 1:
274: #endif
275: jal softintr
276: nop
277: /*
278: * Restore user registers and return. NOTE: interrupts are enabled.
279: */
280: #ifdef PERFCNTRS
281: lw t0, cpu_is_rm7k
282: beqz t0, 1f # not an RM7K. Don't do perf setup.
283:
284: PTR_L t1, curproc # set up rm7k.
285: ld v0, P_WATCH_1(t1)
286: dmtc0 v0, COP_0_WATCH_1
287: ld v0, P_WATCH_2(t1)
288: dmtc0 v0, COP_0_WATCH_2
289: lw v0, P_WATCH_M(t1)
290: mtc0 v0, COP_0_WATCH_M
291: lw v0, P_PC_CTRL(t1)
292: lw v1, P_PC_COUNT(t1)
293: nop;nop
294: mtc0 v0, COP_0_PC_CTRL
295: nop;nop;nop;nop
296: mtc0 v1, COP_0_PC_COUNT
297: nop;nop;nop;nop
298: 1:
299: #endif
300: PTR_L t0, curprocpaddr
301: RESTORE_CPU_SREG(t0, 0)
302:
303: 4:
# Disable interrupts, then raise EXL so eret returns to user mode.
304: mfc0 t0, COP_0_STATUS_REG # dis int preserve settings.
305: li t1, ~SR_INT_ENAB
306: and t0, t0, t1
307: mtc0 t0, COP_0_STATUS_REG
308:
# Drop the nesting counter back down.
309: LA t1, int_nest_cntr
310: lw t2, (t1)
311: addiu t2, -1
312: sw t2, (t1)
313:
314: ori t0, SR_EXL # restoring to user mode.
315: mtc0 t0, COP_0_STATUS_REG # must set exception level bit.
316:
# Restore the saved interrupt priority level and the user context
# from the PCB; k0/k1 are cleared before returning to user mode.
317: PTR_L k0, curprocpaddr
318: RESTORE_REG(a3, CPL, k0, 0)
319: sw a3, cpl
320: .set noat
321: RESTORE_REG(a0, PC, k0, 0)
322: #if 0
323: RESTORE_REG(t0, IC, k0, 0)
324: ctc0 t0, COP_0_ICR
325: #endif
326: RESTORE_CPU(k0, 0)
327: RESTORE_REG(sp, SP, k0, 0)
328: LI k0, 0
329: LI k1, 0
330: sync
331: eret
332: .set at
333: END(u_intr)
334:
335: /*---------------------------------------------------------------- k_general
336: * Handle a kernel general trap. This is very much like
337: * k_intr except that we call trap instead of interrupt.
338: */
339:
340: NNON_LEAF(k_general, FRAMESZ(KERN_EXC_FRAME_SIZE), ra)
341: .set noat
342: .mask 0x80000000, (CF_RA_OFFS - FRAMESZ(KERN_EXC_FRAME_SIZE))
# Carve an exception frame below the current kernel stack pointer and
# save CPU state into it before touching sp.
343: PTR_SUB k0, sp, FRAMESZ(KERN_EXC_FRAME_SIZE)
344: SAVE_CPU(k0, CF_RA_OFFS)
345: #if 0
346: cfc0 v1, COP_0_ICR
347: SAVE_REG(v1, IC, k0, CF_RA_OFFS)
348: #endif
349: #if defined(DDB)
# With the kernel debugger configured, also save the callee-saved
# registers so DDB can show a full register set.
350: SAVE_CPU_SREG(k0, CF_RA_OFFS)
351: #endif
352: .set at
353: move sp, k0 # Already on kernel stack
354: LA gp, _gp
# NOTE(review): a1 appears to hold the Status value captured by
# SAVE_CPU — confirm in machine/cpustate.h.
355: and t0, a1, ~(SR_COP_1_BIT | SR_EXL | SR_INT_ENAB | SR_KSU_MASK)
356: mtc0 t0, COP_0_STATUS_REG
357: ITLBNOPFIX
# Call trap(); the a3 store executes in the jal delay slot.
358: PTR_S a0, 0(sp)
359: jal trap
360: PTR_S a3, CF_RA_OFFS + KERN_REG_SIZE(sp)
361:
362: mfc0 t0, COP_0_STATUS_REG # dis int preserve settings.
363: li t1, ~SR_INT_ENAB
364: and t0, t0, t1
365: mtc0 t0, COP_0_STATUS_REG
366:
367: .set noat
368: #if 0
369: RESTORE_REG(t0, IC, sp, CF_RA_OFFS)
370: ctc0 t0, COP_0_ICR
371: #endif
# Restore the trapped context, pop the frame, and return with eret.
372: RESTORE_REG(a0, PC, sp, CF_RA_OFFS)
373: RESTORE_CPU(sp, CF_RA_OFFS)
374: PTR_ADDU sp, sp, FRAMESZ(KERN_EXC_FRAME_SIZE)
375: sync
376: eret
377: .set at
378: END(k_general)
379:
380: /*---------------------------------------------------------------- u_general
381: * Handle a user general trap.
382: */
383: NNON_LEAF(u_general, FRAMESZ(CF_SZ), ra)
384: .set noat
385: .mask 0x80000000, (CF_RA_OFFS - FRAMESZ(CF_SZ))
386:
# Save the full user context (including callee-saved registers) into
# the PCB, then switch to the process kernel stack.
387: PTR_L k0, curprocpaddr
388: SAVE_CPU(k0, 0)
389: #if 0
390: cfc0 v1, COP_0_ICR
391: SAVE_REG(v1, IC, k0, 0)
392: #endif
393: SAVE_CPU_SREG(k0, 0)
394: PTR_ADDU sp, k0, USPACE-FRAMESZ(CF_SZ)
395: LA gp, _gp
396: .set at
# NOTE(review): a1 appears to hold the Status value captured by
# SAVE_CPU — confirm in machine/cpustate.h.
397: and t0, a1, ~(SR_COP_1_BIT | SR_EXL | SR_INT_ENAB | SR_KSU_MASK)
398: mtc0 t0, COP_0_STATUS_REG
399: ITLBNOPFIX
400:
401: #ifdef PERFCNTRS
402: lw t0, cpu_is_rm7k
403: beqz t0, 1f # not an RM7K. Don't do perf save.
404:
405: mfc0 v0, COP_0_PC_CTRL
406: PTR_L t0, curproc
407: sw v0, P_PC_CTRL(t0)
408: dmfc0 v0, COP_0_WATCH_1
409: dmfc0 v1, COP_0_WATCH_2
410: sd v0, P_WATCH_1(t0)
411: sd v1, P_WATCH_2(t0)
412: mfc0 v0, COP_0_WATCH_M
413: mfc0 v1, COP_0_PC_COUNT
414: sw v0, P_WATCH_M(t0)
415: sw v1, P_PC_COUNT(t0)
416: mtc0 zero, COP_0_PC_CTRL
417: nop;nop;nop;nop
418: 1:
419: #endif
420:
# Call trap(); the a3 store executes in the jal delay slot.
421: jal trap
422: PTR_S a3, CF_RA_OFFS(sp) # for debugging
423:
424: lw v0, astpending
425: beq v0, zero, 4f
426: nop
427:
# AST pending: re-save callee-saved registers around softintr().
428: PTR_L t0, curprocpaddr
429: SAVE_CPU_SREG(t0, 0)
430:
431: jal softintr
432: nop
433:
434: PTR_L t0, curprocpaddr
435: RESTORE_CPU_SREG(t0, 0)
436:
437: 4:
438: #ifdef PERFCNTRS
439: lw t0, cpu_is_rm7k
440: beqz t0, 1f # not an RM7K. Don't do perf setup.
441:
442: LOAD t0, curproc # set up rm7k.
443: ld v0, P_WATCH_1(t0)
444: dmtc0 v0, COP_0_WATCH_1
445: ld v0, P_WATCH_2(t0)
446: dmtc0 v0, COP_0_WATCH_2
447: lw v0, P_WATCH_M(t0)
448: mtc0 v0, COP_0_WATCH_M
449: lw v0, P_PC_CTRL(t0)
450: lw v1, P_PC_COUNT(t0)
451: nop;nop
452: mtc0 v0, COP_0_PC_CTRL
453: nop;nop;nop;nop
454: mtc0 v1, COP_0_PC_COUNT
455: nop;nop;nop;nop
456: 1:
457: #endif
# Disable interrupts, then raise EXL so eret returns to user mode.
458: mfc0 t0, COP_0_STATUS_REG # dis int preserve settings.
459: li t1, ~SR_INT_ENAB
460: and t0, t0, t1
461: mtc0 t0, COP_0_STATUS_REG
462: ITLBNOPFIX
463:
464: ori t0, SR_EXL # restoring to user mode.
465: mtc0 t0, COP_0_STATUS_REG # must set exception level bit.
466: ITLBNOPFIX
467:
# Restore the saved interrupt priority level and the user context
# from the PCB; k0/k1 are cleared before returning to user mode.
468: PTR_L k0, curprocpaddr
469: RESTORE_REG(a3, CPL, k0, 0)
470: sw a3, cpl
471: .set noat
472: RESTORE_CPU_SREG(k0, 0)
473: RESTORE_REG(a0, PC, k0, 0)
474: #if 0
475: RESTORE_REG(t0, IC, k0, 0)
476: ctc0 t0, COP_0_ICR
477: #endif
478: RESTORE_CPU(k0, 0)
479: RESTORE_REG(sp, SP, k0, 0)
480: LI k0, 0
481: LI k1, 0
482: sync
483: eret
484: .set at
485: END(u_general)
486:
# NOTE: this entire fast-path syscall handler is compiled out
# ("notyet"); syscalls currently dispatch through u_general.
487: #ifdef notyet
488: /*---------------------------------------------------------------- u_syscall
489: * Syscall exceptions are special such that they can be
490: * optimized by not saving more than what is really needed.
491: * Syscalls are actually 'function calls' from the user
492: * programs point of view and thus it does not expect us to
493: * save away all temporary registers etc. Just save state and
494: * args to avoid a lot of overhead.
495: */
496: NNON_LEAF(u_syscall, FRAMESZ(CF_SZ), ra)
497: .set noat
498: .mask 0x80000000, (CF_RA_OFFS - FRAMESZ(CF_SZ))
499:
# Save only the registers a syscall needs preserved: the argument
# registers, sp, ra, and exception state, all into the PCB at UADDR.
500: REG_S a0, UADDR+PCB_REGS+(A0 * REGSZ)
501: REG_S a1, UADDR+PCB_REGS+(A1 * REGSZ)
502: REG_S a2, UADDR+PCB_REGS+(A2 * REGSZ)
503: REG_S a3, UADDR+PCB_REGS+(A3 * REGSZ)
504: mfc0 a0, COP_0_STATUS_REG # First arg is the status reg.
505: mfc0 a1, COP_0_CAUSE_REG # Second arg is the cause reg.
506: dmfc0 a3, COP_0_EXC_PC # Fourth arg is the pc.
507: REG_S sp, UADDR+PCB_REGS+(SP * REGSZ)
508: LA sp, KERNELSTACK - FRAMESZ(CF_SZ) # switch to kernel SP
509: REG_S ra, UADDR+PCB_REGS+(RA * REGSZ)
510: REG_S a0, UADDR+PCB_REGS+(SR * REGSZ)
511: REG_S a1, UADDR+PCB_REGS+(CAUSE * REGSZ)
512: REG_S a3, UADDR+PCB_REGS+(PC * REGSZ)
513: REG_S a3, CF_RA_OFFS(sp) # for debugging
514: LA gp, _gp # switch to kernel GP
515: lw a3, cpl
516: sw a3, UADDR+PCB_REGS+(CPL * REGSZ)
517: .set at
518: # Turn off fpu and enter kernel mode
519: and t0, a0, ~(SR_COP_1_BIT | SR_EXL | SR_KSU_MASK | SR_INT_ENAB)
520: mtc0 t0, COP_0_STATUS_REG
521: li a0, UADDR+PCB_REGS
522: ITLBNOPFIX
523: /*
524: * If CPU is a RM7000 save away performance stuff.
525: */
526: #if 0
527: lw t0, cpu_is_rm7k
528: beqz t0, 1f # not an RM7K. Don't do perf save.
529: mfc0 v0, COP_0_PC_CTRL
530: lw t0, curproc
531: sw v0, P_PC_CTRL(t0)
532: dmfc0 v0, COP_0_WATCH_1
533: dmfc0 v1, COP_0_WATCH_2
534: sd v0, P_WATCH_1(t0)
535: sd v1, P_WATCH_2(t0)
536: mfc0 v0, COP_0_WATCH_M
537: mfc0 v1, COP_0_PC_COUNT
538: sw v0, P_WATCH_M(t0)
539: sw v1, P_PC_COUNT(t0)
540: mtc0 zero, COP_0_PC_CTRL
541: dmtc0 zero, COP_0_WATCH_1
542: dmtc0 zero, COP_0_WATCH_2
543: 1:
544: #endif
545:
546: jal trap
547: nop
548:
# Disable interrupts, then raise EXL so eret returns to user mode.
549: mfc0 t0, COP_0_STATUS_REG # dis int preserve settings.
550: li t1, ~SR_INT_ENAB
551: and t0, t0, t1
552: mtc0 t0, COP_0_STATUS_REG
553: ITLBNOPFIX
554:
555: ori t0, SR_EXL
556: mtc0 t0, COP_0_STATUS_REG # set exception level
557: ITLBNOPFIX
558:
559: #if 0
560: lw t0, cpu_is_rm7k
561: beqz t0, 1f # not an RM7K. Don't do perf setup.
562:
563: PTR_L t0, curproc # set up rm7k.
564: ld v0, P_WATCH_1(t0)
565: dmtc0 v0, COP_0_WATCH_1
566: ld v0, P_WATCH_2(t0)
567: dmtc0 v0, COP_0_WATCH_2
568: lw v0, P_WATCH_M(t0)
569: mtc0 v0, COP_0_WATCH_M
570: lw v0, P_PC_CTRL(t0)
571: lw v1, P_PC_COUNT(t0)
572: nop;nop
573: mtc0 v0, COP_0_PC_CTRL
574: nop;nop;nop;nop
575: mtc0 v1, COP_0_PC_COUNT
576: nop;nop;nop;nop
577: 1:
578: #endif
579: lw a3, UADDR+PCB_REGS+(CPL * REGSZ)
580: sw a3, cpl
581:
582: .set noat
583:
# Restore only the minimal register set (return values, gp, sp, ra)
# and the exception PC, then return to user mode.
584: REG_L a0, UADDR+PCB_REGS+(SR * REGSZ)
585: mtc0 a0, COP_0_STATUS_REG # still exception level
586: REG_L a0, UADDR+PCB_REGS+(PC * REGSZ)
587: REG_L v0, UADDR+PCB_REGS+(V0 * REGSZ)
588: dmtc0 a0, COP_0_EXC_PC # set return address
589: REG_L v1, UADDR+PCB_REGS+(V1 * REGSZ)
590: REG_L gp, UADDR+PCB_REGS+(GP * REGSZ)
591: REG_L sp, UADDR+PCB_REGS+(SP * REGSZ)
592: REG_L ra, UADDR+PCB_REGS+(RA * REGSZ)
593: sync
594: eret
595: .set at
596: END(u_syscall)
597: #endif
598:
599:
600: /*-------------------------------------------------------------- proc_trampoline
601: * Setup for and return to user.
602: */
# proc_trampoline: first code run by a newly created process/thread.
# Lowers to spl0, flushes any pending soft interrupts, calls the
# function in s0 with the argument in s1, then restores the user
# context from the PCB and erets to user mode.
603: LEAF(proc_trampoline, 0)
604: sw zero, cpl # lower to spl0
605: lw t0, ipending
606: beq t0, zero, 0f
607: nop
608:
609: jal setsoftintr0 # process any pending ints
610: nop
611: 0:
# Call the entry function; the move executes in the jal delay slot.
612: jal s0
613: move a0,s1 # set up for return to user.
614:
615: #if 0
616: lw t0, cpu_is_rm7k
617: beqz t0, 1f # not an RM7K. Don't do IC reg.
618:
619: LOAD t0, curproc # set up rm7k.
620: ld v0, P_WATCH_1(t0)
621: dmtc0 v0, COP_0_WATCH_1
622: ld v0, P_WATCH_2(t0)
623: dmtc0 v0, COP_0_WATCH_2
624: lw v0, P_WATCH_M(t0)
625: mtc0 v0, COP_0_WATCH_M
626: lw v0, P_PC_CTRL(t0)
627: lw v1, P_PC_COUNT(t0)
628: nop;nop
629: mtc0 v0, COP_0_PC_CTRL
630: nop;nop;nop;nop
631: mtc0 v1, COP_0_PC_COUNT
632: nop;nop;nop;nop
633: li v0, IC_INT_PERF
634: ctc0 v0, COP_0_ICR # enable perfcntr interrupt.
635: 1:
636: #endif
# Disable interrupts, then raise EXL so eret returns to user mode.
637: mfc0 t0, COP_0_STATUS_REG # dis int preserve settings.
638: li t1, ~SR_INT_ENAB
639: and t0, t0, t1
640: mtc0 t0, COP_0_STATUS_REG
641: ITLBNOPFIX
642:
643: ori t0, SR_EXL # restoring to user mode.
644: mtc0 t0, COP_0_STATUS_REG # must set exception level bit.
645: ITLBNOPFIX
646:
# Restore the user context from the PCB; k0/k1 are cleared before
# returning to user mode.
647: .set noat
648: PTR_L k0, curprocpaddr
649: RESTORE_CPU_SREG(k0, 0)
650: RESTORE_REG(a0, PC, k0, 0)
651: #if 0
652: RESTORE_REG(t0, IC, k0, 0)
653: ctc0 t0, COP_0_ICR
654: #endif
655: RESTORE_CPU(k0, 0)
656: RESTORE_REG(sp, SP, k0, 0)
657: LI k0, 0
658: LI k1, 0
659: sync
660: eret
661: .set at
662: END(proc_trampoline)
CVSweb