Annotation of sys/arch/sh/sh/locore_subr.S, Revision 1.1.1.1
1.1 nbrk 1: /* $OpenBSD: locore_subr.S,v 1.6 2007/03/02 06:11:54 miod Exp $ */
2: /* $NetBSD: locore_subr.S,v 1.28 2006/01/23 22:52:09 uwe Exp $ */
3:
4: /*
5: * Copyright (c) 2007 Miodrag Vallat.
6: *
7: * Permission to use, copy, modify, and distribute this software for any
8: * purpose with or without fee is hereby granted, provided that the above
9: * copyright notice, this permission notice, and the disclaimer below
10: * appear in all copies.
11: *
12: * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13: * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14: * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15: * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16: * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17: * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18: * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19: */
20: /*-
21: * Copyright (c) 2002 The NetBSD Foundation, Inc.
22: * All rights reserved.
23: *
24: * Redistribution and use in source and binary forms, with or without
25: * modification, are permitted provided that the following conditions
26: * are met:
27: * 1. Redistributions of source code must retain the above copyright
28: * notice, this list of conditions and the following disclaimer.
29: * 2. Redistributions in binary form must reproduce the above copyright
30: * notice, this list of conditions and the following disclaimer in the
31: * documentation and/or other materials provided with the distribution.
32: * 3. All advertising materials mentioning features or use of this software
33: * must display the following acknowledgement:
34: * This product includes software developed by the NetBSD
35: * Foundation, Inc. and its contributors.
36: * 4. Neither the name of The NetBSD Foundation nor the names of its
37: * contributors may be used to endorse or promote products derived
38: * from this software without specific prior written permission.
39: *
40: * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
41: * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
42: * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
43: * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
44: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
45: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
46: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
47: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
48: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
49: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
50: * POSSIBILITY OF SUCH DAMAGE.
51: */
52:
53: #include "assym.h"
54:
55: #include <sys/syscall.h> /* SYS_sigreturn, SYS_exit */
56: #include <sh/asm.h>
57: #include <sh/locore.h>
58: #include <sh/param.h> /* UPAGES */
59: #include <sh/mmu_sh3.h>
60: #include <sh/mmu_sh4.h>
61:
62: /*
63: * LINTSTUB: include <sys/types.h>
64: * LINTSTUB: include <sys/proc.h>
65: * LINTSTUB: include <sh/locore.h>
66: */
67:
68: /*
69: * Save integer registers in the pcb.
70: * reg points to pcb->pcb_sf.
71: */
/*
 * SAVEPCB(reg): store the integer context into the switchframe whose
 * address is in `reg' (pcb->pcb_sf).  `reg' is first advanced past the
 * end of the frame (SF_SIZE), then every field is pushed with
 * pre-decrement stores, so on exit `reg' points at the frame start
 * again.  Saved: mach, macl, r7_bank, sr, r6_bank, pr, r8-r15.
 * The empty comments splice the macro argument in traditional cpp.
 */
72: #define SAVEPCB(reg) \
73: add #SF_SIZE, reg ; \
74: sts.l mach, @-/**/reg ; \
75: sts.l macl, @-/**/reg ; \
76: stc.l r7_bank,@-/**/reg ; \
77: stc.l sr, @-/**/reg ; \
78: stc.l r6_bank,@-/**/reg ; \
79: sts.l pr, @-/**/reg ; \
80: mov.l r8, @-/**/reg ; \
81: mov.l r9, @-/**/reg ; \
82: mov.l r10, @-/**/reg ; \
83: mov.l r11, @-/**/reg ; \
84: mov.l r12, @-/**/reg ; \
85: mov.l r13, @-/**/reg ; \
86: mov.l r14, @-/**/reg ; \
87: mov.l r15, @-/**/reg
88:
89: /*
90: * Save floating point registers to a fpreg structure.
91: * reg points to the structure, tmp and tmp2 are two scratch integer registers.
92: */
/*
 * SAVEFP(reg, tmp, tmp2): save the full FPU state (both register banks,
 * fpul and fpscr) into the fpreg structure pointed to by `reg'.
 * `tmp' and `tmp2' are scratch integer registers; `tmp2' keeps the
 * original fpscr, which is restored at the end.  fpscr is cleared to
 * zero first so the fmov.s stores run in plain single-move mode.
 * The FP_SIZE offset is added in two steps because the SH immediate
 * add is limited to 8-bit signed values.  frchg flips between the
 * front and back FP register banks so both get saved.
 */
93: #define SAVEFP(reg, tmp, tmp2) \
94: add #124, reg ; \
95: sts fpscr, tmp2 ; \
96: add #(FP_SIZE - 124), reg ; \
97: mov #0, tmp; \
98: mov.l tmp2, @-/**/reg ; \
99: lds tmp, fpscr; \
100: sts.l fpul, @-/**/reg ; \
101: frchg; \
102: fmov.s fr15, @-/**/reg ; \
103: fmov.s fr14, @-/**/reg ; \
104: fmov.s fr13, @-/**/reg ; \
105: fmov.s fr12, @-/**/reg ; \
106: fmov.s fr11, @-/**/reg ; \
107: fmov.s fr10, @-/**/reg ; \
108: fmov.s fr9, @-/**/reg ; \
109: fmov.s fr8, @-/**/reg ; \
110: fmov.s fr7, @-/**/reg ; \
111: fmov.s fr6, @-/**/reg ; \
112: fmov.s fr5, @-/**/reg ; \
113: fmov.s fr4, @-/**/reg ; \
114: fmov.s fr3, @-/**/reg ; \
115: fmov.s fr2, @-/**/reg ; \
116: fmov.s fr1, @-/**/reg ; \
117: fmov.s fr0, @-/**/reg ; \
118: frchg; \
119: fmov.s fr15, @-/**/reg ; \
120: fmov.s fr14, @-/**/reg ; \
121: fmov.s fr13, @-/**/reg ; \
122: fmov.s fr12, @-/**/reg ; \
123: fmov.s fr11, @-/**/reg ; \
124: fmov.s fr10, @-/**/reg ; \
125: fmov.s fr9, @-/**/reg ; \
126: fmov.s fr8, @-/**/reg ; \
127: fmov.s fr7, @-/**/reg ; \
128: fmov.s fr6, @-/**/reg ; \
129: fmov.s fr5, @-/**/reg ; \
130: fmov.s fr4, @-/**/reg ; \
131: fmov.s fr3, @-/**/reg ; \
132: fmov.s fr2, @-/**/reg ; \
133: fmov.s fr1, @-/**/reg ; \
134: fmov.s fr0, @-/**/reg ; \
135: lds tmp2, fpscr
136:
137: /*
138: * Load floating point registers from a fpreg structure.
139: * reg points to the structure, tmp is a scratch integer register.
140: */
/*
 * LOADFP(reg, tmp): reload the full FPU state (both register banks,
 * fpul and fpscr) from the fpreg structure pointed to by `reg'.
 * `tmp' is a scratch integer register used to zero fpscr up front so
 * the fmov.s loads run in plain single-move mode.  The saved fpscr is
 * reloaded last, which restores the proc's FP mode bits.  `reg' is
 * advanced past the structure by the post-increment loads.
 */
141: #define LOADFP(reg, tmp) \
142: mov #0, tmp; \
143: lds tmp, fpscr; \
144: fmov.s @/**/reg/**/+, fr0 ; \
145: fmov.s @/**/reg/**/+, fr1 ; \
146: fmov.s @/**/reg/**/+, fr2 ; \
147: fmov.s @/**/reg/**/+, fr3 ; \
148: fmov.s @/**/reg/**/+, fr4 ; \
149: fmov.s @/**/reg/**/+, fr5 ; \
150: fmov.s @/**/reg/**/+, fr6 ; \
151: fmov.s @/**/reg/**/+, fr7 ; \
152: fmov.s @/**/reg/**/+, fr8 ; \
153: fmov.s @/**/reg/**/+, fr9 ; \
154: fmov.s @/**/reg/**/+, fr10 ; \
155: fmov.s @/**/reg/**/+, fr11 ; \
156: fmov.s @/**/reg/**/+, fr12 ; \
157: fmov.s @/**/reg/**/+, fr13 ; \
158: fmov.s @/**/reg/**/+, fr14 ; \
159: fmov.s @/**/reg/**/+, fr15 ; \
160: frchg; \
161: fmov.s @/**/reg/**/+, fr0 ; \
162: fmov.s @/**/reg/**/+, fr1 ; \
163: fmov.s @/**/reg/**/+, fr2 ; \
164: fmov.s @/**/reg/**/+, fr3 ; \
165: fmov.s @/**/reg/**/+, fr4 ; \
166: fmov.s @/**/reg/**/+, fr5 ; \
167: fmov.s @/**/reg/**/+, fr6 ; \
168: fmov.s @/**/reg/**/+, fr7 ; \
169: fmov.s @/**/reg/**/+, fr8 ; \
170: fmov.s @/**/reg/**/+, fr9 ; \
171: fmov.s @/**/reg/**/+, fr10 ; \
172: fmov.s @/**/reg/**/+, fr11 ; \
173: fmov.s @/**/reg/**/+, fr12 ; \
174: fmov.s @/**/reg/**/+, fr13 ; \
175: fmov.s @/**/reg/**/+, fr14 ; \
176: fmov.s @/**/reg/**/+, fr15 ; \
177: lds.l @/**/reg/**/+, fpul ; \
178: lds.l @/**/reg/**/+, fpscr
179:
180: .text
181: .align 5 /* align cache line size (32B) */
182: /*
183: * LINTSTUB: Func: void cpu_switch(struct proc *p, struct proc *XXX_IGNORED)
184: * Find a runnable proc and switch to it. Wait if necessary.
185: */
/*
 * void cpu_switch(struct proc *p, ...)
 *
 * In:  r4 = outgoing proc.
 * Saves the outgoing proc's integer and FP context into its pcb
 * (located via p->p_md.md_pcb, offset .L_SF), asks cpu_switch_search()
 * for the next runnable proc (it may sleep), then — with exceptions
 * blocked — switches the kernel stack bank registers (r7_bank/r6_bank)
 * and r15 to the new proc's, wires its u-area through switch_resume,
 * and finally reloads the new proc's context and returns into it.
 */
186: ENTRY(cpu_switch)
187: /* Save current proc's context to switchframe */
188: mov.l .L_SF, r0
189: mov.l @(r0, r4), r1
190: SAVEPCB(r1)
191: add #PCB_FP, r1
192: SAVEFP(r1, r8, r9)
193:
194: .L_find_and_switch:
/* r4 is preserved into r8 in the delay slot; r0 returns the new proc. */
195: /* Search next proc. cpu_switch_search may or may not sleep. */
196: mov.l .L_cpu_switch_search, r0
197: jsr @r0
198: mov r4, r8 /* save old proc */
199:
200: /* Skip context switch if same proc. */
201: cmp/eq r8, r0
202: bt/s 1f
203: mov r0, r4 /* new proc */
204:
205: /* Setup kernel stack */
206: mov.l .L_SF, r0
207: mov.l @(r0, r4), r1 /* switch frame */
208: mov.l @(SF_R7_BANK, r1), r0 /* stack top */
209: mov.l @(SF_R6_BANK, r1), r2 /* current frame */
210: mov.l @(SF_R15, r1), r3 /* current stack */
211: /* During kernel stack switching, all interrupts are disabled. */
212: __EXCEPTION_BLOCK(r1, r5)
213: /* switch to new kernel stack */
214: ldc r0, r7_bank
215: ldc r2, r6_bank
216: mov r3, r15
217:
218: /* Wire u-area */
219: MOV (switch_resume, r0)
220: jsr @r0
221: mov r4, r8 /* save new proc */
222: mov r8, r4
223: __EXCEPTION_UNBLOCK(r0, r1)
224: /* Now OK to use kernel stack. */
225:
/* r15 was reloaded during the stack switch above, so skip its slot. */
226: /* Restore new proc's context from switchframe */
227: 1: mov.l .L_SF, r0
228: mov.l @(r0, r4), r1
229: add #4, r1 /* r15 already restored */
230: mov.l @r1+, r14
231: mov.l @r1+, r13
232: mov.l @r1+, r12
233: mov.l @r1+, r11
234: mov.l @r1+, r10
235: mov.l @r1+, r9
236: mov.l @r1+, r8
237: lds.l @r1+, pr
238: add #4, r1 /* r6_bank already restored */
239: ldc.l @r1+, sr
240: add #4, r1 /* r7_bank already restored */
241: lds.l @r1+, macl
242: lds.l @r1+, mach
243:
244: mov.l @(r0, r4), r1
245: add #PCB_FP, r1
246: LOADFP(r1, r0)
247: rts
248: nop
249: .align 2
250: .L_SF: .long (P_MD_PCB)
251: .L_cpu_switch_search: .long _C_LABEL(cpu_switch_search)
252: FUNC_SYMBOL(switch_resume)
253:
254:
255: /*
256: * LINTSTUB: Func: void switch_exit(struct proc *p, void (*exit_func)(struct proc *))
257: * Called only from cpu_exit(p). Before we call exit_func to
258: * free proc's resources (including kernel stack) we need to
259: * switch to the proc0's kernel stack. Then we jump into the
260: * middle of cpu_switch to find and switch to a new proc.
261: */
/*
 * void switch_exit(struct proc *p, void (*exit_func)(struct proc *))
 *
 * In:  r4 = exiting proc, r5 = exit_func.
 * Makes proc0's pcb current and switches to proc0's kernel stack (with
 * exceptions blocked) so that exit_func may free the exiting proc's
 * stack, then calls (*exit_func)(p) and branches into the middle of
 * cpu_switch (.L_find_and_switch) to pick the next proc.
 */
262: ALTENTRY(switch_exit)
263: mov.l .L_switch_exit_proc0_pcb, r1
264: mov.l .L_switch_exit_curpcb, r0
265: mov.l @r1, r1
266: mov.l r1, @r0 /* curpcb = proc0.p_md.md_pcb */
267:
268: mov.l @(SF_R7_BANK, r1), r0 /* stack top */
269: mov.l @(SF_R6_BANK, r1), r2 /* current frame */
270: mov.l @(SF_R15, r1), r3 /* current stack */
271:
272: /* switch to proc0's kernel stack */
273: __EXCEPTION_BLOCK(r1, r6)
274: ldc r0, r7_bank
275: ldc r2, r6_bank
276: mov r3, r15
277: __EXCEPTION_UNBLOCK(r0, r1)
278:
279: /* safe to call (*exit_func)(p); now */
280: jsr @r5
281: nop /* proc is already in r4 */
282:
/* The delay slot clears r4 so cpu_switch sees no "previous" proc. */
283: /* proceed to cpu_switch */
284: bra .L_find_and_switch
285: mov #0, r4 /* no "previous" proc */
286:
287: .align 2
288: .L_switch_exit_proc0_pcb:
289: .long _C_LABEL(proc0) + P_MD_PCB
290: .L_switch_exit_curpcb:
291: .long _C_LABEL(curpcb)
292:
293:
294: #ifdef SH3
295: /*
296: * LINTSTUB: Func: void sh3_switch_resume(struct proc *p)
297: * Set the current u-area PTE array as curupte.
298: * No entries need to be flushed; this relies on the u-area mapping
299: * being wired, so it can never cause a modified/reference fault.
300: * A u-area TLB fault is only handled by the TLB miss exception.
301: * If a "VPN match but not Valid" situation occurs, SH3 jumps to the
302: * "generic exception" handler instead of the TLB miss exception,
303: * which OpenBSD/sh does not handle.  As a result, it causes a
304: * hard reset (the kernel stack can never be accessed).
305: */
/*
 * In:  r4 = proc.  Publishes &p->p_md.md_upte into curupte so the SH3
 * TLB miss handler can find the u-area PTEs.  The store to curupte
 * executes in the rts delay slot.  Clobbers r0, r1.
 */
306: NENTRY(sh3_switch_resume)
307: mov.l .L_UPTE, r0
308: mov.l .L_curupte, r1
309: add r4, r0 /* p->p_md.md_upte */
310: rts
311: mov.l r0, @r1
312: .align 2
313: .L_UPTE: .long P_MD_UPTE
314: .L_curupte: .long _C_LABEL(curupte)
315: SET_ENTRY_SIZE(sh3_switch_resume)
316: #endif /* SH3 */
317:
318:
319: #ifdef SH4
320: /*
321: * LINTSTUB: Func: void sh4_switch_resume(struct proc *p)
322: * Wire u-area. invalidate TLB entry for kernel stack to prevent
323: * TLB multiple hit.
324: */
/*
 * In:  r4 = proc.  Wires the UPAGES u-area TLB entries recorded in
 * p->p_md.md_upte (an array of addr/data pairs for the UTLB address
 * and data arrays), after first invalidating any existing VPN-matching
 * entry to avoid a TLB multiple hit.  Returns immediately if the first
 * entry's addr is 0 (u-area not set up).  ASID in PTEH is temporarily
 * forced to 0 around the update and restored afterwards (old value
 * kept in r7).  The UTLB arrays are only accessible through the
 * uncached P2 area, hence the jump trampolines to P2 and back to P1.
 * Loop registers: r2 = entry counter (1..UPAGES), r3 = UPAGES.
 * NOTE(review): .L_4_ITLB_AA is defined but appears unreferenced in
 * this revision.
 */
325: NENTRY(sh4_switch_resume)
326: mov.l .L_UPTE__sh4, r0
327: add r0, r4 /* p->p_md.md_upte */
328: mov #UPAGES,r3
329: mov #1, r2
330: mov.l @r4, r0 /* if (p->p_md.md_upte[0].addr == 0) return; */
331: tst r0, r0
332: bt 2f
333:
334: /* Save old ASID and set ASID to zero */
335: xor r0, r0
336: mov.l .L_4_PTEH, r1
337: mov.l @r1, r7
338: mov.l r0, @r1
339:
340: mov.l .L_VPN_MASK, r6
341: mov.l .L_4_UTLB_AA_A, r5
342:
343: /* TLB address array must be accessed via P2. Setup jump address. */
344: mova 1f, r0
345: mov.l .L_P2BASE, r1
346: or r1, r0
347: jmp @r0 /* run P2 */
348: nop
349:
350: /* Probe VPN match TLB entry and invalidate it. */
351: .align 2 /* mova target must be 4byte alignment */
352: 1: mov.l @(4, r4), r0
353: and r6, r0
354: mov.l r0, @r5 /* clear D, V */
355:
356: /* Wire u-area TLB entry */
357: /* Address array */
358: mov.l @r4+, r0 /* addr */
359: mov.l @r4+, r1 /* data */
360: mov.l r1, @r0 /* *addr = data */
361:
362: /* Data array */
363: mov.l @r4+, r0 /* addr */
364: mov.l @r4+, r1 /* data */
365: mov.l r1, @r0 /* *addr = data */
366: cmp/eq r2, r3
367: bf/s 1b
368: add #1, r2
369:
370: /* restore ASID */
371: mov.l .L_4_PTEH, r0
372: mov.l r7, @r0
373: mova 2f, r0
374: jmp @r0 /* run P1 */
375: nop
376: .align 2
377: 2: rts /* mova target must be 4byte alignment */
378: nop
379: .align 2
380: .L_UPTE__sh4: .long P_MD_UPTE
381: .L_4_PTEH: .long SH4_PTEH
382: .L_4_UTLB_AA_A: .long (SH4_UTLB_AA | SH4_UTLB_A)
383: .L_4_ITLB_AA: .long SH4_ITLB_AA
384: .L_VPN_MASK: .long 0xfffff000
385: .L_P2BASE: .long 0xa0000000
386: SET_ENTRY_SIZE(sh4_switch_resume)
387: #endif /* SH4 */
388:
389:
390: /*
391: * LINTSTUB: Func: int _cpu_intr_raise(int s)
392: * Raise SR.IMASK to 's'.  If the current SR.IMASK is greater than or
393: * equal to 's', nothing is done.  Returns the previous SR.IMASK.
394: */
/*
 * In:  r4 = new IMASK value (already shifted into bits 4-7).
 * Out: r0 = previous SR.IMASK (SR & 0xf0).
 * SR is only rewritten when the requested mask is higher than the
 * current one.  0xf0 is built as 0x78 << 1 because mov #imm is
 * limited to signed 8-bit immediates.
 */
395: NENTRY(_cpu_intr_raise)
396: stc sr, r2
397: mov #0x78, r1
398: mov r2, r0
399: shll r1 /* r1 = 0xf0 */
400: and r1, r0 /* r0 = SR & 0xf0 */
401: cmp/ge r4, r0 /* r0 >= r4 ? T = 1 */
402: bt/s 1f
403: not r1, r1 /* r1 = 0xffffff0f */
404: and r1, r2 /* r2 = SR & ~0xf0 */
405: or r2, r4 /* r4 = (SR & ~0xf0) | s */
406: ldc r4, sr /* SR = r4 (don't move to delay slot) */
407: 1: rts
408: nop /* return (SR & 0xf0) */
409: SET_ENTRY_SIZE(_cpu_intr_raise)
410:
411:
412: /*
413: * LINTSTUB: Func: int _cpu_intr_suspend(void)
414: * Mask all external interrupts.  Returns the previous SR.IMASK.
415: */
/*
 * Out: r0 = previous SR.IMASK (SR & 0xf0).
 * Sets all four IMASK bits in SR, masking every external interrupt.
 * The return value is computed in the rts delay slot.
 */
416: NENTRY(_cpu_intr_suspend)
417: stc sr, r0 /* r0 = SR */
418: mov #0x78, r1
419: shll r1 /* r1 = 0x000000f0 */
420: mov r0, r2 /* r2 = SR */
421: or r1, r2 /* r2 |= 0x000000f0 */
422: ldc r2, sr /* SR = r2 */
423: rts
424: and r1, r0 /* r0 = SR & 0x000000f0 */
425: SET_ENTRY_SIZE(_cpu_intr_suspend)
426:
427:
428:
429: /*
430: * LINTSTUB: Func: int _cpu_intr_resume(int s)
431: * Set 's' to SR.IMASK. Returns previous SR.IMASK.
432: */
/*
 * In:  r4 = IMASK value to install (bits 4-7).
 * Out: r0 = previous SR.IMASK (SR & 0xf0).
 * Unconditionally replaces SR.IMASK with `s'; the return value is
 * masked out in the rts delay slot.
 */
433: NENTRY(_cpu_intr_resume)
434: stc sr, r0 /* r0 = SR */
435: mov #0x78, r2
436: shll r2 /* r2 = 0x000000f0 */
437: not r2, r1 /* r1 = 0xffffff0f */
438: and r0, r1 /* r1 = (SR & ~0xf0) */
439: or r1, r4 /* r4 = (SR & ~0xf0) | level */
440: ldc r4, sr /* SR = r0 (don't move to delay slot) */
441: rts
442: and r2, r0 /* return (SR & 0xf0) */
443: SET_ENTRY_SIZE(_cpu_intr_resume)
444:
445:
446: /*
447: * LINTSTUB: Func: int _cpu_exception_suspend(void)
448: * Block exceptions (set SR.BL).  External interrupts raised meanwhile
449: * are left pending; if an exception occurs, the CPU jumps to 0xa0000000 (hard reset).
450: */
/*
 * Out: r0 = previous SR.BL bit (0 or 0x10000000), suitable for
 *      passing to _cpu_exception_resume().
 * Sets SR.BL.  The 0x10000000 mask is built with swap.b/swap.w from
 * #0x10 because mov #imm only takes signed 8-bit immediates.
 */
451: NENTRY(_cpu_exception_suspend)
452: stc sr, r0 /* r0 = SR */
453: mov #0x10, r1
454: swap.b r1, r1
455: mov r0, r2 /* r2 = r0 */
456: swap.w r1, r1 /* r1 = 0x10000000 */
457: or r1, r2 /* r2 |= 0x10000000 */
458: ldc r2, sr /* SR = r2 */
459: rts
460: and r1, r0 /* r0 &= 0x10000000 */
461: SET_ENTRY_SIZE(_cpu_exception_suspend)
462:
463:
464: /*
465: * LINTSTUB: Func: void _cpu_exception_resume(int s)
466: * restore 's' exception mask. (SR.BL)
467: */
/*
 * In:  r4 = saved SR.BL bit (as returned by _cpu_exception_suspend).
 * Clears SR.BL then ORs the saved bit back in, restoring the previous
 * exception-block state.  Clobbers r0, r1.
 */
468: NENTRY(_cpu_exception_resume)
469: stc sr, r0 /* r0 = SR */
470: mov #0x10, r1
471: swap.b r1, r1
472: swap.w r1, r1
473: not r1, r1 /* r1 = ~0x10000000 */
474: and r1, r0 /* r0 &= ~0x10000000 */
475: or r4, r0 /* r0 |= old SR.BL */
476: ldc r0, sr /* SR = r0 (don't move to delay slot) */
477: rts
478: nop
479: SET_ENTRY_SIZE(_cpu_exception_resume)
480:
481:
482: /*
483: * LINTSTUB: Func: void _cpu_spin(uint32_t count)
484: * Loop for 'count' * 10 cycles.
485: * [...]
486: * add IF ID EX MA WB
487: * nop IF ID EX MA WB
488: * cmp/pl IF ID EX MA WB - -
489: * nop IF ID EX MA - - WB
490: * bt IF ID EX . . MA WB
491: * nop IF ID - - EX MA WB
492: * nop IF - - ID EX MA WB
493: * nop - - - IF ID EX MA WB
494: * add IF ID EX MA WB
495: * nop IF ID EX MA WB
496: * cmp/pl IF ID EX MA WB - -
497: * nop IF ID EX MA - - WB
498: * bt IF ID EX . . MA
499: * [...]
500: */
/*
 * In:  r4 = iteration count; each loop pass costs 10 cycles per the
 * pipeline diagram above, so the delay is count * 10 cycles.  The
 * .align 5 keeps the whole loop inside one 32-byte cache line so the
 * timing is not perturbed by instruction fetch.
 */
501: .align 5 /* align cache line size (32B) */
502: NENTRY(_cpu_spin)
503: 1: nop /* 1 */
504: nop /* 2 */
505: nop /* 3 */
506: add #-1, r4 /* 4 */
507: nop /* 5 */
508: cmp/pl r4 /* 6 */
509: nop /* 7 */
510: bt 1b /* 8, 9, 10 */
511: rts
512: nop
513: SET_ENTRY_SIZE(_cpu_spin)
514:
515:
516: /*
517: * proc_trampoline:
518: * Call the service function with one argument, specified by r12 and
519: * r11 respectively; both are set up by cpu_fork().
520: */
/*
 * Entry point of a newly forked proc: calls the function in r12 with
 * the argument in r11 (both set up by cpu_fork(); the argument move is
 * in the jsr delay slot), then drops out of the kernel through
 * __EXCEPTION_RETURN.  Never returns.
 */
521: NENTRY(proc_trampoline)
522: jsr @r12
523: mov r11, r4
524: __EXCEPTION_RETURN
525: /* NOTREACHED */
526: SET_ENTRY_SIZE(proc_trampoline)
527:
528:
529: /*
530: * LINTSTUB: Var: char sigcode[1]
531: * Signal trampoline.
532: *
533: * The kernel arranges for the signal handler to be invoked directly.
534: * This trampoline is used only to perform the return.
535: *
536: * On entry, the stack looks like this:
537: *
538: * sp-> sigcontext structure
539: */
/*
 * Userland signal-return trampoline, copied out to user space.
 * The sigcontext sits at the top of the user stack; its address is
 * passed as the single argument to SYS_sigreturn (trapa #0x80 is the
 * syscall gate).  If sigreturn somehow returns, SYS_exit is issued.
 * esigcode marks the end so the kernel knows how many bytes to copy.
 */
540: NENTRY(sigcode)
541: mov r15, r4 /* get pointer to sigcontext */
542: mov.l .L_SYS_sigreturn, r0
543: trapa #0x80 /* and call sigreturn() */
544: mov.l .L_SYS_exit, r0
545: trapa #0x80 /* exit if sigreturn fails */
546: /* NOTREACHED */
547:
548: .align 2
549: .L_SYS_sigreturn: .long SYS_sigreturn
550: .L_SYS_exit: .long SYS_exit
551:
552: /* LINTSTUB: Var: char esigcode[1] */
553: .globl _C_LABEL(esigcode)
554: _C_LABEL(esigcode):
555: SET_ENTRY_SIZE(sigcode)
556:
557: /*
558: * LINTSTUB: Func: void savectx(struct pcb *pcb)
559: * save struct switchframe.
560: */
/*
 * void savectx(struct pcb *pcb)
 * In:  r4 = pcb.  Saves the caller's integer context into the pcb's
 * switchframe and the FP context at offset PCB_FP.  Clobbers r0, r1.
 */
561: ENTRY(savectx)
562: SAVEPCB(r4)
563: add #PCB_FP, r4
564: SAVEFP(r4, r0, r1)
565: rts
566: nop
567: SET_ENTRY_SIZE(savectx)
568:
569: /*
570: * void fpu_save(struct fpreg *fp)
571: *
572: * Saves fpu context.
573: */
/*
 * void fpu_save(struct fpreg *fp)
 * In:  r4 = fp.  Saves the full FPU context (see SAVEFP).
 * Clobbers r0, r1.
 */
574: ENTRY(fpu_save)
575: SAVEFP(r4, r0, r1)
576: rts
577: nop
578: SET_ENTRY_SIZE(fpu_save)
579:
580: /*
581: * void fpu_restore(struct fpreg *fp)
582: *
583: * Restores fpu context.
584: */
/*
 * void fpu_restore(struct fpreg *fp)
 * In:  r4 = fp.  Restores the full FPU context (see LOADFP).
 * Clobbers r0.
 */
585: ENTRY(fpu_restore)
586: LOADFP(r4, r0)
587: rts
588: nop
589: SET_ENTRY_SIZE(fpu_restore)
590:
591: /*
592: * LINTSTUB: Func: int copyout(const void *ksrc, void *udst, size_t len)
593: * Copy len bytes into the user address space.
594: */
/*
 * int copyout(const void *ksrc, void *udst, size_t len)
 * In:  r4 = ksrc, r5 = udst, r6 = len.
 * Out: r0 = 0 on success, EFAULT if [udst, udst+len) is not entirely
 *      below VM_MAXUSER_ADDRESS or wraps, or if the copy faults.
 * A fault during memcpy vectors to label 3 through curpcb->pcb_onfault,
 * which is cleared again on the way out.  The register shuffle below
 * rearranges args for memcpy(dst=uaddr, src=kaddr, len).
 */
595: ENTRY(copyout)
596: mov.l r14, @-r15
597: sts.l pr, @-r15
598: mov r15, r14
599:
600: mov #EFAULT, r0 /* assume there was a problem */
601: mov r4, r3
602: mov r5, r2
603: mov r5, r4
604: add r6, r2
605: cmp/hs r5, r2 /* bomb if uaddr+len wraps */
606: bf 2f
607: mov.l .L_copyout_VM_MAXUSER_ADDRESS, r1
608: cmp/hi r1, r2 /* bomb if uaddr isn't in user space */
609: bt 2f
610:
611: mov.l .L_copyout_curpcb, r1 /* set fault hander */
612: mov.l @r1, r2
613: mov.l .L_copyout_onfault, r1
614: mov.l r1, @(PCB_ONFAULT,r2)
615: mov.l .L_copyout_memcpy, r1
616: jsr @r1 /* memcpy(uaddr, kaddr, len) */
617: mov r3, r5
618:
619: mov #0, r0
620: 1:
621: mov.l .L_copyout_curpcb, r1 /* clear fault handler */
622: mov.l @r1, r2
623: mov #0, r1
624: mov.l r1, @(PCB_ONFAULT,r2)
625: 2:
626: mov r14, r15
627: lds.l @r15+, pr
628: rts
629: mov.l @r15+, r14
630:
/* Fault handler target: set EFAULT, then clear pcb_onfault at 1. */
631: 3:
632: bra 1b
633: mov #EFAULT, r0
634:
635: .align 2
636: .L_copyout_onfault:
637: .long 3b
638: .L_copyout_VM_MAXUSER_ADDRESS:
639: .long VM_MAXUSER_ADDRESS
640: .L_copyout_curpcb:
641: .long _C_LABEL(curpcb)
642: .L_copyout_memcpy:
643: .long _C_LABEL(memcpy)
644: SET_ENTRY_SIZE(copyout)
645:
646:
647: /*
648: * LINTSTUB: Func: int copyin(const void *usrc, void *kdst, size_t len)
649: * Copy len bytes from the user address space.
650: */
/*
 * int copyin(const void *usrc, void *kdst, size_t len)
 * In:  r4 = usrc, r5 = kdst, r6 = len.
 * Out: r0 = 0 on success, EFAULT if [usrc, usrc+len) is not entirely
 *      below VM_MAXUSER_ADDRESS or wraps, or if the copy faults.
 * A fault during memcpy vectors to label 3 through curpcb->pcb_onfault,
 * which is cleared again on the way out.  The register shuffle below
 * rearranges args for memcpy(dst=kaddr, src=uaddr, len).
 */
651: ENTRY(copyin)
652: mov.l r14, @-r15
653: sts.l pr, @-r15
654: mov r15, r14
655:
656: mov #EFAULT, r0 /* assume there was a problem */
657: mov r4, r3
658: mov r5, r4
659: mov r3, r2
660: add r6, r2
661: cmp/hs r3, r2 /* bomb if uaddr+len wraps */
662: bf 2f
663: mov.l .L_copyin_VM_MAXUSER_ADDRESS, r1
664: cmp/hi r1, r2 /* bomb if uaddr isn't in user space */
665: bt 2f
666:
667: mov.l .L_copyin_curpcb, r1 /* set fault hander */
668: mov.l @r1, r2
669: mov.l .L_copyin_onfault, r1
670: mov.l r1, @(PCB_ONFAULT,r2)
671: mov.l .L_copyin_memcpy, r1
672: jsr @r1 /* memcpy(kaddr, uaddr, len) */
673: mov r3, r5
674:
675: mov #0, r0
676: 1:
677: mov.l .L_copyin_curpcb, r1 /* clear fault hander */
678: mov.l @r1, r2
679: mov #0, r1
680: mov.l r1, @(PCB_ONFAULT,r2)
681: 2:
682: mov r14, r15
683: lds.l @r15+, pr
684: rts
685: mov.l @r15+, r14
686:
/* Fault handler target: set EFAULT, then clear pcb_onfault at 1. */
687: 3:
688: bra 1b
689: mov #EFAULT, r0
690:
691: .align 2
692: .L_copyin_onfault:
693: .long 3b
694: .L_copyin_VM_MAXUSER_ADDRESS:
695: .long VM_MAXUSER_ADDRESS
696: .L_copyin_curpcb:
697: .long _C_LABEL(curpcb)
698: .L_copyin_memcpy:
699: .long _C_LABEL(memcpy)
700: SET_ENTRY_SIZE(copyin)
701:
702:
703: /*
704: * LINTSTUB: Func: int copyoutstr(const void *ksrc, void *udst, size_t maxlen, size_t *lencopied)
705: * Copy a NUL-terminated string, at most maxlen characters long,
706: * into the user address space. Return the number of characters
707: * copied (including the NUL) in *lencopied. If the string is
708: * too long, return ENAMETOOLONG; else return 0 or EFAULT.
709: */
/*
 * int copyoutstr(const void *ksrc, void *udst, size_t maxlen,
 *                size_t *lencopied)
 * In:  r4 = ksrc, r5 = udst, r6 = maxlen, r7 = lencopied (may be 0).
 * Out: r0 = 0, EFAULT or ENAMETOOLONG; *lencopied = bytes copied
 *      (including the NUL) when r7 is non-null.
 * Register roles: r0 = bytes remaining, r3 = return value, r8 = saved
 * start of ksrc (for the length computation).  A fault while storing
 * to user space vectors to label 5 via curpcb->pcb_onfault.
 */
710: ENTRY(copyoutstr)
711: mov.l r8, @-r15
712:
713: mov #EFAULT, r3 /* assume there was a problem */
714: mov r4, r8
715: mov.l .L_copyoutstr_curpcb, r1 /* set fault handler */
716: mov.l @r1, r2
717: mov.l .L_copyoutstr_onfault, r1
718: mov.l r1, @(PCB_ONFAULT,r2)
719: mov.l .L_copyoutstr_VM_MAXUSER_ADDRESS, r1
720: cmp/hi r1, r5 /* bomb if udst isn't in user space */
721: bt 4f
/* Clamp the copy count to the bytes left below the user-space limit. */
722: mov r1, r0
723: sub r5, r0
724: cmp/hi r6, r0 /* don't beyond user space */
725: bf 2f
726: bra 2f
727: mov r6, r0
728:
729: .align 2
730: 1:
731: mov.b @r4+, r1 /* copy str */
732: mov.b r1, @r5
733: extu.b r1, r1
734: add #1, r5
735: tst r1, r1
736: bf 2f
737: bra 3f
738: mov #0, r3 /* NUL copied: success */
739: .align 2
740: 2:
741: add #-1, r0
742: cmp/eq #-1, r0
743: bf 1b
/* Ran out of budget: hit the address limit (EFAULT stands) or maxlen. */
744: mov.l .L_copyoutstr_VM_MAXUSER_ADDRESS, r1
745: cmp/hs r1, r5
746: bt 3f
747: mov #ENAMETOOLONG, r3
748:
749: 3:
750: tst r7, r7 /* set lencopied if needed */
751: bt 4f
752: mov r4, r1
753: sub r8, r1
754: mov.l r1, @r7
755: 4:
756: mov.l .L_copyoutstr_curpcb, r1 /* clear fault handler */
757: mov.l @r1, r2
758: mov #0, r1
759: mov.l r1, @(PCB_ONFAULT,r2)
760:
761: mov r3, r0
762: rts
763: mov.l @r15+, r8
764:
765: 5:
766: bra 4b
767: mov #EFAULT, r3
768:
769: .align 2
770: .L_copyoutstr_onfault:
771: .long 5b
772: .L_copyoutstr_VM_MAXUSER_ADDRESS:
773: .long VM_MAXUSER_ADDRESS
774: .L_copyoutstr_curpcb:
775: .long _C_LABEL(curpcb)
776: SET_ENTRY_SIZE(copyoutstr)
777:
778:
779: /*
780: * LINTSTUB: Func: int copyinstr(const void *src, void *dst, size_t maxlen, size_t *lencopied)
781: * Copy a NUL-terminated string, at most maxlen characters long,
782: * from the user address space. Return the number of characters
783: * copied (including the NUL) in *lencopied. If the string is
784: * too long, return ENAMETOOLONG; else return 0 or EFAULT.
785: */
/*
 * int copyinstr(const void *src, void *dst, size_t maxlen,
 *               size_t *lencopied)
 * In:  r4 = src (user), r5 = dst (kernel), r6 = maxlen,
 *      r7 = lencopied (may be 0).
 * Out: r0 = 0, EFAULT or ENAMETOOLONG; *lencopied = bytes copied
 *      (including the NUL) when r7 is non-null.
 * Register roles: r0 = bytes remaining, r3 = return value, r8 = saved
 * start of src (for the length computation).  A fault while reading
 * from user space vectors to label 5 via curpcb->pcb_onfault.
 */
786: ENTRY(copyinstr)
787: mov.l r8, @-r15
788: mov #EFAULT, r3 /* assume there was a problem */
789: mov r4, r8
790: mov.l .L_copyinstr_curpcb, r1 /* set fault handler */
791: mov.l @r1, r2
792: mov.l .L_copyinstr_onfault, r1
793: mov.l r1, @(PCB_ONFAULT,r2)
794:
795: mov.l .L_copyinstr_VM_MAXUSER_ADDRESS, r1
796: cmp/hi r1, r4 /* bomb if src isn't in user space */
797: bt 4f
/* Clamp the copy count to the bytes left below the user-space limit. */
798: mov r1, r0
799: sub r4, r0
800: cmp/hi r6, r0 /* don't beyond user space */
801: bf 2f
802: bra 2f
803: mov r6, r0
804:
805: .align 2
806: 1:
807: mov.b @r4+, r1 /* copy str */
808: mov.b r1, @r5
809: extu.b r1, r1
810: add #1, r5
811: tst r1, r1
812: bf 2f
813: bra 3f
814: mov #0, r3 /* NUL copied: success */
815: .align 2
816: 2:
817: add #-1, r0
818: cmp/eq #-1, r0
819: bf 1b
/* Ran out of budget: hit the address limit (EFAULT stands) or maxlen. */
820: mov.l .L_copyinstr_VM_MAXUSER_ADDRESS, r1
821: cmp/hs r1, r5
822: bt 3f
823: mov #ENAMETOOLONG, r3
824:
825: 3:
826: tst r7, r7 /* set lencopied if needed */
827: bt 4f
828: mov r4, r1
829: sub r8, r1
830: mov.l r1, @r7
831: 4:
832: mov.l .L_copyinstr_curpcb, r1 /* clear fault handler */
833: mov.l @r1, r2
834: mov #0, r1
835: mov.l r1, @(PCB_ONFAULT,r2)
836:
837: mov r3, r0
838: rts
839: mov.l @r15+, r8
840:
841: 5:
842: bra 4b
843: mov #EFAULT, r3
844:
845: .align 2
846: .L_copyinstr_onfault:
847: .long 5b
848: .L_copyinstr_VM_MAXUSER_ADDRESS:
849: .long VM_MAXUSER_ADDRESS
850: .L_copyinstr_curpcb:
851: .long _C_LABEL(curpcb)
852: SET_ENTRY_SIZE(copyinstr)
853:
854: /*
855: * LINTSTUB: Func: int kcopy(const void *src, void *dst, size_t len)
856: */
/*
 * int kcopy(const void *src, void *dst, size_t len)
 * In:  r4 = src, r5 = dst, r6 = len.
 * Out: r0 = 0 on success, EFAULT if the copy faults.
 * Kernel-to-kernel copy with fault protection.  Unlike copyin/copyout,
 * the previous pcb_onfault handler is saved in r8 and restored on exit,
 * so kcopy can be used inside an already-protected region.
 */
857: ENTRY(kcopy)
858: mov.l r8, @-r15
859: mov.l r14, @-r15
860: sts.l pr, @-r15
861: mov r15, r14
862:
863: mov r4, r3
864: mov.l .L_kcopy_curpcb, r1
865: mov.l @r1, r2
866: mov.l @(PCB_ONFAULT,r2) ,r8 /* save old fault handler */
867: mov.l .L_kcopy_onfault, r1
868: mov.l r1, @(PCB_ONFAULT,r2) /* set fault handler */
869: mov.l .L_kcopy_memcpy, r1
870: mov r5, r4
871: jsr @r1 /* memcpy(dst, src, len) */
872: mov r3, r5
873: mov #0, r0
874: 1:
875: mov.l .L_kcopy_curpcb, r1 /* restore fault handler */
876: mov.l @r1, r2
877: mov.l r8, @(PCB_ONFAULT,r2)
878:
879: mov r14, r15
880: lds.l @r15+, pr
881: mov.l @r15+, r14
882: rts
883: mov.l @r15+, r8
884:
/* Fault handler target: set EFAULT, then restore pcb_onfault at 1. */
885: 2:
886: bra 1b
887: mov #EFAULT, r0
888:
889: .align 2
890: .L_kcopy_onfault:
891: .long 2b
892: .L_kcopy_curpcb:
893: .long _C_LABEL(curpcb)
894: .L_kcopy_memcpy:
895: .long _C_LABEL(memcpy)
896: SET_ENTRY_SIZE(kcopy)
897:
898:
899: #if defined(DDB)
900:
901: /*
902: * LINTSTUB: Func: int setjmp(label_t *jmpbuf)
903: */
/*
 * int setjmp(label_t *jmpbuf)
 * In:  r4 = jmpbuf.  Saves pr, r15..r8 (nine longwords) into the
 * buffer with pre-decrement stores starting from offset 9*4, so the
 * final layout (low to high) is pr, r15, r14, ..., r8 — matching the
 * post-increment loads in longjmp.  Returns 0 (r0 zeroed in the rts
 * delay slot); longjmp makes it "return" 1.
 */
904: ENTRY(setjmp)
905: add #4*9, r4
906: mov.l r8, @-r4
907: mov.l r9, @-r4
908: mov.l r10, @-r4
909: mov.l r11, @-r4
910: mov.l r12, @-r4
911: mov.l r13, @-r4
912: mov.l r14, @-r4
913: mov.l r15, @-r4
914: sts.l pr, @-r4
915: rts
916: xor r0, r0
917: SET_ENTRY_SIZE(setjmp)
918:
919: /*
920: * LINTSTUB: Func: void longjmp(label_t *jmpbuf)
921: */
/*
 * void longjmp(label_t *jmpbuf)
 * In:  r4 = jmpbuf filled by setjmp.  Reloads pr, r15..r8 in the
 * mirror order of setjmp's stores and returns through the restored
 * pr, so execution resumes at the matching setjmp call site with a
 * return value of 1 (set in the rts delay slot).
 */
922: ENTRY(longjmp)
923: lds.l @r4+, pr
924: mov.l @r4+, r15
925: mov.l @r4+, r14
926: mov.l @r4+, r13
927: mov.l @r4+, r12
928: mov.l @r4+, r11
929: mov.l @r4+, r10
930: mov.l @r4+, r9
931: mov.l @r4+, r8
932: rts
933: mov #1, r0 /* return 1 from setjmp */
934: SET_ENTRY_SIZE(longjmp)
935:
936: #endif /* DDB */
CVSweb