Annotation of sys/arch/alpha/alpha/locore.s, Revision 1.1.1.1
1.1 nbrk 1: /* $OpenBSD: locore.s,v 1.30 2007/05/28 23:10:10 beck Exp $ */
2: /* $NetBSD: locore.s,v 1.94 2001/04/26 03:10:44 ross Exp $ */
3:
4: /*-
5: * Copyright (c) 1999, 2000 The NetBSD Foundation, Inc.
6: * All rights reserved.
7: *
8: * This code is derived from software contributed to The NetBSD Foundation
9: * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
10: * NASA Ames Research Center.
11: *
12: * Redistribution and use in source and binary forms, with or without
13: * modification, are permitted provided that the following conditions
14: * are met:
15: * 1. Redistributions of source code must retain the above copyright
16: * notice, this list of conditions and the following disclaimer.
17: * 2. Redistributions in binary form must reproduce the above copyright
18: * notice, this list of conditions and the following disclaimer in the
19: * documentation and/or other materials provided with the distribution.
20: * 3. All advertising materials mentioning features or use of this software
21: * must display the following acknowledgement:
22: * This product includes software developed by the NetBSD
23: * Foundation, Inc. and its contributors.
24: * 4. Neither the name of The NetBSD Foundation nor the names of its
25: * contributors may be used to endorse or promote products derived
26: * from this software without specific prior written permission.
27: *
28: * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
29: * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
30: * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
31: * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
32: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
33: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
34: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
35: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
36: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
37: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
38: * POSSIBILITY OF SUCH DAMAGE.
39: */
40:
41: /*
42: * Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University.
43: * All rights reserved.
44: *
45: * Author: Chris G. Demetriou
46: *
47: * Permission to use, copy, modify and distribute this software and
48: * its documentation is hereby granted, provided that both the copyright
49: * notice and this permission notice appear in all copies of the
50: * software, derivative works or modified versions, and any portions
51: * thereof, and that both notices appear in supporting documentation.
52: *
53: * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
54: * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
55: * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
56: *
57: * Carnegie Mellon requests users of this software to return to
58: *
59: * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
60: * School of Computer Science
61: * Carnegie Mellon University
62: * Pittsburgh PA 15213-3890
63: *
64: * any improvements or extensions that they make and grant Carnegie the
65: * rights to redistribute these changes.
66: */
67:
68: .file 1 __FILE__
69:
70: .stabs __FILE__,100,0,0,kernel_text
71:
72: #include <machine/asm.h>
73:
74: #include "assym.h"
75:
76: .stabs __FILE__,132,0,0,kernel_text
77:
/*
 * Per-cpu accessor macros, in two variants.  The MULTIPROCESSOR
 * variant fetches the cpu_info pointer from SysValue with
 * PAL_OSF1_rdval; the uniprocessor variant addresses the single
 * static cpu_info_store directly.  Note that GET_CURPROC,
 * GET_FPCURPROC and GET_CURPCB return the ADDRESS of the field
 * (not its contents) in v0, while GET_IDLE_PCB loads the
 * idle-PCB physical address value itself into 'reg'.
 */
78: #if defined(MULTIPROCESSOR)
79:
80: /*
81: * Get various per-cpu values. A pointer to our cpu_info structure
82: * is stored in SysValue. These macros clobber v0, t0, t8..t11.
83: *
84: * All return values are in v0.
85: */
86: #define GET_CPUINFO call_pal PAL_OSF1_rdval
87:
88: #define GET_CURPROC \
89: call_pal PAL_OSF1_rdval ; \
90: addq v0, CPU_INFO_CURPROC, v0
91:
92: #define GET_FPCURPROC \
93: call_pal PAL_OSF1_rdval ; \
94: addq v0, CPU_INFO_FPCURPROC, v0
95:
96: #define GET_CURPCB \
97: call_pal PAL_OSF1_rdval ; \
98: addq v0, CPU_INFO_CURPCB, v0
99:
100: #define GET_IDLE_PCB(reg) \
101: call_pal PAL_OSF1_rdval ; \
102: ldq reg, CPU_INFO_IDLE_PCB_PADDR(v0)
103:
104: #else /* if not MULTIPROCESSOR... */
105:
106: IMPORT(cpu_info_store, CPU_INFO_SIZEOF)
107:
108: #define GET_CPUINFO lda v0, cpu_info_store
109:
110: #define GET_CURPROC lda v0, cpu_info_store + CPU_INFO_CURPROC
111:
112: #define GET_FPCURPROC lda v0, cpu_info_store + CPU_INFO_FPCURPROC
113:
114: #define GET_CURPCB lda v0, cpu_info_store + CPU_INFO_CURPCB
115:
116: #define GET_IDLE_PCB(reg) \
117: lda reg, cpu_info_store ; \
118: ldq reg, CPU_INFO_IDLE_PCB_PADDR(reg)
119: #endif
120:
121: /*
122: * Perform actions necessary to switch to a new context. The
123: * hwpcb should be in a0. Clobbers v0, t0, t8..t11, a0.
124: */
/*
 * The new hwpcb (physical address, in a0) is first recorded in
 * cpu_info.curpcb, then PAL_OSF1_swpctx performs the actual swap.
 */
125: #define SWITCH_CONTEXT \
126: /* Make a note of the context we're running on. */ \
127: GET_CURPCB ; \
128: stq a0, 0(v0) ; \
129: \
130: /* Swap in the new context. */ \
131: call_pal PAL_OSF1_swpctx
132:
133:
134: /* don't reorder instructions; paranoia. */
135: .set noreorder
136: .text
137:
/*
 * bfalse/btrue: readability aliases — branch to dst if reg is
 * zero (beq) / non-zero (bne).
 */
138: .macro bfalse reg, dst
139: beq \reg, \dst
140: .endm
141:
142: .macro btrue reg, dst
143: bne \reg, \dst
144: .endm
145:
146: /*
147: * This is for kvm_mkdb, and should be the address of the beginning
148: * of the kernel text segment (not necessarily the same as kernbase).
149: */
150: EXPORT(kernel_text)
151: .loc 1 __LINE__
152: kernel_text:
153:
154: /*
155: * bootstack: a temporary stack, for booting.
156: *
157: * Extends from 'start' down.
158: */
/* No storage is reserved here; the stack simply grows down from the label. */
159: bootstack:
160:
161: /*
162: * __start: Kernel start.
163: *
164: * Arguments:
165: * a0 is the first free page frame number (PFN)
166: * a1 is the page table base register (PTBR)
167: * a2 is the bootinfo magic number
168: * a3 is the pointer to the bootinfo structure
169: *
170: * All arguments are passed to alpha_init().
171: */
172: NESTED_NOPROFILE(__start,1,0,ra,0,0)
/* Establish gp from our own PC via the br/LDGP idiom. */
173: br pv,Lstart1
174: Lstart1: LDGP(pv)
175:
176: /* Switch to the boot stack. */
177: lda sp,bootstack
178:
179: /* Load KGP with current GP. */
180: or a0,zero,s0 /* save pfn */
181: or gp,zero,a0
182: call_pal PAL_OSF1_wrkgp /* clobbers a0, t0, t8-t11 */
183: or s0,zero,a0 /* restore pfn */
184:
185: /*
186: * Call alpha_init() to do pre-main initialization.
187: * alpha_init() gets the arguments we were called with,
188: * which are already in a0, a1, a2, a3 and a4.
189: */
190: CALL(alpha_init)
191:
192: /* Set up the virtual page table pointer. */
193: ldiq a0, VPTBASE
194: call_pal PAL_OSF1_wrvptptr /* clobbers a0, t0, t8-t11 */
195:
196: /*
197: * Switch to proc0's PCB.
198: */
199: lda a0, proc0
200: ldq a0, P_MD_PCBPADDR(a0) /* phys addr of PCB */
201: SWITCH_CONTEXT
202:
203: /*
204: * We've switched to a new page table base, so invalidate the TLB
205: * and I-stream. This happens automatically everywhere but here.
206: */
207: ldiq a0, -2 /* TBIA */
208: call_pal PAL_OSF1_tbi
209: call_pal PAL_imb
210:
211: /*
212: * All ready to go! Call main()!
213: */
214: CALL(main)
215:
216: /* This should never happen. */
217: PANIC("main() returned",Lmain_returned_pmsg)
218: END(__start)
219:
220: /**************************************************************************/
221:
222: /*
223: * Pull in the PROM interface routines; these are needed for
224: * prom printf (while bootstrapping), and for determining the
225: * boot device, etc.
226: */
227: #include <alpha/alpha/prom_disp.s>
228:
229: /**************************************************************************/
230:
231: /*
232: * Pull in the PALcode function stubs.
233: */
234: #include <alpha/alpha/pal.s>
235:
236: /**************************************************************************/
237:
238: /**************************************************************************/
239:
240: #if defined(MULTIPROCESSOR)
241: /*
242: * Pull in the multiprocessor glue.
243: */
244: #include <alpha/alpha/multiproc.s>
245: #endif /* MULTIPROCESSOR */
246:
247: /**************************************************************************/
248:
249: /**************************************************************************/
250:
251: #if defined(DDB)
252: /*
253: * Pull in debugger glue.
254: */
255: #include <alpha/alpha/debug.s>
256: #endif /* DDB */
257:
258: /**************************************************************************/
259:
260: /**************************************************************************/
261:
262: .text
263: .stabs __FILE__,132,0,0,backtolocore1 /* done with includes */
264: .loc 1 __LINE__
/* Marker label: code after the included .s files resumes here. */
265: backtolocore1:
266: /**************************************************************************/
267:
268: /*
269: * Signal "trampoline" code. Invoked from RTE setup by sendsig().
270: *
271: * On entry, stack & registers look like:
272: *
273: * a0 signal number
274: * a1 signal specific code
275: * a2 pointer to signal context frame (scp)
276: * a3 address of handler
277: * sp+0 saved hardware state
278: * .
279: * .
280: * scp+0 beginning of signal context frame
281: */
282:
/*
 * esigcode marks the end of the trampoline (its size is
 * esigcode - sigcode).  NOTE(review): presumably this span is copied
 * into the user address space by sendsig() — confirm against caller.
 */
283: NESTED(sigcode,0,0,ra,0,0)
284: lda sp, -16(sp) /* save the sigcontext pointer */
285: stq a2, 0(sp)
286: jsr ra, (t12) /* call the signal handler (t12==pv) */
287: ldq a0, 0(sp) /* get the sigcontext pointer */
288: lda sp, 16(sp)
289: CALLSYS_NOERROR(sigreturn) /* and call sigreturn() with it. */
290: mov v0, a0 /* if that failed, get error code */
291: CALLSYS_NOERROR(exit) /* and call exit() with it. */
292: XNESTED(esigcode,0)
293: END(sigcode)
294:
295: /**************************************************************************/
296:
297: /*
298: * exception_return: return from trap, exception, or syscall
299: */
300:
/* ssir: 8-byte soft-interrupt-request word, polled at label 2 below. */
301: BSS(ssir, 8)
302:
/*
 * Flow: if returning to an IPL > 0 context, just restore and rti (4).
 * Otherwise dispatch any pending soft interrupt (5), then — only when
 * returning to usermode — handle a pending AST (6), and finally set
 * the FP-enable bit to match whether curproc == fpcurproc.
 */
303: LEAF(exception_return, 1) /* XXX should be NESTED */
304: br pv, 1f
305: 1: LDGP(pv)
306:
307: #if defined(MULTIPROCESSOR)
308: /* XXX XXX XXX */
309: /*
310: * Check the current processor ID. If we're not the primary
311: * CPU, then just restore registers and bail out.
312: */
313: call_pal PAL_OSF1_whami
314: lda t0, hwrpb
315: ldq t0, 0(t0)
316: ldq t1, RPB_PRIMARY_CPU_ID(t0)
317: cmpeq t1, v0, t0
318: beq t0, 4f /* == 0: bail out now */
319: #endif
320:
321: ldq s1, (FRAME_PS * 8)(sp) /* get the saved PS */
322: and s1, ALPHA_PSL_IPL_MASK, t0 /* look at the saved IPL */
323: bne t0, 4f /* != 0: can't do AST or SIR */
324:
325: /* see if we can do an SIR */
326: 2: ldq t1, ssir /* SIR pending? */
327: bne t1, 5f /* yes */
328: /* no */
329:
330: /* check for AST */
331: 3: and s1, ALPHA_PSL_USERMODE, t0 /* are we returning to user? */
332: beq t0, 4f /* no: just return */
333: /* yes */
334:
335: /* GET_CPUINFO clobbers v0, t0, t8...t11. */
336: GET_CPUINFO
337: ldq t2, CPU_INFO_ASTPENDING(v0) /* AST pending? */
338: bne t2, 6f /* yes */
339: /* no: return & deal with FP */
340:
341: /*
342: * We are going back to usermode. Enable the FPU based on whether
343: * the current proc is fpcurproc. v0 already contains the cpu_info
344: * pointer from above.
345: */
346: ldq t1, CPU_INFO_CURPROC(v0)
347: ldq t2, CPU_INFO_FPCURPROC(v0)
348: cmpeq t1, t2, t1
349: mov zero, a0
350: cmovne t1, 1, a0
351: call_pal PAL_OSF1_wrfen
352:
353: /* restore the registers, and return */
354: 4: bsr ra, exception_restore_regs /* jmp/CALL trashes pv/t12 */
355: ldq ra,(FRAME_RA*8)(sp)
356: .set noat
357: ldq at_reg,(FRAME_AT*8)(sp)
358:
359: lda sp,(FRAME_SW_SIZE*8)(sp)
360: call_pal PAL_OSF1_rti
361: .set at
362: /* NOTREACHED */
363:
364: /* We've got a SIR */
365: 5: ldiq a0, ALPHA_PSL_IPL_SOFT
366: call_pal PAL_OSF1_swpipl
367: mov v0, s2 /* remember old IPL */
368: CALL(softintr_dispatch)
369:
370: /* SIR handled; restore IPL and check again */
371: mov s2, a0
372: call_pal PAL_OSF1_swpipl
373: br 2b
374:
375: /* We've got an AST */
376: 6: ldiq a0, ALPHA_PSL_IPL_0 /* drop IPL to zero */
377: call_pal PAL_OSF1_swpipl
378: mov v0, s2 /* remember old IPL */
379:
380: mov sp, a0 /* only arg is frame */
381: CALL(ast)
382:
383: /* AST handled; restore IPL and check again */
384: mov s2, a0
385: call_pal PAL_OSF1_swpipl
386: br 3b
387:
388: END(exception_return)
389:
/*
 * exception_save_regs: store v0, a3-a5, s0-s6 and t0-t12 into the
 * trapframe at sp.  Reached via bsr (ra holds the return point);
 * touches no register other than the ones it stores.
 */
390: LEAF(exception_save_regs, 0)
391: stq v0,(FRAME_V0*8)(sp)
392: stq a3,(FRAME_A3*8)(sp)
393: stq a4,(FRAME_A4*8)(sp)
394: stq a5,(FRAME_A5*8)(sp)
395: stq s0,(FRAME_S0*8)(sp)
396: stq s1,(FRAME_S1*8)(sp)
397: stq s2,(FRAME_S2*8)(sp)
398: stq s3,(FRAME_S3*8)(sp)
399: stq s4,(FRAME_S4*8)(sp)
400: stq s5,(FRAME_S5*8)(sp)
401: stq s6,(FRAME_S6*8)(sp)
402: stq t0,(FRAME_T0*8)(sp)
403: stq t1,(FRAME_T1*8)(sp)
404: stq t2,(FRAME_T2*8)(sp)
405: stq t3,(FRAME_T3*8)(sp)
406: stq t4,(FRAME_T4*8)(sp)
407: stq t5,(FRAME_T5*8)(sp)
408: stq t6,(FRAME_T6*8)(sp)
409: stq t7,(FRAME_T7*8)(sp)
410: stq t8,(FRAME_T8*8)(sp)
411: stq t9,(FRAME_T9*8)(sp)
412: stq t10,(FRAME_T10*8)(sp)
413: stq t11,(FRAME_T11*8)(sp)
414: stq t12,(FRAME_T12*8)(sp)
415: RET
416: END(exception_save_regs)
417:
/*
 * exception_restore_regs: inverse of exception_save_regs — reload
 * v0, a3-a5, s0-s6 and t0-t12 from the trapframe at sp.  ra and at
 * are restored separately by the caller (see exception_return).
 */
418: LEAF(exception_restore_regs, 0)
419: ldq v0,(FRAME_V0*8)(sp)
420: ldq a3,(FRAME_A3*8)(sp)
421: ldq a4,(FRAME_A4*8)(sp)
422: ldq a5,(FRAME_A5*8)(sp)
423: ldq s0,(FRAME_S0*8)(sp)
424: ldq s1,(FRAME_S1*8)(sp)
425: ldq s2,(FRAME_S2*8)(sp)
426: ldq s3,(FRAME_S3*8)(sp)
427: ldq s4,(FRAME_S4*8)(sp)
428: ldq s5,(FRAME_S5*8)(sp)
429: ldq s6,(FRAME_S6*8)(sp)
430: ldq t0,(FRAME_T0*8)(sp)
431: ldq t1,(FRAME_T1*8)(sp)
432: ldq t2,(FRAME_T2*8)(sp)
433: ldq t3,(FRAME_T3*8)(sp)
434: ldq t4,(FRAME_T4*8)(sp)
435: ldq t5,(FRAME_T5*8)(sp)
436: ldq t6,(FRAME_T6*8)(sp)
437: ldq t7,(FRAME_T7*8)(sp)
438: ldq t8,(FRAME_T8*8)(sp)
439: ldq t9,(FRAME_T9*8)(sp)
440: ldq t10,(FRAME_T10*8)(sp)
441: ldq t11,(FRAME_T11*8)(sp)
442: ldq t12,(FRAME_T12*8)(sp)
443: RET
444: END(exception_restore_regs)
445:
446: /**************************************************************************/
447:
448: /*
449: * XentArith:
450: * System arithmetic trap entry point.
451: */
452:
453: PALVECT(XentArith) /* setup frame, save registers */
454:
/* trap(a0, a1, a2, ALPHA_KENTRY_ARITH, frame) */
455: /* a0, a1, & a2 already set up */
456: ldiq a3, ALPHA_KENTRY_ARITH
457: mov sp, a4 ; .loc 1 __LINE__
458: CALL(trap)
459:
460: jmp zero, exception_return
461: END(XentArith)
462:
463: /**************************************************************************/
464:
465: /*
466: * XentIF:
467: * System instruction fault trap entry point.
468: */
469:
470: PALVECT(XentIF) /* setup frame, save registers */
471:
/* trap(a0, a1, a2, ALPHA_KENTRY_IF, frame) */
472: /* a0, a1, & a2 already set up */
473: ldiq a3, ALPHA_KENTRY_IF
474: mov sp, a4 ; .loc 1 __LINE__
475: CALL(trap)
476: jmp zero, exception_return
477: END(XentIF)
478:
479: /**************************************************************************/
480:
481: /*
482: * XentInt:
483: * System interrupt entry point.
484: */
485:
486: PALVECT(XentInt) /* setup frame, save registers */
487:
/* interrupt(a0, a1, a2, frame) — note: no kentry code for interrupts */
488: /* a0, a1, & a2 already set up */
489: mov sp, a3 ; .loc 1 __LINE__
490: CALL(interrupt)
491: jmp zero, exception_return
492: END(XentInt)
493:
494: /**************************************************************************/
495:
496: /*
497: * XentMM:
498: * System memory management fault entry point.
499: */
500:
501: PALVECT(XentMM) /* setup frame, save registers */
502:
/* trap(a0, a1, a2, ALPHA_KENTRY_MM, frame) */
503: /* a0, a1, & a2 already set up */
504: ldiq a3, ALPHA_KENTRY_MM
505: mov sp, a4 ; .loc 1 __LINE__
506: CALL(trap)
507:
508: jmp zero, exception_return
509: END(XentMM)
510:
511: /**************************************************************************/
512:
513: /*
514: * XentSys:
515: * System call entry point.
516: */
517:
518: ESETUP(XentSys) ; .loc 1 __LINE__
519:
/*
 * Save the registers a syscall needs preserved: v0 (for restart),
 * the callee-saved s-registers, the argument registers, and ra.
 */
520: stq v0,(FRAME_V0*8)(sp) /* in case we need to restart */
521: stq s0,(FRAME_S0*8)(sp)
522: stq s1,(FRAME_S1*8)(sp)
523: stq s2,(FRAME_S2*8)(sp)
524: stq s3,(FRAME_S3*8)(sp)
525: stq s4,(FRAME_S4*8)(sp)
526: stq s5,(FRAME_S5*8)(sp)
527: stq s6,(FRAME_S6*8)(sp)
528: stq a0,(FRAME_A0*8)(sp)
529: stq a1,(FRAME_A1*8)(sp)
530: stq a2,(FRAME_A2*8)(sp)
531: stq a3,(FRAME_A3*8)(sp)
532: stq a4,(FRAME_A4*8)(sp)
533: stq a5,(FRAME_A5*8)(sp)
534: stq ra,(FRAME_RA*8)(sp)
535:
536: /* syscall number, passed in v0, is first arg, frame pointer second */
537: mov v0,a0
538: mov sp,a1 ; .loc 1 __LINE__
539: CALL(syscall)
540:
541: jmp zero, exception_return
542: END(XentSys)
543:
544: /**************************************************************************/
545:
546: /*
547: * XentUna:
548: * System unaligned access entry point.
549: */
550:
/*
 * Unlike the PALVECT entries above, this entry builds its own
 * software frame (sp adjust + at/ra stores) before saving the rest
 * of the registers via exception_save_regs.
 */
551: LEAF(XentUna, 3) /* XXX should be NESTED */
552: .set noat
553: lda sp,-(FRAME_SW_SIZE*8)(sp)
554: stq at_reg,(FRAME_AT*8)(sp)
555: .set at
556: stq ra,(FRAME_RA*8)(sp)
557: bsr ra, exception_save_regs /* jmp/CALL trashes pv/t12 */
558:
/* trap(a0, a1, a2, ALPHA_KENTRY_UNA, frame) */
559: /* a0, a1, & a2 already set up */
560: ldiq a3, ALPHA_KENTRY_UNA
561: mov sp, a4 ; .loc 1 __LINE__
562: CALL(trap)
563:
564: jmp zero, exception_return
565: END(XentUna)
566:
567: /**************************************************************************/
568:
569: /*
570: * savefpstate: Save a process's floating point state.
571: *
572: * Arguments:
573: * a0 'struct fpstate *' to save into
574: */
575:
576: LEAF(savefpstate, 1)
577: LDGP(pv)
/* $f0-$f30 are saved; $f31 is the architectural zero register. */
578: /* save all of the FP registers */
579: lda t1, FPREG_FPR_REGS(a0) /* get address of FP reg. save area */
580: stt $f0, (0 * 8)(t1) /* save first register, using hw name */
581: stt $f1, (1 * 8)(t1) /* etc. */
582: stt $f2, (2 * 8)(t1)
583: stt $f3, (3 * 8)(t1)
584: stt $f4, (4 * 8)(t1)
585: stt $f5, (5 * 8)(t1)
586: stt $f6, (6 * 8)(t1)
587: stt $f7, (7 * 8)(t1)
588: stt $f8, (8 * 8)(t1)
589: stt $f9, (9 * 8)(t1)
590: stt $f10, (10 * 8)(t1)
591: stt $f11, (11 * 8)(t1)
592: stt $f12, (12 * 8)(t1)
593: stt $f13, (13 * 8)(t1)
594: stt $f14, (14 * 8)(t1)
595: stt $f15, (15 * 8)(t1)
596: stt $f16, (16 * 8)(t1)
597: stt $f17, (17 * 8)(t1)
598: stt $f18, (18 * 8)(t1)
599: stt $f19, (19 * 8)(t1)
600: stt $f20, (20 * 8)(t1)
601: stt $f21, (21 * 8)(t1)
602: stt $f22, (22 * 8)(t1)
603: stt $f23, (23 * 8)(t1)
604: stt $f24, (24 * 8)(t1)
605: stt $f25, (25 * 8)(t1)
606: stt $f26, (26 * 8)(t1)
607: stt $f27, (27 * 8)(t1)
608: stt $f28, (28 * 8)(t1)
609: stt $f29, (29 * 8)(t1)
610: stt $f30, (30 * 8)(t1)
611:
612: /*
613: * Then save the FPCR; note that the necessary 'trapb's are taken
614: * care of on kernel entry and exit.
615: */
616: mf_fpcr ft0
617: stt ft0, FPREG_FPR_CR(a0) /* store to FPCR save area */
618:
619: RET
620: END(savefpstate)
621:
622: /**************************************************************************/
623:
624: /*
625: * restorefpstate: Restore a process's floating point state.
626: *
627: * Arguments:
628: * a0 'struct fpstate *' to restore from
629: */
630:
631: LEAF(restorefpstate, 1)
632: LDGP(pv)
633: /*
634: * Restore the FPCR; note that the necessary 'trapb's are taken care of
635: * on kernel entry and exit.
636: */
637: ldt ft0, FPREG_FPR_CR(a0) /* load from FPCR save area */
638: mt_fpcr ft0
639:
640: /* Restore all of the FP registers. */
641: lda t1, FPREG_FPR_REGS(a0) /* get address of FP reg. save area */
642: ldt $f0, (0 * 8)(t1) /* restore first reg., using hw name */
643: ldt $f1, (1 * 8)(t1) /* etc. */
644: ldt $f2, (2 * 8)(t1)
645: ldt $f3, (3 * 8)(t1)
646: ldt $f4, (4 * 8)(t1)
647: ldt $f5, (5 * 8)(t1)
648: ldt $f6, (6 * 8)(t1)
649: ldt $f7, (7 * 8)(t1)
650: ldt $f8, (8 * 8)(t1)
651: ldt $f9, (9 * 8)(t1)
652: ldt $f10, (10 * 8)(t1)
653: ldt $f11, (11 * 8)(t1)
654: ldt $f12, (12 * 8)(t1)
655: ldt $f13, (13 * 8)(t1)
656: ldt $f14, (14 * 8)(t1)
657: ldt $f15, (15 * 8)(t1)
658: ldt $f16, (16 * 8)(t1)
659: ldt $f17, (17 * 8)(t1)
660: ldt $f18, (18 * 8)(t1)
661: ldt $f19, (19 * 8)(t1)
662: ldt $f20, (20 * 8)(t1)
663: ldt $f21, (21 * 8)(t1)
664: ldt $f22, (22 * 8)(t1)
665: ldt $f23, (23 * 8)(t1)
666: ldt $f24, (24 * 8)(t1)
667: ldt $f25, (25 * 8)(t1)
668: ldt $f26, (26 * 8)(t1)
669: ldt $f27, (27 * 8)(t1)
/* NOTE(review): the noat bracket below is in the original source;
   its purpose for an FP register load is not evident from here. */
670: .set noat
671: ldt $f28, (28 * 8)(t1)
672: .set at
673: ldt $f29, (29 * 8)(t1)
674: ldt $f30, (30 * 8)(t1)
675:
676: RET
677: END(restorefpstate)
678:
679: /**************************************************************************/
680:
681: /*
682: * savectx: save process context, i.e. callee-saved registers
683: *
684: * Note that savectx() only works for processes other than curproc,
685: * since cpu_switch will copy over the info saved here. (It _can_
686: * sanely be used for curproc iff cpu_switch won't be called again, e.g.
687: * if called from boot().)
688: *
689: * Arguments:
690: * a0 'struct user *' of the process that needs its context saved
691: *
692: * Return:
693: * v0 0. (note that for child processes, it seems
694: * like savectx() returns 1, because the return address
695: * in the PCB is set to the return address from savectx().)
696: */
697:
698: LEAF(savectx, 1)
699: br pv, 1f
700: 1: LDGP(pv)
701: stq sp, U_PCB_HWPCB_KSP(a0) /* store sp */
702: stq s0, U_PCB_CONTEXT+(0 * 8)(a0) /* store s0 - s6 */
703: stq s1, U_PCB_CONTEXT+(1 * 8)(a0)
704: stq s2, U_PCB_CONTEXT+(2 * 8)(a0)
705: stq s3, U_PCB_CONTEXT+(3 * 8)(a0)
706: stq s4, U_PCB_CONTEXT+(4 * 8)(a0)
707: stq s5, U_PCB_CONTEXT+(5 * 8)(a0)
708: stq s6, U_PCB_CONTEXT+(6 * 8)(a0)
709: stq ra, U_PCB_CONTEXT+(7 * 8)(a0) /* store ra */
710: call_pal PAL_OSF1_rdps /* NOTE: doesn't kill a0 */
711: stq v0, U_PCB_CONTEXT+(8 * 8)(a0) /* store ps, for ipl */
712:
/* Same layout as the inline save in cpu_switch(): s0-s6, ra, ps. */
713: mov zero, v0
714: RET
715: END(savectx)
716:
717: /**************************************************************************/
718:
/* whichqs: 32-bit run-queue bitmask maintained by the scheduler. */
719: IMPORT(whichqs, 4)
720:
721: /*
722: * When no processes are on the runq, cpu_switch branches to idle
723: * to wait for something to come ready.
724: * Note: this is really a part of cpu_switch() but defined here for kernel
725: * profiling.
726: */
727: LEAF(idle, 0)
728: br pv, 1f
729: 1: LDGP(pv)
730: /* Note: GET_CURPROC clobbers v0, t0, t8...t11. */
731: GET_CURPROC
732: stq zero, 0(v0) /* curproc <- NULL for stats */
733: #if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
734: CALL(sched_unlock_idle) /* release sched_lock */
735: #endif
/* Spin with interrupts enabled until whichqs shows a runnable proc. */
736: mov zero, a0 /* enable all interrupts */
737: call_pal PAL_OSF1_swpipl
738: 2: ldl t0, whichqs /* look for non-empty queue */
739: beq t0, 2b
740: ldiq a0, ALPHA_PSL_IPL_HIGH /* disable all interrupts */
741: call_pal PAL_OSF1_swpipl
742: #if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
743: CALL(sched_lock_idle) /* acquire sched_lock */
744: #endif
745: jmp zero, cpu_switch_queuescan /* jump back into the fire */
746: END(idle)
747:
748: /*
749: * cpu_switch()
750: * Find the highest priority process and resume it.
751: */
/*
 * Register roles across the switch (callee-saved, so they survive
 * the CALLs below):
 *   s0 = old proc (NULL when entered from switch_exit)
 *   s1 = old proc's U-area
 *   s2 = new proc
 *   s3 = new proc's PCB physical address
 */
752: LEAF(cpu_switch, 0)
753: LDGP(pv)
754: /*
755: * do an inline savectx(), to save old context
756: * Note: GET_CURPROC clobbers v0, t0, t8...t11.
757: */
758: GET_CURPROC
759: ldq a0, 0(v0)
760: ldq a1, P_ADDR(a0)
761: /* NOTE: ksp is stored by the swpctx */
762: stq s0, U_PCB_CONTEXT+(0 * 8)(a1) /* store s0 - s6 */
763: stq s1, U_PCB_CONTEXT+(1 * 8)(a1)
764: stq s2, U_PCB_CONTEXT+(2 * 8)(a1)
765: stq s3, U_PCB_CONTEXT+(3 * 8)(a1)
766: stq s4, U_PCB_CONTEXT+(4 * 8)(a1)
767: stq s5, U_PCB_CONTEXT+(5 * 8)(a1)
768: stq s6, U_PCB_CONTEXT+(6 * 8)(a1)
769: stq ra, U_PCB_CONTEXT+(7 * 8)(a1) /* store ra */
770: call_pal PAL_OSF1_rdps /* NOTE: doesn't kill a0 */
771: stq v0, U_PCB_CONTEXT+(8 * 8)(a1) /* store ps, for ipl */
772:
773: mov a0, s0 /* save old curproc */
774: mov a1, s1 /* save old U-area */
775:
/* Entry point used by idle() and switch_exit(). */
776: cpu_switch_queuescan:
777: br pv, 1f
778: 1: LDGP(pv)
779: ldl t0, whichqs /* look for non-empty queue */
780: beq t0, idle /* and if none, go idle */
781: mov t0, t3 /* t3 = saved whichqs */
782: mov zero, t2 /* t2 = lowest bit set */
783: blbs t0, 3f /* if low bit set, done! */
784:
785: 2: srl t0, 1, t0 /* try next bit */
786: addq t2, 1, t2
787: blbc t0, 2b /* if clear, try again */
788:
789: 3: /*
790: * Remove process from queue
791: */
792: lda t1, qs /* get queues */
793: sll t2, 4, t0 /* queue head is 16 bytes */
794: addq t1, t0, t0 /* t0 = qp = &qs[firstbit] */
795:
796: ldq t4, PH_LINK(t0) /* t4 = p = highest pri proc */
797: bne t4, 4f /* make sure p != NULL */
798: PANIC("cpu_switch",Lcpu_switch_pmsg) /* nothing in queue! */
799:
800: 4:
801: ldq t5, P_FORW(t4) /* t5 = p->p_forw */
802: stq t5, PH_LINK(t0) /* qp->ph_link = p->p_forw */
803: stq t0, P_BACK(t5) /* p->p_forw->p_back = qp */
804: stq zero, P_BACK(t4) /* firewall: p->p_back = NULL */
805: cmpeq t0, t5, t0 /* see if queue is empty */
806: beq t0, 5f /* nope, it's not! */
807:
808: ldiq t0, 1 /* compute bit in whichqs */
809: sll t0, t2, t0
810: xor t3, t0, t3 /* clear bit in whichqs */
811: stl t3, whichqs
812:
813: 5:
814: mov t4, s2 /* save new proc */
815: ldq s3, P_MD_PCBPADDR(s2) /* save new pcbpaddr */
816: #if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
817: /*
818: * Done mucking with the run queues, release the
819: * scheduler lock, but keep interrupts out.
820: */
821: CALL(sched_unlock_idle)
822: #endif
823:
824: /*
825: * Check to see if we're switching to ourself. If we are,
826: * don't bother loading the new context.
827: *
828: * Note that even if we re-enter cpu_switch() from idle(),
829: * s0 will still contain the old curproc value because any
830: * users of that register between then and now must have
831: * saved it. Also note that switch_exit() ensures that
832: * s0 is clear before jumping here to find a new process.
833: */
834: cmpeq s0, s2, t0 /* oldproc == newproc? */
835: bne t0, 7f /* Yes! Skip! */
836:
837: /*
838: * Deactivate the old address space before activating the
839: * new one. We need to do this before activating the
840: * new process's address space in the event that new
841: * process is using the same vmspace as the old. If we
842: * do this after we activate, then we might end up
843: * incorrectly marking the pmap inactive!
844: *
845: * We don't deactivate if we came here from switch_exit
846: * (old pmap no longer exists; vmspace has been freed).
847: * oldproc will be NULL in this case. We have actually
848: * taken care of calling pmap_deactivate() in cpu_exit(),
849: * before the vmspace went away.
850: */
851: beq s0, 6f
852:
853: mov s0, a0 /* pmap_deactivate(oldproc) */
854: CALL(pmap_deactivate)
855:
856: 6: /*
857: * Activate the new process's address space and perform
858: * the actual context swap.
859: */
860:
861: mov s2, a0 /* pmap_activate(p) */
862: CALL(pmap_activate)
863:
864: mov s3, a0 /* swap the context */
865: SWITCH_CONTEXT
866:
867: 7: /*
868: * Now that the switch is done, update curproc and other
869: * globals. We must do this even if switching to ourselves
870: * because we might have re-entered cpu_switch() from idle(),
871: * in which case curproc would be NULL.
872: *
873: * Note: GET_CPUINFO clobbers v0, t0, t8...t11.
874: */
/*
 * Byte store via the ldq_u/insbl/mskbl/stq_u read-modify-write
 * sequence (no byte stores on pre-BWX Alphas).  The span between
 * __bwx_switch0 and __bwx_switch1 is the patch site that the BWX
 * 'stb' version at __bwx_switch2 (below) is copied over.
 */
875: EXPORT(__bwx_switch0)
876: addq s2, P_STAT, t3 /* p->p_stat = SONPROC */
877: ldq_u t1, 0(t3)
878: ldiq t0, SONPROC
879: insbl t0, t3, t0
880: mskbl t1, t3, t1
881: or t0, t1, t0
882: stq_u t0, 0(t3)
883: EXPORT(__bwx_switch1)
884:
885: GET_CPUINFO
886: /* p->p_cpu initialized in fork1() for single-processor */
887: #if defined(MULTIPROCESSOR)
888: stq v0, P_CPU(s2) /* p->p_cpu = curcpu() */
889: #endif
890: stq s2, CPU_INFO_CURPROC(v0) /* curproc = p */
891: stq zero, CPU_INFO_WANT_RESCHED(v0) /* we've rescheduled */
892:
893: /*
894: * Now running on the new u struct.
895: * Restore registers and return.
896: */
897: ldq t0, P_ADDR(s2)
898:
899: /* NOTE: ksp is restored by the swpctx */
900: ldq s0, U_PCB_CONTEXT+(0 * 8)(t0) /* restore s0 - s6 */
901: ldq s1, U_PCB_CONTEXT+(1 * 8)(t0)
902: ldq s2, U_PCB_CONTEXT+(2 * 8)(t0)
903: ldq s3, U_PCB_CONTEXT+(3 * 8)(t0)
904: ldq s4, U_PCB_CONTEXT+(4 * 8)(t0)
905: ldq s5, U_PCB_CONTEXT+(5 * 8)(t0)
906: ldq s6, U_PCB_CONTEXT+(6 * 8)(t0)
907: ldq ra, U_PCB_CONTEXT+(7 * 8)(t0) /* restore ra */
908: ldq a0, U_PCB_CONTEXT+(8 * 8)(t0) /* restore ipl */
909: and a0, ALPHA_PSL_IPL_MASK, a0
910: call_pal PAL_OSF1_swpipl
911:
912: ldiq v0, 1 /* possible ret to savectx() */
913: RET
914: END(cpu_switch)
915:
916: #ifndef SMALL_KERNEL
917: /*
918: * BWX-enhanced version of the p->p_stat assignment, to be copied
919: * over the __bwx_switch0 area.
920: *
921: * Do not put anything between the end of cpu_switch and this!
922: */
923: EXPORT(__bwx_switch2)
924: ldiq t0, SONPROC /* p->p_stat = SONPROC */
925: stb t0, P_STAT(s2)
926: EXPORT(__bwx_switch3)
927: #endif
928:
929: /*
930: * switch_trampoline()
931: *
932: * Arrange for a function to be invoked neatly, after a cpu_fork().
933: *
934: * Invokes the function specified by the s0 register with the return
935: * address specified by the s1 register and with one argument specified
936: * by the s2 register.
937: */
938: LEAF(switch_trampoline, 0)
939: #if defined(MULTIPROCESSOR)
940: CALL(proc_trampoline_mp)
941: #endif
/* Move s0/s1/s2 into the pv/ra/a0 slots the callee expects, then go. */
942: mov s0, pv
943: mov s1, ra
944: mov s2, a0
945: jmp zero, (pv)
946: END(switch_trampoline)
947:
948: /*
949: * switch_exit(struct proc *p)
950: * Make the named process exit. Partially switch to our idle thread
951: * (we don't update curproc or restore registers), and jump into the middle
952: * of cpu_switch to switch into a new process. The process reaper will
953: * free the dead process's VM resources. MUST BE CALLED AT SPLHIGH.
954: */
955: LEAF(switch_exit, 1)
956: LDGP(pv)
957:
958: /* save the exiting proc pointer */
959: mov a0, s2
960:
961: /* Switch to our idle stack. */
962: GET_IDLE_PCB(a0) /* clobbers v0, t0, t8-t11 */
963: SWITCH_CONTEXT
964:
965: /*
966: * Now running as idle thread, except for the value of 'curproc' and
967: * the saved regs.
968: */
969:
970: /* Schedule the vmspace and stack to be freed. */
971: mov s2, a0
972: CALL(exit2)
973:
974: #if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
975: CALL(sched_lock_idle) /* acquire sched_lock */
976: #endif
977:
978: /*
979: * Now jump back into the middle of cpu_switch(). Note that
980: * we must clear s0 to guarantee that the check for switching
981: * to ourselves in cpu_switch() will fail. This is safe since
982: * s0 will be restored when a new process is resumed.
983: */
984: mov zero, s0
985: jmp zero, cpu_switch_queuescan
986: END(switch_exit)
987:
988: /**************************************************************************/
989:
990: /*
991: * Copy a null-terminated string within the kernel's address space.
992: * If lenp is not NULL, store the number of chars copied in *lenp
993: *
994: * int copystr(char *from, char *to, size_t len, size_t *lenp);
995: */
/*
 * Returns 0 on success, ENAMETOOLONG if len was exhausted before the
 * NUL (including len == 0).  Bytes are written with the
 * ldq_u/insbl/mskbl/stq_u read-modify-write idiom, since pre-BWX
 * Alpha CPUs have no byte store instruction.
 */
996: LEAF(copystr, 4)
997: LDGP(pv)
998:
999: mov a2, t0 /* t0 = i = len */
1000: bne a2, 1f /* if (len != 0), proceed */
1001: ldiq t1, 1 /* else bail */
1002: br zero, 2f
1003:
1004: 1: ldq_u t1, 0(a0) /* t1 = *from */
1005: extbl t1, a0, t1
1006: ldq_u t3, 0(a1) /* set up t2 with quad around *to */
1007: insbl t1, a1, t2
1008: mskbl t3, a1, t3
1009: or t3, t2, t3 /* add *from to quad around *to */
1010: stq_u t3, 0(a1) /* write out that quad */
1011:
1012: subl a2, 1, a2 /* len-- */
1013: beq t1, 2f /* if (*from == 0), bail out */
1014: addq a1, 1, a1 /* to++ */
1015: addq a0, 1, a0 /* from++ */
1016: bne a2, 1b /* if (len != 0) copy more */
1017:
1018: 2: beq a3, 3f /* if (lenp != NULL) */
1019: subl t0, a2, t0 /* *lenp = (i - len) */
1020: stq t0, 0(a3)
1021: 3: beq t1, 4f /* *from == '\0'; leave quietly */
1022:
1023: ldiq v0, ENAMETOOLONG /* *from != '\0'; error. */
1024: RET
1025:
1026: 4: mov zero, v0 /* return 0. */
1027: RET
1028: END(copystr)
1029:
/*
 * copyinstr(from, to, len, lenp): like copystr, but the source is a
 * user-space address.  Rejects sources at or above VM_MAX_ADDRESS,
 * and arms pcb_onfault with 'copyerr' around the copy so that a
 * fault on the user address returns an error instead of panicking.
 */
1030: NESTED(copyinstr, 4, 16, ra, IM_RA|IM_S0, 0)
1031: LDGP(pv)
1032: lda sp, -16(sp) /* set up stack frame */
1033: stq ra, (16-8)(sp) /* save ra */
1034: stq s0, (16-16)(sp) /* save s0 */
1035: ldiq t0, VM_MAX_ADDRESS /* make sure that src addr */
1036: cmpult a0, t0, t1 /* is in user space. */
1037: beq t1, copyerr /* if it's not, error out. */
1038: /* Note: GET_CURPROC clobbers v0, t0, t8...t11. */
1039: GET_CURPROC
1040: mov v0, s0
1041: lda v0, copyerr /* set up fault handler. */
1042: .set noat
1043: ldq at_reg, 0(s0)
1044: ldq at_reg, P_ADDR(at_reg)
1045: stq v0, U_PCB_ONFAULT(at_reg)
1046: .set at
1047: CALL(copystr) /* do the copy. */
1048: .set noat
1049: ldq at_reg, 0(s0) /* kill the fault handler. */
1050: ldq at_reg, P_ADDR(at_reg)
1051: stq zero, U_PCB_ONFAULT(at_reg)
1052: .set at
1053: ldq ra, (16-8)(sp) /* restore ra. */
1054: ldq s0, (16-16)(sp) /* restore s0. */
1055: lda sp, 16(sp) /* kill stack frame. */
1056: RET /* v0 left over from copystr */
1057: END(copyinstr)
1058:
/*
 * copyoutstr(from, to, len, lenp)
 *
 * Copy a null-terminated string from kernel space to user space by
 * installing a fault handler (copyerr) in the pcb and calling
 * copystr.  Returns EFAULT (via copyerr) if the destination address
 * is outside user space or the copy faults, otherwise whatever
 * copystr returns (0 or ENAMETOOLONG).
 */
NESTED(copyoutstr, 4, 16, ra, IM_RA|IM_S0, 0)
	LDGP(pv)
	lda	sp, -16(sp)			/* set up stack frame */
	stq	ra, (16-8)(sp)			/* save ra */
	stq	s0, (16-16)(sp)			/* save s0 */
	ldiq	t0, VM_MAX_ADDRESS		/* make sure that dest addr */
	cmpult	a1, t0, t1			/* is in user space. */
	beq	t1, copyerr			/* if it's not, error out. */
	/* Note: GET_CURPROC clobbers v0, t0, t8...t11. */
	GET_CURPROC
	mov	v0, s0				/* s0 = addr of curproc ptr */
	lda	v0, copyerr			/* set up fault handler. */
	.set	noat
	ldq	at_reg, 0(s0)			/* curproc */
	ldq	at_reg, P_ADDR(at_reg)		/* -> u-area */
	stq	v0, U_PCB_ONFAULT(at_reg)	/* pcb_onfault = copyerr */
	.set	at
	CALL(copystr)				/* do the copy. */
	.set	noat
	ldq	at_reg, 0(s0)			/* kill the fault handler. */
	ldq	at_reg, P_ADDR(at_reg)
	stq	zero, U_PCB_ONFAULT(at_reg)
	.set	at
	ldq	ra, (16-8)(sp)			/* restore ra. */
	ldq	s0, (16-16)(sp)			/* restore s0. */
	lda	sp, 16(sp)			/* kill stack frame. */
	RET					/* v0 left over from copystr */
	END(copyoutstr)
1087:
1088: /*
1089: * Copy a bytes within the kernel's address space.
1090: *
1091: * Although bcopy() is not specified to handle overlapping regions,
1092: * this version does do so.
1093: *
1094: * void bcopy(char *from, char *to, size_t len);
1095: */
1096: LEAF(memcpy,3)
1097: cmoveq zero,a0,t5
1098: cmoveq zero,a1,a0
1099: cmoveq zero,t5,a1
1100:
1101: XLEAF(bcopy,3)
1102: XLEAF(ovbcopy,3)
1103:
1104: /* Check for negative length */
1105: ble a2,bcopy_done
1106:
1107: /* Check for overlap */
1108: subq a1,a0,t5
1109: cmpult t5,a2,t5
1110: bne t5,bcopy_overlap
1111:
1112: /* a3 = end address */
1113: addq a0,a2,a3
1114:
1115: /* Get the first word */
1116: ldq_u t2,0(a0)
1117:
1118: /* Do they have the same alignment? */
1119: xor a0,a1,t0
1120: and t0,7,t0
1121: and a1,7,t1
1122: bne t0,bcopy_different_alignment
1123:
1124: /* src & dst have same alignment */
1125: beq t1,bcopy_all_aligned
1126:
1127: ldq_u t3,0(a1)
1128: addq a2,t1,a2
1129: mskqh t2,a0,t2
1130: mskql t3,a0,t3
1131: or t2,t3,t2
1132:
1133: /* Dst is 8-byte aligned */
1134:
1135: bcopy_all_aligned:
1136: /* If less than 8 bytes,skip loop */
1137: subq a2,1,t0
1138: and a2,7,a2
1139: bic t0,7,t0
1140: beq t0,bcopy_samealign_lp_end
1141:
1142: bcopy_samealign_lp:
1143: stq_u t2,0(a1)
1144: addq a1,8,a1
1145: ldq_u t2,8(a0)
1146: subq t0,8,t0
1147: addq a0,8,a0
1148: bne t0,bcopy_samealign_lp
1149:
1150: bcopy_samealign_lp_end:
1151: /* If we're done, exit */
1152: bne a2,bcopy_small_left
1153: stq_u t2,0(a1)
1154: RET
1155:
1156: bcopy_small_left:
1157: mskql t2,a2,t4
1158: ldq_u t3,0(a1)
1159: mskqh t3,a2,t3
1160: or t4,t3,t4
1161: stq_u t4,0(a1)
1162: RET
1163:
1164: bcopy_different_alignment:
1165: /*
1166: * this is the fun part
1167: */
1168: addq a0,a2,a3
1169: cmpule a2,8,t0
1170: bne t0,bcopy_da_finish
1171:
1172: beq t1,bcopy_da_noentry
1173:
1174: /* Do the initial partial word */
1175: subq zero,a1,t0
1176: and t0,7,t0
1177: ldq_u t3,7(a0)
1178: extql t2,a0,t2
1179: extqh t3,a0,t3
1180: or t2,t3,t5
1181: insql t5,a1,t5
1182: ldq_u t6,0(a1)
1183: mskql t6,a1,t6
1184: or t5,t6,t5
1185: stq_u t5,0(a1)
1186: addq a0,t0,a0
1187: addq a1,t0,a1
1188: subq a2,t0,a2
1189: ldq_u t2,0(a0)
1190:
1191: bcopy_da_noentry:
1192: subq a2,1,t0
1193: bic t0,7,t0
1194: and a2,7,a2
1195: beq t0,bcopy_da_finish2
1196:
1197: bcopy_da_lp:
1198: ldq_u t3,7(a0)
1199: addq a0,8,a0
1200: extql t2,a0,t4
1201: extqh t3,a0,t5
1202: subq t0,8,t0
1203: or t4,t5,t5
1204: stq t5,0(a1)
1205: addq a1,8,a1
1206: beq t0,bcopy_da_finish1
1207: ldq_u t2,7(a0)
1208: addq a0,8,a0
1209: extql t3,a0,t4
1210: extqh t2,a0,t5
1211: subq t0,8,t0
1212: or t4,t5,t5
1213: stq t5,0(a1)
1214: addq a1,8,a1
1215: bne t0,bcopy_da_lp
1216:
1217: bcopy_da_finish2:
1218: /* Do the last new word */
1219: mov t2,t3
1220:
1221: bcopy_da_finish1:
1222: /* Do the last partial word */
1223: ldq_u t2,-1(a3)
1224: extql t3,a0,t3
1225: extqh t2,a0,t2
1226: or t2,t3,t2
1227: br zero,bcopy_samealign_lp_end
1228:
1229: bcopy_da_finish:
1230: /* Do the last word in the next source word */
1231: ldq_u t3,-1(a3)
1232: extql t2,a0,t2
1233: extqh t3,a0,t3
1234: or t2,t3,t2
1235: insqh t2,a1,t3
1236: insql t2,a1,t2
1237: lda t4,-1(zero)
1238: mskql t4,a2,t5
1239: cmovne t5,t5,t4
1240: insqh t4,a1,t5
1241: insql t4,a1,t4
1242: addq a1,a2,a4
1243: ldq_u t6,0(a1)
1244: ldq_u t7,-1(a4)
1245: bic t6,t4,t6
1246: bic t7,t5,t7
1247: and t2,t4,t2
1248: and t3,t5,t3
1249: or t2,t6,t2
1250: or t3,t7,t3
1251: stq_u t3,-1(a4)
1252: stq_u t2,0(a1)
1253: RET
1254:
1255: bcopy_overlap:
1256: /*
1257: * Basically equivalent to previous case, only backwards.
1258: * Not quite as highly optimized
1259: */
1260: addq a0,a2,a3
1261: addq a1,a2,a4
1262:
1263: /* less than 8 bytes - don't worry about overlap */
1264: cmpule a2,8,t0
1265: bne t0,bcopy_ov_short
1266:
1267: /* Possibly do a partial first word */
1268: and a4,7,t4
1269: beq t4,bcopy_ov_nostart2
1270: subq a3,t4,a3
1271: subq a4,t4,a4
1272: ldq_u t1,0(a3)
1273: subq a2,t4,a2
1274: ldq_u t2,7(a3)
1275: ldq t3,0(a4)
1276: extql t1,a3,t1
1277: extqh t2,a3,t2
1278: or t1,t2,t1
1279: mskqh t3,t4,t3
1280: mskql t1,t4,t1
1281: or t1,t3,t1
1282: stq t1,0(a4)
1283:
1284: bcopy_ov_nostart2:
1285: bic a2,7,t4
1286: and a2,7,a2
1287: beq t4,bcopy_ov_lp_end
1288:
1289: bcopy_ov_lp:
1290: /* This could be more pipelined, but it doesn't seem worth it */
1291: ldq_u t0,-8(a3)
1292: subq a4,8,a4
1293: ldq_u t1,-1(a3)
1294: subq a3,8,a3
1295: extql t0,a3,t0
1296: extqh t1,a3,t1
1297: subq t4,8,t4
1298: or t0,t1,t0
1299: stq t0,0(a4)
1300: bne t4,bcopy_ov_lp
1301:
1302: bcopy_ov_lp_end:
1303: beq a2,bcopy_done
1304:
1305: ldq_u t0,0(a0)
1306: ldq_u t1,7(a0)
1307: ldq_u t2,0(a1)
1308: extql t0,a0,t0
1309: extqh t1,a0,t1
1310: or t0,t1,t0
1311: insql t0,a1,t0
1312: mskql t2,a1,t2
1313: or t2,t0,t2
1314: stq_u t2,0(a1)
1315:
1316: bcopy_done:
1317: RET
1318:
1319: bcopy_ov_short:
1320: ldq_u t2,0(a0)
1321: br zero,bcopy_da_finish
1322:
1323: END(memcpy)
1324:
1325: /*
1326: * kcopy(const void *src, void *dst, size_t len);
1327: *
1328: * Copy len bytes from src to dst, aborting if we encounter a fatal
1329: * page fault.
1330: *
1331: * kcopy() _must_ save and restore the old fault handler since it is
1332: * called by uiomove(), which may be in the path of servicing a non-fatal
1333: * page fault.
1334: */
1335: NESTED(kcopy, 3, 32, ra, IM_RA|IM_S0|IM_S1, 0)
1336: LDGP(pv)
1337: lda sp, -32(sp) /* set up stack frame */
1338: stq ra, (32-8)(sp) /* save ra */
1339: stq s0, (32-16)(sp) /* save s0 */
1340: stq s1, (32-24)(sp) /* save s1 */
1341: /* Note: GET_CURPROC clobbers v0, t0, t8...t11. */
1342: GET_CURPROC
1343: mov v0, s1
1344: lda v0, kcopyerr /* set up fault handler. */
1345: .set noat
1346: ldq at_reg, 0(s1)
1347: ldq at_reg, P_ADDR(at_reg)
1348: ldq s0, U_PCB_ONFAULT(at_reg) /* save old handler. */
1349: stq v0, U_PCB_ONFAULT(at_reg)
1350: .set at
1351: CALL(bcopy) /* do the copy. */
1352: .set noat
1353: ldq at_reg, 0(s1) /* restore the old handler. */
1354: ldq at_reg, P_ADDR(at_reg)
1355: stq s0, U_PCB_ONFAULT(at_reg)
1356: .set at
1357: ldq ra, (32-8)(sp) /* restore ra. */
1358: ldq s0, (32-16)(sp) /* restore s0. */
1359: ldq s1, (32-24)(sp) /* restore s1. */
1360: lda sp, 32(sp) /* kill stack frame. */
1361: mov zero, v0 /* return 0. */
1362: RET
1363: END(kcopy)
1364:
/*
 * kcopyerr: fault handler installed by kcopy() via pcb_onfault.
 * Entered with kcopy()'s 32-byte stack frame still active (the
 * offsets below match kcopy's); restores the previous onfault
 * handler saved in s0, unwinds the frame, and returns EFAULT to
 * kcopy()'s caller.
 */
LEAF(kcopyerr, 0)
	LDGP(pv)
	.set	noat
	ldq	at_reg, 0(s1)			/* restore the old handler. */
	ldq	at_reg, P_ADDR(at_reg)
	stq	s0, U_PCB_ONFAULT(at_reg)
	.set	at
	ldq	ra, (32-8)(sp)			/* restore ra. */
	ldq	s0, (32-16)(sp)			/* restore s0. */
	ldq	s1, (32-24)(sp)			/* restore s1. */
	lda	sp, 32(sp)			/* kill stack frame. */
	ldiq	v0, EFAULT			/* return EFAULT. */
	RET
	END(kcopyerr)
1379:
/*
 * copyin(uaddr, kaddr, len)
 *
 * Copy len bytes from user space to kernel space by installing a
 * fault handler (copyerr) in the pcb and calling bcopy.  Returns 0
 * on success or EFAULT (via copyerr) if the user address is bogus
 * or the copy faults.
 */
NESTED(copyin, 3, 16, ra, IM_RA|IM_S0, 0)
	LDGP(pv)
	lda	sp, -16(sp)			/* set up stack frame */
	stq	ra, (16-8)(sp)			/* save ra */
	stq	s0, (16-16)(sp)			/* save s0 */
	ldiq	t0, VM_MAX_ADDRESS		/* make sure that src addr */
	cmpult	a0, t0, t1			/* is in user space. */
	beq	t1, copyerr			/* if it's not, error out. */
	/* Note: GET_CURPROC clobbers v0, t0, t8...t11. */
	GET_CURPROC
	mov	v0, s0				/* s0 = addr of curproc ptr */
	lda	v0, copyerr			/* set up fault handler. */
	.set	noat
	ldq	at_reg, 0(s0)			/* curproc */
	ldq	at_reg, P_ADDR(at_reg)		/* -> u-area */
	stq	v0, U_PCB_ONFAULT(at_reg)	/* pcb_onfault = copyerr */
	.set	at
	CALL(bcopy)				/* do the copy. */
	.set	noat
	ldq	at_reg, 0(s0)			/* kill the fault handler. */
	ldq	at_reg, P_ADDR(at_reg)
	stq	zero, U_PCB_ONFAULT(at_reg)
	.set	at
	ldq	ra, (16-8)(sp)			/* restore ra. */
	ldq	s0, (16-16)(sp)			/* restore s0. */
	lda	sp, 16(sp)			/* kill stack frame. */
	mov	zero, v0			/* return 0. */
	RET
	END(copyin)
1409:
/*
 * copyout(kaddr, uaddr, len)
 *
 * Copy len bytes from kernel space to user space by installing a
 * fault handler (copyerr) in the pcb and calling bcopy.  Returns 0
 * on success or EFAULT (via copyerr) if the user address is bogus
 * or the copy faults.
 */
NESTED(copyout, 3, 16, ra, IM_RA|IM_S0, 0)
	LDGP(pv)
	lda	sp, -16(sp)			/* set up stack frame */
	stq	ra, (16-8)(sp)			/* save ra */
	stq	s0, (16-16)(sp)			/* save s0 */
	ldiq	t0, VM_MAX_ADDRESS		/* make sure that dest addr */
	cmpult	a1, t0, t1			/* is in user space. */
	beq	t1, copyerr			/* if it's not, error out. */
	/* Note: GET_CURPROC clobbers v0, t0, t8...t11. */
	GET_CURPROC
	mov	v0, s0				/* s0 = addr of curproc ptr */
	lda	v0, copyerr			/* set up fault handler. */
	.set	noat
	ldq	at_reg, 0(s0)			/* curproc */
	ldq	at_reg, P_ADDR(at_reg)		/* -> u-area */
	stq	v0, U_PCB_ONFAULT(at_reg)	/* pcb_onfault = copyerr */
	.set	at
	CALL(bcopy)				/* do the copy. */
	.set	noat
	ldq	at_reg, 0(s0)			/* kill the fault handler. */
	ldq	at_reg, P_ADDR(at_reg)
	stq	zero, U_PCB_ONFAULT(at_reg)
	.set	at
	ldq	ra, (16-8)(sp)			/* restore ra. */
	ldq	s0, (16-16)(sp)			/* restore s0. */
	lda	sp, 16(sp)			/* kill stack frame. */
	mov	zero, v0			/* return 0. */
	RET
	END(copyout)
1439:
/*
 * copyerr: fault handler shared by copyin(), copyout(), copyinstr()
 * and copyoutstr().  Entered either by direct branch (bad user
 * address) or from the trap path via pcb_onfault, with the caller's
 * 16-byte stack frame still active; unwinds it and returns EFAULT
 * to the original caller.
 */
LEAF(copyerr, 0)
	LDGP(pv)
	ldq	ra, (16-8)(sp)			/* restore ra. */
	ldq	s0, (16-16)(sp)			/* restore s0. */
	lda	sp, 16(sp)			/* kill stack frame. */
	ldiq	v0, EFAULT			/* return EFAULT. */
	RET
	END(copyerr)
1448:
1449: /**************************************************************************/
1450:
1451: /*
1452: * console 'restart' routine to be placed in HWRPB.
1453: */
1454: LEAF(XentRestart, 1) /* XXX should be NESTED */
1455: .set noat
1456: lda sp,-(FRAME_SIZE*8)(sp)
1457: stq at_reg,(FRAME_AT*8)(sp)
1458: .set at
1459: stq v0,(FRAME_V0*8)(sp)
1460: stq a0,(FRAME_A0*8)(sp)
1461: stq a1,(FRAME_A1*8)(sp)
1462: stq a2,(FRAME_A2*8)(sp)
1463: stq a3,(FRAME_A3*8)(sp)
1464: stq a4,(FRAME_A4*8)(sp)
1465: stq a5,(FRAME_A5*8)(sp)
1466: stq s0,(FRAME_S0*8)(sp)
1467: stq s1,(FRAME_S1*8)(sp)
1468: stq s2,(FRAME_S2*8)(sp)
1469: stq s3,(FRAME_S3*8)(sp)
1470: stq s4,(FRAME_S4*8)(sp)
1471: stq s5,(FRAME_S5*8)(sp)
1472: stq s6,(FRAME_S6*8)(sp)
1473: stq t0,(FRAME_T0*8)(sp)
1474: stq t1,(FRAME_T1*8)(sp)
1475: stq t2,(FRAME_T2*8)(sp)
1476: stq t3,(FRAME_T3*8)(sp)
1477: stq t4,(FRAME_T4*8)(sp)
1478: stq t5,(FRAME_T5*8)(sp)
1479: stq t6,(FRAME_T6*8)(sp)
1480: stq t7,(FRAME_T7*8)(sp)
1481: stq t8,(FRAME_T8*8)(sp)
1482: stq t9,(FRAME_T9*8)(sp)
1483: stq t10,(FRAME_T10*8)(sp)
1484: stq t11,(FRAME_T11*8)(sp)
1485: stq t12,(FRAME_T12*8)(sp)
1486: stq ra,(FRAME_RA*8)(sp)
1487:
1488: br pv,1f
1489: 1: LDGP(pv)
1490:
1491: mov sp,a0
1492: CALL(console_restart)
1493:
1494: call_pal PAL_halt
1495: END(XentRestart)
1496:
1497: /**************************************************************************/
1498:
1499: /*
1500: * Kernel setjmp and longjmp. Rather minimalist.
1501: *
1502: * longjmp(label_t *a)
1503: * will generate a "return (1)" from the last call to
1504: * setjmp(label_t *a)
1505: * by restoring registers from the stack,
1506: */
1507:
1508: .set noreorder
1509:
1510: LEAF(setjmp, 1)
1511: LDGP(pv)
1512:
1513: stq ra, (0 * 8)(a0) /* return address */
1514: stq s0, (1 * 8)(a0) /* callee-saved registers */
1515: stq s1, (2 * 8)(a0)
1516: stq s2, (3 * 8)(a0)
1517: stq s3, (4 * 8)(a0)
1518: stq s4, (5 * 8)(a0)
1519: stq s5, (6 * 8)(a0)
1520: stq s6, (7 * 8)(a0)
1521: stq sp, (8 * 8)(a0)
1522:
1523: ldiq t0, 0xbeeffedadeadbabe /* set magic number */
1524: stq t0, (9 * 8)(a0)
1525:
1526: mov zero, v0 /* return zero */
1527: RET
1528: END(setjmp)
1529:
/*
 * longjmp: validate the magic number stored by setjmp, restore the
 * saved registers and stack pointer, and "return 1" from the
 * matching setjmp call.  A corrupted buffer panics.
 */
LEAF(longjmp, 1)
	LDGP(pv)

	ldiq	t0, 0xbeeffedadeadbabe	/* check magic number */
	ldq	t1, (9 * 8)(a0)
	cmpeq	t0, t1, t0
	beq	t0, longjmp_botch	/* if bad, punt */

	ldq	ra, (0 * 8)(a0)		/* return address */
	ldq	s0, (1 * 8)(a0)		/* callee-saved registers */
	ldq	s1, (2 * 8)(a0)
	ldq	s2, (3 * 8)(a0)
	ldq	s3, (4 * 8)(a0)
	ldq	s4, (5 * 8)(a0)
	ldq	s5, (6 * 8)(a0)
	ldq	s6, (7 * 8)(a0)
	ldq	sp, (8 * 8)(a0)

	ldiq	v0, 1			/* make setjmp return 1 */
	RET

longjmp_botch:
	lda	a0, longjmp_botchmsg	/* panic("longjmp botch from %p") */
	mov	ra, a1
	CALL(panic)
	call_pal PAL_bugchk		/* not reached: panic won't return */

	.data
longjmp_botchmsg:
	.asciz	"longjmp botch from %p"
	.text
	END(longjmp)
1562:
1563: /*
1564: * void sts(int rn, u_int32_t *rval);
1565: * void stt(int rn, u_int64_t *rval);
1566: * void lds(int rn, u_int32_t *rval);
1567: * void ldt(int rn, u_int64_t *rval);
1568: */
1569:
1570: #ifndef NO_IEEE
1571: .macro make_freg_util name, op
1572: LEAF(alpha_\name, 2)
1573: and a0, 0x1f, a0
1574: s8addq a0, pv, pv
1575: addq pv, 1f - alpha_\name, pv
1576: jmp (pv)
1577: 1:
1578: rn = 0
1579: .rept 32
1580: \op $f0 + rn, 0(a1)
1581: RET
1582: rn = rn + 1
1583: .endr
1584: END(alpha_\name)
1585: .endm
1586: /*
1587: LEAF(alpha_sts, 2)
1588: LEAF(alpha_stt, 2)
1589: LEAF(alpha_lds, 2)
1590: LEAF(alpha_ldt, 2)
1591: */
1592: make_freg_util sts, sts
1593: make_freg_util stt, stt
1594: make_freg_util lds, lds
1595: make_freg_util ldt, ldt
1596:
/*
 * alpha_read_fpcr: return the FP control register in v0.  The FPCR
 * is only accessible through an FP register (mf_fpcr), so the value
 * is bounced through the stack; $f30 is saved and restored around
 * its use as scratch.
 */
LEAF(alpha_read_fpcr, 0); f30save = 0; rettmp = 8; framesz = 16
	lda	sp, -framesz(sp)
	stt	$f30, f30save(sp)	/* preserve caller's $f30 */
	mf_fpcr	$f30			/* $f30 = FPCR */
	stt	$f30, rettmp(sp)	/* move FPCR to v0 via memory */
	ldt	$f30, f30save(sp)
	ldq	v0, rettmp(sp)
	lda	sp, framesz(sp)
	RET
	END(alpha_read_fpcr)
1607:
/*
 * alpha_write_fpcr: set the FP control register from a0.  Mirror of
 * alpha_read_fpcr: the value is bounced through the stack into $f30
 * because mt_fpcr only takes an FP register operand.
 */
LEAF(alpha_write_fpcr, 1); f30save = 0; fpcrtmp = 8; framesz = 16
	lda	sp, -framesz(sp)
	stq	a0, fpcrtmp(sp)		/* move a0 into $f30 via memory */
	stt	$f30, f30save(sp)	/* preserve caller's $f30 */
	ldt	$f30, fpcrtmp(sp)
	mt_fpcr	$f30			/* FPCR = $f30 */
	ldt	$f30, f30save(sp)
	lda	sp, framesz(sp)
	RET
	END(alpha_write_fpcr)
#endif /* !NO_IEEE */
1619:
#if 0
/* Dead code: never compiled in. */
NESTED(transfer_check,0,0,ra,0,0)
	CALL(U_need_2_run_config)
	END(transfer_check)
#endif

/* Random data that shouldn't be necessary. */
	.data
EXPORT(cold)
	.long 1			/* cold start flag (.long -> _4_ bytes) */
	.align 3
EXPORT(esym)
	.quad 1			/* store end of kernel symbol table here */
1633:
1634:
1635: /**************************************************************************/
CVSweb