Annotation of sys/arch/mac68k/mac68k/locore.s, Revision 1.1.1.1
1.1 nbrk 1: /* $OpenBSD: locore.s,v 1.58 2007/05/15 13:46:22 martin Exp $ */
2: /* $NetBSD: locore.s,v 1.103 1998/07/09 06:02:50 scottr Exp $ */
3:
4: /*
5: * Copyright (c) 1988 University of Utah.
6: * Copyright (c) 1982, 1990 The Regents of the University of California.
7: * All rights reserved.
8: *
9: * This code is derived from software contributed to Berkeley by
10: * the Systems Programming Group of the University of Utah Computer
11: * Science Department.
12: *
13: * Redistribution and use in source and binary forms, with or without
14: * modification, are permitted provided that the following conditions
15: * are met:
16: * 1. Redistributions of source code must retain the above copyright
17: * notice, this list of conditions and the following disclaimer.
18: * 2. Redistributions in binary form must reproduce the above copyright
19: * notice, this list of conditions and the following disclaimer in the
20: * documentation and/or other materials provided with the distribution.
21: * 3. Neither the name of the University nor the names of its contributors
22: * may be used to endorse or promote products derived from this software
23: * without specific prior written permission.
24: *
25: * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
26: * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27: * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28: * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29: * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30: * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31: * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32: * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33: * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34: * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35: * SUCH DAMAGE.
36: */
37: /*-
38: * Copyright (C) 1993 Allen K. Briggs, Chris P. Caputo,
39: * Michael L. Finch, Bradley A. Grantham, and
40: * Lawrence A. Kesteloot
41: * All rights reserved.
42: *
43: * Redistribution and use in source and binary forms, with or without
44: * modification, are permitted provided that the following conditions
45: * are met:
46: * 1. Redistributions of source code must retain the above copyright
47: * notice, this list of conditions and the following disclaimer.
48: * 2. Redistributions in binary form must reproduce the above copyright
49: * notice, this list of conditions and the following disclaimer in the
50: * documentation and/or other materials provided with the distribution.
51: * 3. All advertising materials mentioning features or use of this software
52: * must display the following acknowledgement:
53: * This product includes software developed by the Alice Group.
54: * 4. The names of the Alice Group or any of its members may not be used
55: * to endorse or promote products derived from this software without
56: * specific prior written permission.
57: *
58: * THIS SOFTWARE IS PROVIDED BY THE ALICE GROUP ``AS IS'' AND ANY EXPRESS OR
59: * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
60: * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
61: * IN NO EVENT SHALL THE ALICE GROUP BE LIABLE FOR ANY DIRECT, INDIRECT,
62: * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
63: * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
64: * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
65: * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
66: * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
67: * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
68: *
69: */
70:
71: /*
72: * from: Utah $Hdr: locore.s 1.58 91/04/22$
73: *
74: * @(#)locore.s 7.11 (Berkeley) 5/9/91
75: */
76:
77: #include "assym.h"
78: #include <machine/asm.h>
79: #include <machine/trap.h>
80:
81: /*
82: * This is for kvm_mkdb, and should be the address of the beginning
83: * of the kernel text segment (not necessarily the same as kernbase).
84: */
85: .text
86: GLOBAL(kernel_text)
87:
88: /*
89: * Clear and skip the first page of text; it will not be mapped.
90: */
91: .fill NBPG / 4, 4, 0
92:
93: /*
94: * Initialization
95: */
96:
97: .data
98: | Scratch memory. Careful when messing with these...
| longscratch/longscratch2 are in-memory operands for pmove-style
| instructions that cannot take immediates (see LnokillTT below).
99: ASLOCAL(longscratch)
100: .long 0
101: ASLOCAL(longscratch2)
102: .long 0
103: ASLOCAL(pte_tmp) | for get_pte()
104: .long 0
| Saved MacOS MMU register images; macos_tc is filled in by the
| startup code (Lstart3 path). The CRP/TTx slots are presumably
| written elsewhere in this file -- confirm against full source.
105: GLOBAL(macos_crp1)
106: .long 0
107: GLOBAL(macos_crp2)
108: .long 0
109: GLOBAL(macos_tc)
110: .long 0
111: GLOBAL(macos_tt0)
112: .long 0
113: GLOBAL(macos_tt1)
114: .long 0
115: GLOBAL(bletch)
116: .long 0
117:
118: GLOBAL(sanity_check)
119: .long 0x18621862 | this is our stack overflow checker.
120:
| Four pages of early-boot stack; the stack grows downward, so the
| tmpstk label marks the (high) starting stack pointer.
121: .space 4 * NBPG
122: ASLOCAL(tmpstk)
123:
124: #include <mac68k/mac68k/vectors.s>
125:
| Symbol-table end pointer (4-byte BSS cell), set up for ddb/kvm.
126: BSS(esym,4)
127:
/*
 * start/kernel_start: machine entry point from the MacOS booter.
 * On entry: a1 = booter environment buffer, d4 = booter flags.
 * Probes CPU/MMU type empirically, installs bus/address-error
 * vectors, bootstraps the pmap, enables the MMU, sets up proc0's
 * kernel stack and a fake exception frame, then jumps to main().
 * Runs on the tmpstk temporary stack until proc0's stack is ready.
 */
128: ASENTRY_NOPROFILE(start)
129: GLOBAL(kernel_start)
130: movw #PSL_HIGHIPL,sr | no interrupts. ever.
131: lea _ASM_LABEL(tmpstk),sp | give ourselves a temporary stack
132:
133: movl #CACHE_OFF,d0
134: movc d0,cacr | clear and disable on-chip cache(s)
135:
136: /* Initialize source/destination control registers for movs */
137: movql #FC_USERD,d0 | user space
138: movc d0,sfc | as source
139: movc d0,dfc | and destination of transfers
140:
141: /*
142: * Some parameters provided by MacOS
143: *
144: * LAK: This section is the new way to pass information from the booter
145: * to the kernel. At A1 there is an environment variable which has
146: * a bunch of stuff in ascii format, "VAR=value\0VAR=value\0\0".
147: */
148: movl a1,sp@- | Address of buffer
149: movl d4,sp@- | Some flags... (mostly not used)
150: jbsr _C_LABEL(getenvvars) | Parse the environment buffer
151: addql #8,sp
152:
153: /* Determine MMU/MPU from what we can test empirically */
| Probe trick: cacr bits that exist only on one CPU model read back
| as zero on the others, so write-then-readback identifies the CPU.
154: movl #0x200,d0 | data freeze bit
155: movc d0,cacr | only exists on 68030
156: movc cacr,d0 | read it back
157: tstl d0 | zero?
158: jeq Lnot68030 | yes, we have 68020/68040
159:
160: lea _C_LABEL(mmutype),a0 | no, we have 68030
161: movl #MMU_68030,a0@ | set to reflect 68030 PMMU
162: lea _C_LABEL(cputype),a0
163: movl #CPU_68030,a0@ | and 68030 MPU
164: jra Lstart1
165:
166: Lnot68030:
167: bset #31,d0 | data cache enable bit
168: movc d0,cacr | only exists on 68040
169: movc cacr,d0 | read it back
170: tstl d0 | zero?
171: beq Lis68020 | yes, we have 68020
172:
173: movql #CACHE40_OFF,d0 | now turn it back off
174: movc d0,cacr | before we access any data
175: .word 0xf4f8 | cpusha bc ;push and invalidate caches
176: lea _C_LABEL(mmutype),a0
177: movl #MMU_68040,a0@ | Reflect 68040 MMU
178: lea _C_LABEL(cputype),a0
179: movl #CPU_68040,a0@ | and 68040 MPU
180: jra Lstart1
181:
182: Lis68020:
183: movl #CACHE_OFF,d0 | disable and clear cache
184: movc d0,cacr
185: lea _C_LABEL(mmutype),a0 | Must be 68020+68851
186: movl #MMU_68851,a0@ | Reflect 68851 PMMU
187: lea _C_LABEL(cputype),a0
188: movl #CPU_68020,a0@ | and 68020 MPU
189:
190: Lstart1:
191: /*
192: * Now that we know what CPU we have, initialize the address error
193: * and bus error handlers in the vector table:
194: *
195: * vectab+8 bus error
196: * vectab+12 address error
197: */
198: lea _C_LABEL(cputype),a0
199: lea _C_LABEL(vectab),a2
200: #if defined(M68040)
201: cmpl #CPU_68040,a0@ | 68040?
202: jne 1f | no, skip
203: movl #_C_LABEL(buserr40),a2@(8)
204: movl #_C_LABEL(addrerr4060),a2@(12)
205: jra Lstart2
206: 1:
207: #endif
208: #if defined(M68020) || defined(M68030)
209: #if defined(M68030)
210: cmpl #CPU_68030,a0@ | 68030?
211: jeq 1f | yes, ok
212: #endif
213: #if defined(M68020)
214: cmpl #CPU_68020,a0@ | 68020?
215: jeq 1f | yes, ok
216: #endif
217: jra 9f
218: 1:
| 020/030 share one combined bus/address-error handler.
219: movl #_C_LABEL(busaddrerr2030),a2@(8)
220: movl #_C_LABEL(busaddrerr2030),a2@(12)
221: jra Lstart2
222: #endif
223: 9:
224: /* Config botch; no hope. */
| Ldoboot1 is the reboot-into-MacOS path defined later in this
| file (not visible in this chunk) -- bail back to the ROM.
225: movl _C_LABEL(MacOSROMBase),a1 | Load MacOS ROMBase
226: jra Ldoboot1
227:
228: Lstart2:
229: jbsr _C_LABEL(setmachdep) | Set some machine-dep stuff
230: jbsr _C_LABEL(consinit) | XXX Should only be if graybar on
231:
232: /*
233: * Figure out MacOS mappings and bootstrap OpenBSD
234: */
235: lea _C_LABEL(macos_tc),a0 | get current TC
236: cmpl #MMU_68040,_C_LABEL(mmutype) | check to see if 68040
237: jeq Lget040TC
238:
239: pmove tc,a0@
240: jra Lstart3
241:
242: Lget040TC:
243: #if 0
244: movl _C_LABEL(current_mac_model),a1 | if an AV Mac, save current
245: cmpl #MACH_CLASSAV,a1@(CPUINFO_CLASS) | TC so internal video will
246: jne LnotAV | get configured
247: #endif
| Hand-assembled because pmove/movc-to-MMU forms for the '040 are
| not accepted by this assembler; the decoded form is in the comment.
248: .long 0x4e7a0003 | movc tc,d0
249: jra LsaveTC
250: LnotAV:
251: movql #0,d0 | otherwise,
252: .long 0x4e7b0003 | movc d0,tc ;Disable MMU
253: LsaveTC:
254: movl d0,a0@
255:
256: Lstart3:
257: movl a0@,sp@- | get Mac OS mapping, relocate video,
258: jbsr _C_LABEL(bootstrap_mac68k) | bootstrap pmap, et al.
259: addql #4,sp
260:
261: /*
262: * Set up the vector table, and race to get the MMU
263: * enabled.
264: */
265: movl #_C_LABEL(vectab),d0 | set Vector Base Register
266: movc d0,vbr
267:
268: /*
269: * We might not be running physical, but we don't have read-only mappings
270: * yet either. It's time to override copypage() with the 68040
271: * optimized version, copypage040(), if possible.
272: * This relies upon the fact that copypage() immediately follows
273: * copypage040() in memory.
274: */
275: movl #_C_LABEL(mmutype),a0
276: cmpl #MMU_68040,a0@
277: jgt Lmmu_enable
| Word-by-word copy of copypage040's code over copypage's entry.
278: movl #_C_LABEL(copypage040),a0
279: movl #_C_LABEL(copypage),a1
280: movl a1, a2
281: 1:
282: movw a0@+, a2@+
283: cmpl a0, a1
284: jgt 1b
285:
286: Lmmu_enable:
287: movl _C_LABEL(Sysseg),a1 | system segment table addr
288: addl _C_LABEL(load_addr),a1 | Make it physical addr
289: cmpl #MMU_68040,_C_LABEL(mmutype)
290: jne Lenablepre040MMU | if not 040, skip
291:
| 68040 path: clear transparent-translation regs, flush caches and
| ATC, load the supervisor root pointer, then enable translation.
292: movql #0,d0
293: .long 0x4e7b0003 | movc d0,tc ;Disable MMU
294: .long 0x4e7b0004 | movc d0,itt0 ;Disable itt0
295: .long 0x4e7b0005 | movc d0,itt1 ;Disable itt1
296: .long 0x4e7b0006 | movc d0,dtt0 ;Disable dtt0
297: .long 0x4e7b0007 | movc d0,dtt1 ;Disable dtt1
298: movl a1,d1
299: .word 0xf4d8 | cinva bc
300: .word 0xf518 | pflusha
301: .long 0x4e7b1807 | movc d1,srp
302: movl #0x8000,d0
303: .long 0x4e7b0003 | movc d0,tc ;Enable MMU
304: movl #CACHE40_ON,d0
305: movc d0,cacr | turn on both caches
306: jra Lloaddone
307:
308: Lenablepre040MMU:
309: tstl _C_LABEL(mmutype) | TTx instructions will break 68851
310: jgt LnokillTT
311:
312: lea _ASM_LABEL(longscratch),a0 | disable TTx registers on 68030
313: movl #0,a0@
314: .long 0xf0100800 | movl a0@,tt0
315: .long 0xf0100c00 | movl a0@,tt1
316:
317: LnokillTT:
318: lea _C_LABEL(protorp),a0
319: movl #0x80000202,a0@ | nolimit + share global + 4 byte PTEs
320: movl a1,a0@(4) | + segtable address
321: pmove a0@,srp | load the supervisor root pointer
322: movl #0x80000002,a0@ | reinit upper half for CRP loads
323: lea _ASM_LABEL(longscratch),a2
324: movl #0x82c0aa00,a2@ | value to load TC with
325: pmove a2@,tc | load it
326:
327: Lloaddone:
328:
329: /*
330: * Should be running mapped from this point on
331: */
332: /* select the software page size now */
333: lea _ASM_LABEL(tmpstk),sp | temporary stack
334: jbsr _C_LABEL(uvm_setpagesize) | select software page size
335:
336: /* set kernel stack, user SP, proc0, and initial pcb */
337: movl _C_LABEL(proc0paddr),a1 | get proc0 pcb addr
338: lea a1@(USPACE-4),sp | set kernel stack to end of area
339: lea _C_LABEL(proc0),a2 | initialize proc0.p_addr so that
340: movl a1,a2@(P_ADDR) | we don't deref NULL in trap()
341: movl #USRSTACK-4,a2
342: movl a2,usp | init user SP
343: movl a1,_C_LABEL(curpcb) | proc0 is running
344:
345: /* flush TLB and turn on caches */
346: jbsr _ASM_LABEL(TBIA) | invalidate TLB
347: cmpl #MMU_68040,_C_LABEL(mmutype) | 68040?
348: jeq Lnocache0 | yes, cache already on
349: movl #CACHE_ON,d0
350: movc d0,cacr | clear cache(s)
351:
352: Lnocache0:
353: /* Final setup for call to main(). */
354: jbsr _C_LABEL(mac68k_init)
355:
356: /*
357: * Create a fake exception frame so that cpu_fork() can copy it.
358: * main() never returns; we exit to user mode from a forked process
359: * later on.
360: */
361: clrw sp@- | vector offset/frame type
362: clrl sp@- | PC - filled in by "execve"
363: movw #PSL_USER,sp@- | in user mode
364: clrl sp@- | stack adjust count and padding
365: lea sp@(-64),sp | construct space for D0-D7/A0-A7
366: lea _C_LABEL(proc0),a0 | save pointer to frame
367: movl sp,a0@(P_MD_REGS) | in proc0.p_md.md_regs
368:
369: jra _C_LABEL(main) | main()
370: PANIC("main() returned")
371: /* NOTREACHED */
372:
373: /*
374: * proc_trampoline
375: * Call function in register a2 with a3 as an arg and then rei. Note
376: * that we restore the stack before calling, thus giving "a2" more stack.
377: * (for the case that, e.g., if curproc had a deeply nested call chain...)
378: * cpu_fork() also depends on struct frame being a second arg to the
379: * function in a2.
380: */
381: GLOBAL(proc_trampoline)
382: movl a3,sp@- | push function arg (curproc)
383: jbsr a2@ | call function
384: addql #4,sp | pop arg
385: movl sp@(FR_SP),a0 | usp to a0
386: movl a0,usp | setup user's stack pointer
| #0x7fff = d0-d7/a0-a6; sp itself is restored via the rte frame.
387: movml sp@+,#0x7fff | restore all but sp
388: addql #8,sp | pop sp and stack adjust
389: jra _ASM_LABEL(rei) | all done
390:
391: /*
392: * Trap/interrupt vector routines
393: */
394: #include <m68k/m68k/trap_subr.s>
395:
396: .data
| Fault address saved by the bus-error handlers (Lberr40, Lisberr1)
| just before longjmp()ing through nofault, so the catcher can
| inspect which access faulted.
397: GLOBAL(m68k_fault_addr)
398: .long 0
399:
400: #if defined(M68040)
/*
 * 68040/68060 address-error handler: build a trap() argument list
 * (type, code, v_addr) on top of a full register save and hand off
 * to the common faultstkadj path.
 */
401: ENTRY_NOPROFILE(addrerr4060)
402: clrl sp@- | stack adjust count
403: moveml #0xFFFF,sp@- | save user registers
404: movl usp,a0 | save the user SP
405: movl a0,sp@(FR_SP) | in the savearea
406: movl sp@(FR_HW+8),sp@-
407: clrl sp@- | dummy code
408: movl #T_ADDRERR,sp@- | mark address error
409: jra _ASM_LABEL(faultstkadj) | and deal with it
410: #endif
411:
412: #if defined(M68040)
/*
 * 68040 bus-error handler: decode the access-error stack frame
 * (fault address and Special Status Word), distinguish MMU faults
 * from true bus errors, and dispatch to trap() via faultstkadj.
 * If nofault is armed (e.g. during bus probing), record the fault
 * address and longjmp back instead.
 */
413: ENTRY_NOPROFILE(buserr40)
414: clrl sp@- | stack adjust count
415: moveml #0xFFFF,sp@- | save user registers
416: movl usp,a0 | save the user SP
417: movl a0,sp@(FR_SP) | in the savearea
418: movl sp@(FR_HW+20),d1 | get fault address
419: moveq #0,d0
420: movw sp@(FR_HW+12),d0 | get SSW
421: btst #11,d0 | check for mis-aligned
422: jeq Lbe1stpg | no skip
423: addl #3,d1 | get into next page
424: andl #PG_FRAME,d1 | and truncate
425: Lbe1stpg:
426: movl d1,sp@- | pass fault address.
427: movl d0,sp@- | pass SSW as code
428: btst #10,d0 | test ATC
429: jeq Lberr40 | it is a bus error
430: movl #T_MMUFLT,sp@- | show that we are an MMU fault
431: jra _ASM_LABEL(faultstkadj) | and deal with it
432: Lberr40:
433: tstl _C_LABEL(nofault) | catch bus error?
434: jeq Lisberr | no, handle as usual
435: movl sp@(FR_HW+8+20),_C_LABEL(m68k_fault_addr) | save fault addr
436: movl _C_LABEL(nofault),sp@- | yes,
437: jbsr _C_LABEL(longjmp) | longjmp(nofault)
438: /* NOTREACHED */
439: #endif
440:
/*
 * Combined 68020/68030 bus-error and address-error handler.
 * Reconstructs the faulting virtual address from the (short or
 * long format) exception stack frame and SSW, runs a PMMU table
 * search (ptestr) to classify the fault as an MMU fault, write-
 * protect violation, address error, or real bus error, then hands
 * off to trap() through faultstkadj. Honors nofault/longjmp
 * recovery like the 68040 handler above.
 */
441: ENTRY_NOPROFILE(busaddrerr2030)
442: #if !(defined(M68020) || defined(M68030))
443: jra _badtrap
444: #else
445: clrl sp@- | stack adjust count
446: moveml #0xFFFF,sp@- | save user registers
447: movl usp,a0 | save the user SP
448: movl a0,sp@(FR_SP) | in the savearea
449: moveq #0,d0
450: movw sp@(FR_HW+10),d0 | grab SSW for fault processing
451: btst #12,d0 | RB set?
452: jeq LbeX0 | no, test RC
453: bset #14,d0 | yes, must set FB
454: movw d0,sp@(FR_HW+10) | for hardware too
455: LbeX0:
456: btst #13,d0 | RC set?
457: jeq LbeX1 | no, skip
458: bset #15,d0 | yes, must set FC
459: movw d0,sp@(FR_HW+10) | for hardware too
460: LbeX1:
461: btst #8,d0 | data fault?
462: jeq Lbe0 | no, check for hard cases
463: movl sp@(FR_HW+16),d1 | fault address is as given in frame
464: jra Lbe10 | that's it
465: Lbe0:
466: btst #4,sp@(FR_HW+6) | long (type B) stack frame?
467: jne Lbe4 | yes, go handle
468: movl sp@(FR_HW+2),d1 | no, can use save PC
469: btst #14,d0 | FB set?
470: jeq Lbe3 | no, try FC
471: addql #4,d1 | yes, adjust address
472: jra Lbe10 | done
473: Lbe3:
474: btst #15,d0 | FC set?
475: jeq Lbe10 | no, done
476: addql #2,d1 | yes, adjust address
477: jra Lbe10 | done
478: Lbe4:
479: movl sp@(FR_HW+36),d1 | long format, use stage B address
480: btst #15,d0 | FC set?
481: jeq Lbe10 | no, all done
482: subql #2,d1 | yes, adjust address
483: Lbe10:
484: movl d1,sp@- | push fault VA
485: movl d0,sp@- | and padded SSW
486: movw sp@(FR_HW+8+6),d0 | get frame format/vector offset
487: andw #0x0FFF,d0 | clear out frame format
488: cmpw #12,d0 | address error vector?
489: jeq Lisaerr | yes, go to it
490: movl d1,a0 | fault address
491: movl sp@,d0 | function code from ssw
492: btst #8,d0 | data fault?
493: jne Lbe10a
494: movql #1,d0 | user program access FC
495: | (we don't separate data/program)
496: btst #5,sp@(FR_HW+8) | supervisor mode?
497: jeq Lbe10a | if no, done
498: movql #5,d0 | else supervisor program access
499: Lbe10a:
500: ptestr d0,a0@,#7 | do a table search
501: pmove psr,sp@ | save result
502: movb sp@,d1
503: btst #2,d1 | invalid (incl. limit viol. and berr)?
504: jeq Lmightnotbemerr | no -> wp check
505: btst #7,d1 | is it MMU table berr?
506: jne Lisberr1 | yes, needs not be fast.
507: Lismerr:
508: movl #T_MMUFLT,sp@- | show that we are an MMU fault
509: jra _ASM_LABEL(faultstkadj) | and deal with it
510: Lmightnotbemerr:
511: btst #3,d1 | write protect bit set?
512: jeq Lisberr1 | no: must be bus error
513: movl sp@,d0 | ssw into low word of d0
514: andw #0xc0,d0 | Write protect is set on page:
515: cmpw #0x40,d0 | was it read cycle?
516: jne Lismerr | no, was not WPE, must be MMU fault
517: jra Lisberr1 | real bus err needs not be fast.
518: Lisaerr:
519: movl #T_ADDRERR,sp@- | mark address error
520: jra _ASM_LABEL(faultstkadj) | and deal with it
521: Lisberr1:
522: clrw sp@ | re-clear pad word
523: tstl _C_LABEL(nofault) | catch bus error?
524: jeq Lisberr | no, handle as usual
525: movl sp@(FR_HW+8+16),_C_LABEL(m68k_fault_addr) | save fault addr
526: movl _C_LABEL(nofault),sp@- | yes,
527: jbsr _C_LABEL(longjmp) | longjmp(nofault)
528: /* NOTREACHED */
529: #endif
530: Lisberr: | also used by M68040/60
531: movl #T_BUSERR,sp@- | mark bus error
532: jra _ASM_LABEL(faultstkadj) | and deal with it
533:
534: /*
535: * FP exceptions.
536: */
/*
 * Unimplemented-FP-instruction trap. On a 68040 with the FPSP
 * support package, route format-2 frames to the FPSP emulator;
 * otherwise fall through to software FP emulation (if configured)
 * or treat as an illegal instruction.
 */
537: ENTRY_NOPROFILE(fpfline)
538: #if defined(M68040)
539: cmpl #FPU_68040,_C_LABEL(fputype) | 68040 FPU?
540: jne Lfp_unimp | no, skip FPSP
541: cmpw #0x202c,sp@(6) | format type 2?
542: jne _C_LABEL(illinst) | no, not an FP emulation
543: Ldofp_unimp:
544: #ifdef FPSP
545: jmp _ASM_LABEL(fpsp_unimp) | yes, go handle it
546: #endif
547: Lfp_unimp:
548: #endif /* M68040 */
549: #ifdef FPU_EMULATE
550: clrl sp@- | stack adjust count
551: moveml #0xFFFF,sp@- | save registers
552: moveq #T_FPEMULI,d0 | denote as FP emulation trap
553: jra _ASM_LABEL(fault) | do it
554: #else
555: jra _C_LABEL(illinst)
556: #endif
557:
/*
 * Unsupported-FP-datatype trap: FPSP on a 68040 if available,
 * else software emulation or illegal instruction, mirroring
 * the fpfline handler above.
 */
558: ENTRY_NOPROFILE(fpunsupp)
559: #if defined(M68040)
560: cmpl #FPU_68040,_C_LABEL(fputype) | 68040 FPU?
561: jne _C_LABEL(illinst) | no, treat as illinst
562: #ifdef FPSP
563: jmp _ASM_LABEL(fpsp_unsupp) | yes, go handle it
564: #endif
565: Lfp_unsupp:
566: #endif /* M68040 */
567: #ifdef FPU_EMULATE
568: clrl sp@- | stack adjust count
569: moveml #0xFFFF,sp@- | save registers
570: moveq #T_FPEMULD,d0 | denote as FP emulation trap
571: jra _ASM_LABEL(fault) | do it
572: #else
573: jra _C_LABEL(illinst)
574: #endif
575:
576: /*
577: * Handles all other FP coprocessor exceptions.
578: * Note that since some FP exceptions generate mid-instruction frames
579: * and may cause signal delivery, we need to test for stack adjustment
580: * after the trap call.
581: */
/*
 * Generic FP coprocessor exception: save the FP state into the
 * current pcb's FP context area, push the FPSR as the trap code,
 * and dispatch T_FPERR through faultstkadj (which handles any
 * stack adjustment needed for mid-instruction frames).
 */
582: ENTRY_NOPROFILE(fpfault)
583: clrl sp@- | stack adjust count
584: moveml #0xFFFF,sp@- | save user registers
585: movl usp,a0 | and save
586: movl a0,sp@(FR_SP) | the user stack pointer
587: clrl sp@- | no VA arg
588: movl _C_LABEL(curpcb),a0 | current pcb
589: lea a0@(PCB_FPCTX),a0 | address of FP savearea
590: fsave a0@ | save state
591: #if defined(M68040) || defined(M68060)
592: /* always null state frame on 68040, 68060 */
593: cmpl #FPU_68040,_C_LABEL(fputype)
594: jle Lfptnull
595: #endif
596: tstb a0@ | null state frame?
597: jeq Lfptnull | yes, safe
598: clrw d0 | no, need to tweak BIU
599: movb a0@(1),d0 | get frame size
600: bset #3,a0@(0,d0:w) | set exc_pend bit of BIU
601: Lfptnull:
602: fmovem fpsr,sp@- | push fpsr as code argument
603: frestore a0@ | restore state
604: movl #T_FPERR,sp@- | push type arg
605: jra _ASM_LABEL(faultstkadj) | call trap and deal with stack cleanup
606:
607: /*
608: * Other exceptions only cause four and six word stack frame and require
609: * no post-trap stack adjustment.
610: */
611:
/*
 * Catch-all for unexpected traps: report via straytrap(vector, pc)
 * and resume. Only d0-d1/a0-a1 (mask 0xC0C0) need saving.
 */
612: ENTRY_NOPROFILE(badtrap)
613: moveml #0xC0C0,sp@- | save scratch regs
614: movw sp@(22),sp@- | push exception vector info
615: clrw sp@-
616: movl sp@(22),sp@- | and PC
617: jbsr _C_LABEL(straytrap) | report
618: addql #8,sp | pop args
619: moveml sp@+,#0x0303 | restore regs
620: jra _ASM_LABEL(rei) | all done
621:
/*
 * Trap 0: system call entry. Syscall number arrives in d0.
 * On the way out, check for pending ASTs and software interrupts
 * and divert to the rei handlers (Lrei2/Lsir1) if needed.
 */
622: ENTRY_NOPROFILE(trap0)
623: clrl sp@- | pad SR to longword
624: moveml #0xFFFF,sp@- | save user registers
625: movl usp,a0 | save the user SP
626: movl a0,sp@(FR_SP) | in the savearea
627: movl d0,sp@- | push syscall number
628: jbsr _C_LABEL(syscall) | handle it
629: addql #4,sp | pop syscall arg
630: tstl _C_LABEL(astpending)
631: jne Lrei2
632: tstb _C_LABEL(ssir)
633: jeq Ltrap1
634: movw #SPL1,sr
635: tstb _C_LABEL(ssir)
636: jne Lsir1
637: Ltrap1:
638: movl sp@(FR_SP),a0 | grab and restore
639: movl a0,usp | user SP
640: moveml sp@+,#0x7FFF | restore most registers
641: addql #8,sp | pop SSP and align word
642: rte
643:
644: /*
645: * Trap 1 - sigreturn
646: */
| Trap 1: sigreturn -- tail-jump to the common m68k sigreturn code.
647: ENTRY_NOPROFILE(trap1)
648: jra _ASM_LABEL(sigreturn)
649:
650: /*
651: * Trap 2 - trace trap
652: */
| Trap 2: trace trap -- share the trace handler below.
653: ENTRY_NOPROFILE(trap2)
654: jra _C_LABEL(trace)
655:
656: /*
657: * Trap 12 is the entry point for the cachectl "syscall" (both HP-UX & BSD)
658: * cachectl(command, addr, length)
659: * command in d0, addr in a1, length in d1
660: */
/*
 * Trap 12: cachectl(command=d0, addr=a1, length=d1) pseudo-syscall;
 * marshals the register args plus curproc and calls the C handler.
 */
661: ENTRY_NOPROFILE(trap12)
662: movl d1,sp@- | push length
663: movl a1,sp@- | push addr
664: movl d0,sp@- | push command
665: movl CURPROC,sp@- | push proc pointer
666: jbsr _C_LABEL(cachectl) | do it
667: lea sp@(16),sp | pop args
668: jra _ASM_LABEL(rei) | all done
669:
670: /*
671: * Trace (single-step) trap. Kernel-mode is special.
672: * User mode traps are simply passed on to trap().
673: */
674: ENTRY_NOPROFILE(trace)
675: clrl sp@- | stack adjust count
676: moveml #0xFFFF,sp@-
677: moveq #T_TRACE,d0
678:
679: | Check PSW and see what happened.
680: | T=0 S=0 (should not happen)
681: | T=1 S=0 trace trap from user mode
682: | T=0 S=1 trace trap on a trap instruction
683: | T=1 S=1 trace trap from system mode (kernel breakpoint)
684:
685: movw sp@(FR_HW),d1 | get PSW
686: notw d1 | XXX no support for T0 on 680[234]0
| After notw, both bits clear <=> T=1 and S=1 in the original PSW.
687: andw #PSL_TS,d1 | from system mode (T=1, S=1)?
688: jeq Lkbrkpt | yes, kernel breakpoint
689: jra _ASM_LABEL(fault) | no, user-mode fault
690:
691: /*
692: * Trap 15 is used for:
693: * - GDB breakpoints (in user programs)
694: * - KGDB breakpoints (in the kernel)
695: * - trace traps for SUN binaries (not fully supported yet)
696: * User mode traps are simply passed to trap().
697: */
698: ENTRY_NOPROFILE(trap15)
699: clrl sp@- | stack adjust count
700: moveml #0xFFFF,sp@-
701: moveq #T_TRAP15,d0
702: movw sp@(FR_HW),d1 | get PSW
703: andw #PSL_S,d1 | from system mode?
704: jne Lkbrkpt | yes, kernel breakpoint
705: jra _ASM_LABEL(fault) | no, user-mode fault
706:
| Common kernel-debugger entry, shared with trace above.
707: Lkbrkpt: | Kernel-mode breakpoint or trace trap. (d0=trap_type)
708: | Save the system sp rather than the user sp.
709: movw #PSL_HIGHIPL,sr | lock out interrupts
710: lea sp@(FR_SIZE),a6 | Save stack pointer
711: movl a6,sp@(FR_SP) | from before trap
712:
713: | If we are not on tmpstk switch to it.
714: | (so debugger can change the stack pointer)
715: movl a6,d1
716: cmpl #_ASM_LABEL(tmpstk),d1
717: jls Lbrkpt2 | already on tmpstk
718: | Copy frame to the temporary stack
719: movl sp,a0 | a0=src
720: lea _ASM_LABEL(tmpstk)-96,a1 | a1=dst
721: movl a1,sp | sp=new frame
722: moveq #FR_SIZE,d1
723: Lbrkpt1:
724: movl a0@+,a1@+
725: subql #4,d1
726: bgt Lbrkpt1
727:
728: Lbrkpt2:
729: | Call the trap handler for the kernel debugger.
730: | Do not call trap() to do it, so that we can
731: | set breakpoints in trap() if we want. We know
732: | the trap type is either T_TRACE or T_BREAKPOINT.
733: | If we have both DDB and KGDB, let KGDB see it first,
734: | because KGDB will just return 0 if not connected.
735: | Save args in d2, a2
736: movl d0,d2 | trap type
737: movl sp,a2 | frame ptr
738: #ifdef KGDB
739: | Let KGDB handle it (if connected)
740: movl a2,sp@- | push frame ptr
741: movl d2,sp@- | push trap type
742: jbsr _C_LABEL(kgdb_trap) | handle the trap
743: addql #8,sp | pop args
744: cmpl #0,d0 | did kgdb handle it?
745: jne Lbrkpt3 | yes, done
746: #endif
747: #ifdef DDB
748: | Let DDB handle it
749: movl a2,sp@- | push frame ptr
750: movl d2,sp@- | push trap type
751: jbsr _C_LABEL(kdb_trap) | handle the trap
752: addql #8,sp | pop args
753: #endif
754: Lbrkpt3:
755: | The stack pointer may have been modified, or
756: | data below it modified (by kgdb push call),
757: | so push the hardware frame at the current sp
758: | before restoring registers and returning.
759:
760: movl sp@(FR_SP),a0 | modified sp
761: lea sp@(FR_SIZE),a1 | end of our frame
762: movl a1@-,a0@- | copy 2 longs with
763: movl a1@-,a0@- | ... predecrement
764: movl a0,sp@(FR_SP) | sp = h/w frame
765: moveml sp@+,#0x7FFF | restore all but sp
766: movl sp@,sp | ... and sp
767: rte | all done
768:
769: /* Use common m68k sigreturn */
770: #include <m68k/m68k/sigreturn.s>
771:
772: /*
773: * Interrupt handlers.
774: *
775: * Most 68k-based Macintosh computers
776: *
777: * Level 0: Spurious: ignored
778: * Level 1: VIA1 (clock, ADB)
779: * Level 2: VIA2 (NuBus, SCSI)
780: * Level 3:
781: * Level 4: Serial (SCC)
782: * Level 5:
783: * Level 6:
784: * Level 7: Non-maskable: parity errors, RESET button
785: *
786: * On the Q700, Q900 and Q950 in "A/UX mode": this should become:
787: *
788: * Level 0: Spurious: ignored
789: * Level 1: Software
790: * Level 2: VIA2 (except ethernet, sound)
791: * Level 3: Ethernet
792: * Level 4: Serial (SCC)
793: * Level 5: Sound
794: * Level 6: VIA1
795: * Level 7: NMIs: parity errors, RESET button, YANCC error
796: *
797: * On the 660AV and 840AV:
798: *
799: * Level 0: Spurious: ignored
800: * Level 1: VIA1 (clock, ADB)
801: * Level 2: VIA2 (NuBus, SCSI)
802: * Level 3: PSC device interrupt
803: * Level 4: PSC DMA and serial
804: * Level 5: ???
805: * Level 6: ???
806: * Level 7: NMIs: parity errors?, RESET button
807: */
808:
| Save/restore only the C-scratch registers d0-d1/a0-a1
| (push mask 0xC0C0, pop mask 0x0303) around interrupt handlers.
809: #define INTERRUPT_SAVEREG moveml #0xC0C0,sp@-
810: #define INTERRUPT_RESTOREREG moveml sp@+,#0x0303
811:
| Level 0 (spurious) interrupt: just count it and return.
812: ENTRY_NOPROFILE(spurintr)
813: addql #1,_C_LABEL(uvmexp)+UVMEXP_INTRS
814: jra _ASM_LABEL(rei)
815:
816: ENTRY_NOPROFILE(intrhand) /* levels 3 through 6 */
817: INTERRUPT_SAVEREG
818: movw sp@(22),sp@- | push exception vector info
819: clrw sp@-
820: jbsr _C_LABEL(intr_dispatch) | call dispatch routine
821: addql #4,sp
822: INTERRUPT_RESTOREREG
823: jra _ASM_LABEL(rei) | all done
824:
| Level 7 (non-maskable) interrupt: full register save, then nmihand().
825: ENTRY_NOPROFILE(lev7intr)
826: clrl sp@- | pad SR to longword
827: moveml #0xFFFF,sp@- | save registers
828: movl usp,a0 | and save
829: movl a0,sp@(FR_SP) | the user stack pointer
830: jbsr _C_LABEL(nmihand) | call handler
831: movl sp@(FR_SP),a0 | restore
832: movl a0,usp | user SP
833: moveml sp@+,#0x7FFF | and remaining registers
834: addql #8,sp | pop SSP and align word
835: jra _ASM_LABEL(rei)
836:
837: /*
838: * We could tweak rtclock_intr and gain 12 cycles on the 020 and 030 by
839: * saving the status register directly to the stack, but this would lose
840: * badly on the 040. Aligning the stack takes 10 more cycles than this
841: * code does, so it's a good compromise.
842: */
/*
 * Real-time clock interrupt, called (via intr_dispatch) from
 * intrhand with a6 still pointing at intr_dispatch's frame.
 * Raises IPL to clock level, reconstructs a pointer to the
 * hardware interrupt frame for hardclock(), and returns 1
 * ("handled") in d0.
 */
843: ENTRY_NOPROFILE(rtclock_intr)
844: movl d2,sp@- | save d2
845: movw sr,d2 | save SPL
846: movw _C_LABEL(mac68k_clockipl),sr | raise SPL to splclock()
847: movl a6@,a1 | unwind to frame in intr_dispatch
848: lea a1@(28),a1 | push pointer to interrupt frame
849: movl a1,sp@- | 28 = 16 for regs in intrhand,
850: | + 4 for args to intr_dispatch
851: | + 4 for return address to intrhand
852: | + 4 for value of A6
853: jbsr _C_LABEL(hardclock) | call generic clock int routine
854: addql #4,sp | pop params
855: movw d2,sr | restore SPL
856: movl sp@+,d2 | restore d2
857: movl #1,d0 | clock taken care of
858: rts | go back from whence we came
859:
860: /*
861: * Emulation of VAX REI instruction.
862: *
863: * This code deals with checking for and servicing ASTs
864: * (profiling, scheduling) and software interrupts (network, softclock).
865: * We check for ASTs first, just like the VAX. To avoid excess overhead
866: * the T_ASTFLT handling code will also check for software interrupts so we
867: * do not have to do it here. After identifing that we need an AST we
868: * drop the IPL to allow device interrupts.
869: *
870: * This code is complicated by the fact that sendsig may have been called
871: * necessitating a stack cleanup.
872: */
873:
| One-byte software-interrupt-request flag tested below.
874: BSS(ssir,1)
875:
/*
 * rei: common exception-return path (VAX REI emulation).
 * Delivers pending ASTs (only when returning to user mode) and
 * software interrupts through trap(), handling any sendsig stack
 * adjustment, then performs the real rte.
 */
876: ASENTRY_NOPROFILE(rei)
877: tstl _C_LABEL(astpending) | AST pending?
878: jeq Lchksir | no, go check for SIR
879: Lrei1:
| bit 5 of the saved SR is the S bit; set means we trapped
| from kernel mode, so no AST delivery.
880: btst #5,sp@ | yes, are we returning to user mode?
881: jne Lchksir | no, go check for SIR
882: movw #PSL_LOWIPL,sr | lower SPL
883: clrl sp@- | stack adjust
884: moveml #0xFFFF,sp@- | save all registers
885: movl usp,a1 | including
886: movl a1,sp@(FR_SP) | the users SP
887: Lrei2:
888: clrl sp@- | VA == none
889: clrl sp@- | code == none
890: movl #T_ASTFLT,sp@- | type == async system trap
891: jbsr _C_LABEL(trap) | go handle it
892: lea sp@(12),sp | pop value args
893: movl sp@(FR_SP),a0 | restore user SP
894: movl a0,usp | from save area
895: movw sp@(FR_ADJ),d0 | need to adjust stack?
896: jne Laststkadj | yes, go to it
897: moveml sp@+,#0x7FFF | no, restore most user regs
898: addql #8,sp | toss SP and stack adjust
899: rte | and do real RTE
900: Laststkadj:
901: lea sp@(FR_HW),a1 | pointer to HW frame
902: addql #8,a1 | source pointer
903: movl a1,a0 | source
904: addw d0,a0 | + hole size = dest pointer
905: movl a1@-,a0@- | copy
906: movl a1@-,a0@- | 8 bytes
907: movl a0,sp@(FR_SP) | new SSP
908: moveml sp@+,#0x7FFF | restore user registers
909: movl sp@,sp | and our SP
910: rte | and do real RTE
911: Lchksir:
912: tstb _C_LABEL(ssir) | SIR pending?
913: jeq Ldorte | no, all done
914: movl d0,sp@- | need a scratch register
915: movw sp@(4),d0 | get SR
916: andw #PSL_IPL7,d0 | mask all but IPL
917: jne Lnosir | came from interrupt, no can do
918: movl sp@+,d0 | restore scratch register
919: Lgotsir:
920: movw #SPL1,sr | prevent others from servicing int
921: tstb _C_LABEL(ssir) | too late?
922: jeq Ldorte | yes, oh well...
923: clrl sp@- | stack adjust
924: moveml #0xFFFF,sp@- | save all registers
925: movl usp,a1 | including
926: movl a1,sp@(FR_SP) | the users SP
927: Lsir1:
928: clrl sp@- | VA == none
929: clrl sp@- | code == none
930: movl #T_SSIR,sp@- | type == software interrupt
931: jbsr _C_LABEL(trap) | go handle it
932: lea sp@(12),sp | pop value args
933: movl sp@(FR_SP),a0 | restore
934: movl a0,usp | user SP
935: moveml sp@+,#0x7FFF | and all remaining registers
936: addql #8,sp | pop SP and stack adjust
937: rte
938: Lnosir:
939: movl sp@+,d0 | restore scratch register
940: Ldorte:
941: rte | real return
942:
943: /*
944: * Use common m68k sigcode.
945: */
946: #include <m68k/m68k/sigcode.s>
947:
948: /*
949: * Primitives
950: */
951:
952: /*
953: * Use common m68k support routines.
954: */
955: #include <m68k/m68k/support.s>
956:
957: /*
958: * Use common m68k process manipulation routines.
959: */
960: #include <m68k/m68k/proc_subr.s>
961:
	.data
GLOBAL(curpcb)
	.long	0			| pcb of the currently running process

| Throw-away pcb: switch_exit dumps the dying process's state here.
ASBSS(nullpcb,SIZEOF_PCB)
967:
/*
 * switch_exit(p)
 *
 * At exit of a process, do a switch for the last time.
 * Switch to a safe stack and PCB, and select a new process to run. The
 * old stack and u-area will be freed by the reaper.  Never returns.
 */
ENTRY(switch_exit)
	movl	sp@(4),a0		| a0 = exiting proc (argument)
	/* save state into garbage pcb */
	movl	#_ASM_LABEL(nullpcb),_C_LABEL(curpcb)
	lea	_ASM_LABEL(tmpstk),sp	| goto a tmp stack

	/* Schedule the vmspace and stack to be freed. */
	movl	a0,sp@-			| exit2(p)
	jbsr	_C_LABEL(exit2)
	lea	sp@(4),sp		| pop args

	jra	_C_LABEL(cpu_switch)	| pick a new process; no return
985:
/*
 * When no processes are on the runq, Swtch branches to Idle
 * to wait for something to come ready.
 */
ASENTRY_NOPROFILE(Idle)
	stop	#PSL_LOWIPL		| drop IPL and wait for an interrupt
	movw	#PSL_HIGHIPL,sr		| interrupts back off while we look
	movl	_C_LABEL(whichqs),d0	| any run queue non-empty?
	jeq	_ASM_LABEL(Idle)	| no, keep waiting
	jra	Lsw1			| yes, go pick a process
996:
| Run-queue inconsistency (empty queue whose whichqs bit was set, or a
| process in the wrong state under DIAGNOSTIC): unrecoverable, panic.
Lbadsw:
	PANIC("switch")
	/*NOTREACHED*/
1000:
/*
 * cpu_switch()
 *
 * Save the current process's context in its pcb, pick the highest
 * priority process from the run queues (idling if none), restore its
 * context and resume it.  Returns 1 in d0 (for alternate returns;
 * savectx returns 0).
 *
 * NOTE: On the mc68851 (318/319/330) we attempt to avoid flushing the
 * entire ATC. The effort involved in selective flushing may not be
 * worth it, maybe we should just flush the whole thing?
 *
 * NOTE 2: With the new VM layout we now no longer know if an inactive
 * user's PTEs have been changed (formerly denoted by the SPTECHG p_flag
 * bit). For now, we just always flush the full ATC.
 */
ENTRY(cpu_switch)
	movl	_C_LABEL(curpcb),a0	| current pcb
	movw	sr,a0@(PCB_PS)		| save sr before changing ipl
#ifdef notyet
	movl	CURPROC,sp@-		| remember last proc running
#endif
	clrl	CURPROC

	/*
	 * Find the highest-priority queue that isn't empty,
	 * then take the first proc from that queue.
	 */
	movw	#PSL_HIGHIPL,sr		| lock out interrupts
	movl	_C_LABEL(whichqs),d0
	jeq	_ASM_LABEL(Idle)
Lsw1:
	movl	d0,d1
	negl	d0
	andl	d1,d0			| d0 = d1 & -d1: isolate lowest set bit
	bfffo	d0{#0:#32},d1		| d1 = bit offset counted from msb
	eorib	#31,d1			| convert to queue number (31 - offset)

	movl	d1,d0
	lslb	#3,d1			| convert queue number to index
	addl	#_C_LABEL(qs),d1	| locate queue (q)
	movl	d1,a1
	movl	a1@(P_FORW),a0		| p = q->p_forw
	cmpal	d1,a0			| anyone on queue?
	jeq	Lbadsw			| no, panic
	movl	a0@(P_FORW),a1@(P_FORW)	| q->p_forw = p->p_forw
	movl	a0@(P_FORW),a1		| n = p->p_forw
	movl	d1,a1@(P_BACK)		| n->p_back = q
	cmpal	d1,a1			| anyone left on queue?
	jne	Lsw2			| yes, skip
	movl	_C_LABEL(whichqs),d1
	bclr	d0,d1			| no, clear bit
	movl	d1,_C_LABEL(whichqs)
Lsw2:
	movl	a0,CURPROC
	clrl	_C_LABEL(want_resched)
#ifdef notyet
	movl	sp@+,a1
	cmpl	a0,a1			| switching to same proc?
	jeq	Lswdone			| yes, skip save and restore
#endif
	/*
	 * Save state of previous process in its pcb.
	 */
	movl	_C_LABEL(curpcb),a1
	moveml	#0xFCFC,a1@(PCB_REGS)	| save non-scratch registers
	movl	usp,a2			| grab USP (a2 has been saved)
	movl	a2,a1@(PCB_USP)		| and save it

	tstl	_C_LABEL(fputype)	| Do we have an FPU?
	jeq	Lswnofpsave		| No  Then don't attempt save.
	lea	a1@(PCB_FPCTX),a2	| pointer to FP save area
	fsave	a2@			| save FP state
	tstb	a2@			| null state frame?
	jeq	Lswnofpsave		| yes, all done
	fmovem	fp0-fp7,a2@(FPF_REGS)		| save FP general registers
	fmovem	fpcr/fpsr/fpi,a2@(FPF_FPCR)	| save FP control registers
Lswnofpsave:

#ifdef DIAGNOSTIC
	tstl	a0@(P_WCHAN)		| new proc must not be asleep
	jne	Lbadsw
	cmpb	#SRUN,a0@(P_STAT)	| and must be runnable
	jne	Lbadsw
#endif
	movb	#SONPROC,a0@(P_STAT)
	clrl	a0@(P_BACK)		| clear back link
	movl	a0@(P_ADDR),a1		| get p_addr
	movl	a1,_C_LABEL(curpcb)

	/*
	 * Activate the process's address space.
	 * XXX Should remember the last USTP value loaded, and call this
	 * XXX only if it has changed.
	 */
	pea	a0@			| push proc
	jbsr	_C_LABEL(pmap_activate)	| pmap_activate(p)
	addql	#4,sp
	movl	_C_LABEL(curpcb),a1	| restore p_addr

	lea	_ASM_LABEL(tmpstk),sp	| now goto a tmp stack for NMI

	moveml	a1@(PCB_REGS),#0xFCFC	| and registers
	movl	a1@(PCB_USP),a0
	movl	a0,usp			| and USP

	tstl	_C_LABEL(fputype)	| If we don't have an FPU,
	jeq	Lnofprest		|  don't try to restore it.
	lea	a1@(PCB_FPCTX),a0	| pointer to FP save area
	tstb	a0@			| null state frame?
	jeq	Lresfprest		| yes, easy
#if defined(M68040)
#if defined(M68020) || defined(M68030)
	cmpl	#MMU_68040,_C_LABEL(mmutype)	| 68040?
	jne	Lresnot040		| no, skip
#endif
	clrl	sp@-			| yes... frestore of a null frame
	frestore sp@+			| ...resets the FPU (magic!)
Lresnot040:
#endif
	fmovem	a0@(FPF_FPCR),fpcr/fpsr/fpi	| restore FP control registers
	fmovem	a0@(FPF_REGS),fp0-fp7		| restore FP general registers
Lresfprest:
	frestore a0@			| restore state

Lnofprest:
	movw	a1@(PCB_PS),sr		| no, restore PS
	moveq	#1,d0			| return 1 (for alternate returns)
	rts
1125:
/*
 * savectx(pcb)
 * Update pcb, saving current processor state.
 * Returns 0 in d0 (cpu_switch's alternate return yields 1).
 */
ENTRY(savectx)
	movl	sp@(4),a1		| a1 = pcb argument
	movw	sr,a1@(PCB_PS)		| save status register
	movl	usp,a0			| grab USP
	movl	a0,a1@(PCB_USP)		| and save it
	moveml	#0xFCFC,a1@(PCB_REGS)	| save non-scratch registers

	tstl	_C_LABEL(fputype)	| Do we have FPU?
	jeq	Lsvnofpsave		| No? Then don't save state.
	lea	a1@(PCB_FPCTX),a0	| pointer to FP save area
	fsave	a0@			| save FP state
	tstb	a0@			| null state frame?
	jeq	Lsvnofpsave		| yes, all done
	fmovem	fp0-fp7,a0@(FPF_REGS)		| save FP general registers
	fmovem	fpcr/fpsr/fpi,a0@(FPF_FPCR)	| save FP control registers
Lsvnofpsave:
	moveq	#0,d0			| return 0
	rts
1148:
#if defined(M68040)
/*
 * suline(dst, src) -- 68040 only.
 *
 * Copy one 16-byte line (4 longwords) from src to dst, writing through
 * the alternate address space via movs (destination space selected by
 * the current DFC -- presumably user data; confirm at call sites).
 * Faults are caught via the pcb onfault hook.
 * Returns 0 in d0 on success, -1 if a fault occurred.
 */
ENTRY(suline)
	movl	sp@(4),a0		| address to write
	movl	_C_LABEL(curpcb),a1	| current pcb
	movl	#Lslerr,a1@(PCB_ONFAULT) | where to return to on a fault
	movl	sp@(8),a1		| address of line
	movl	a1@+,d0			| get lword
	movsl	d0,a0@+			| put lword
	nop				| sync
	movl	a1@+,d0			| get lword
	movsl	d0,a0@+			| put lword
	nop				| sync
	movl	a1@+,d0			| get lword
	movsl	d0,a0@+			| put lword
	nop				| sync
	movl	a1@+,d0			| get lword
	movsl	d0,a0@+			| put lword
	nop				| sync
	moveq	#0,d0			| indicate no fault
	jra	Lsldone
Lslerr:
	moveq	#-1,d0			| fault: return failure
Lsldone:
	movl	_C_LABEL(curpcb),a1	| current pcb
	clrl	a1@(PCB_ONFAULT)	| clear fault address
	rts
#endif
1176:
/*
 * Invalidate entire TLB.
 */
ASENTRY_NOPROFILE(TBIA)
#if defined(M68040)
	cmpl	#MMU_68040,_C_LABEL(mmutype)	| 68040?
	jne	Lmotommu3		| no, skip
	.word	0xf518			| yes, pflusha (040 encoding)
	rts
Lmotommu3:
#endif
	pflusha				| flush entire TLB (68851/68030)
#if defined(M68020)
	tstl	_C_LABEL(mmutype)
	jgt	Ltbia851		| 68851 implies no d-cache
#endif
	movl	#DC_CLEAR,d0
	movc	d0,cacr			| invalidate on-chip d-cache
Ltbia851:
	rts
1197:
/*
 * Invalidate any TLB entry for given VA (TB Invalidate Single).
 * Flushes both the user and supervisor side entry for the address.
 */
ENTRY(TBIS)
	movl	sp@(4),a0		| a0 = VA to flush
#if defined(M68040)
	cmpl	#MMU_68040,_C_LABEL(mmutype)	| 68040?
	jne	Lmotommu4		| no, skip
	movc	dfc,d1			| save DFC around the flushes
	moveq	#FC_USERD,d0		| user space
	movc	d0,dfc
	.word	0xf508			| pflush a0@
	moveq	#FC_SUPERD,d0		| supervisor space
	movc	d0,dfc
	.word	0xf508			| pflush a0@
	movc	d1,dfc			| restore DFC
	rts
Lmotommu4:
#endif
#if defined(M68020)
	tstl	_C_LABEL(mmutype)
	jle	Ltbis851		| <= 0 means 68030 on-chip MMU
	pflushs	#0,#0,a0@		| flush address from both sides
	rts
Ltbis851:
#endif
	pflush	#0,#0,a0@		| flush address from both sides
	movl	#DC_CLEAR,d0
	movc	d0,cacr			| invalidate on-chip data cache
	rts
1228:
/*
 * Invalidate supervisor side of TLB.
 */
ENTRY(TBIAS)
#if defined(M68040)
	cmpl	#MMU_68040,_C_LABEL(mmutype)	| 68040?
	jne	Lmotommu5		| no, skip
	.word	0xf518			| yes, pflusha (for now) XXX
	rts
Lmotommu5:
#endif
#if defined(M68020)
	tstl	_C_LABEL(mmutype)
	jle	Ltbias851		| <= 0 means 68030 on-chip MMU
	pflushs	#4,#4			| flush supervisor TLB entries
	rts
Ltbias851:
#endif
	pflush	#4,#4			| flush supervisor TLB entries
	movl	#DC_CLEAR,d0
	movc	d0,cacr			| invalidate on-chip d-cache
	rts
1251:
#if defined(COMPAT_HPUX)
/*
 * Invalidate user side of TLB.
 *
 * Fix: the 68040 path was missing its "rts" (present in TBIA/TBIAS/TBIS
 * above), so after the 040 pflusha it fell through Lmotommu6 into the
 * 68851/68030-style pflush below, which is not a valid instruction
 * encoding on the 68040.
 */
ENTRY(TBIAU)
#if defined(M68040)
	cmpl	#MMU_68040,_C_LABEL(mmutype)	| 68040?
	jne	Lmotommu6		| no, skip
	.word	0xf518			| yes, pflusha (for now) XXX
	rts				| done; must not fall through
Lmotommu6:
#endif
#if defined(M68020)
	tstl	_C_LABEL(mmutype)
	jle	Ltbiau851		| <= 0 means 68030 on-chip MMU
	pflush	#0,#4			| flush user TLB entries
	rts
Ltbiau851:
#endif
	pflush	#0,#4			| flush user TLB entries
	movl	#DC_CLEAR,d0
	movc	d0,cacr			| invalidate on-chip d-cache
	rts
#endif /* COMPAT_HPUX */
1275:
/*
 * Invalidate instruction cache.
 */
ENTRY(ICIA)
#if defined(M68040)
	cmpl	#MMU_68040,_C_LABEL(mmutype)	| 68040?
	jne	Lmotommu7		| no, skip
	.word	0xf498			| cinva ic (hand-assembled)
	rts
Lmotommu7:
#endif
	movl	#IC_CLEAR,d0
	movc	d0,cacr			| invalidate i-cache
	rts
1290:
/*
 * Invalidate data cache.
 *
 * NOTE: we do not flush 68030 on-chip cache as there are no aliasing
 * problems with DC_WA. The only cases we have to worry about are context
 * switch and TLB changes, both of which are handled "in-line" in resume
 * and TBI*.
 * Because of this, since there is no way on 68040 and 68060 to flush
 * user and supervisor modes specifically, DCIS and DCIU are the same entry
 * point as DCIA.
 */
ENTRY(DCIA)
ENTRY(DCIS)
ENTRY(DCIU)
#if defined(M68040)
	cmpl	#MMU_68040,_C_LABEL(mmutype)	| 68040 or 68060?
	jgt	1f			| no, skip
	.word	0xf478			| cpusha dc (hand-assembled)
1:
#endif
	rts
1312:
#ifdef M68040
/*
 * 68040-only cache primitives.  The cinv/cpush instructions are
 * hand-assembled (.word) because the assembler lacks the mnemonics;
 * each takes the physical address argument in a0 where applicable.
 */
ENTRY(ICPA)				| invalidate entire instruction cache
	.word	0xf498			| cinva ic
	rts
ENTRY(DCFA)				| push (flush) entire data cache
	.word	0xf478			| cpusha dc
	rts
ENTRY(ICPL)	/* invalidate instruction physical cache line */
	movl	sp@(4),a0		| address
	.word	0xf488			| cinvl ic,a0@
	rts
ENTRY(ICPP)	/* invalidate instruction physical cache page */
	movl	sp@(4),a0		| address
	.word	0xf490			| cinvp ic,a0@
	rts
ENTRY(DCPL)	/* invalidate data physical cache line */
	movl	sp@(4),a0		| address
	.word	0xf448			| cinvl dc,a0@
	rts
ENTRY(DCPP)	/* invalidate data physical cache page */
	movl	sp@(4),a0		| address
	.word	0xf450			| cinvp dc,a0@
	rts
ENTRY(DCFL)	/* data cache flush line */
	movl	sp@(4),a0		| address
	.word	0xf468			| cpushl dc,a0@
	rts
ENTRY(DCFP)	/* data cache flush page */
	movl	sp@(4),a0		| address
	.word	0xf470			| cpushp dc,a0@
	rts
#endif /* M68040 */
1345:
| Invalidate ("purge") the data cache: cpusha dc on 68040,
| cacr DC_CLEAR otherwise.
ENTRY(PCIA)
#if defined(M68040)
	cmpl	#MMU_68040,_C_LABEL(mmutype)	| 68040?
	jne	LmotommuB		| no, skip
	.word	0xf478			| cpusha dc (hand-assembled)
	rts
LmotommuB:
#endif
	movl	#DC_CLEAR,d0
	movc	d0,cacr			| invalidate on-chip d-cache
	rts
1357:
| Return the source function code register (sfc) in d0.
ENTRY_NOPROFILE(getsfc)
	movc	sfc,d0
	rts

| Return the destination function code register (dfc) in d0.
ENTRY_NOPROFILE(getdfc)
	movc	dfc,d0
	rts
1365:
/*
 * loadustp(ustp)
 *
 * Load a new user segment table pointer.  The argument is a page
 * frame number (shifted left by PGSHIFT to form the address), loaded
 * into URP on the 68040 or via the CRP prototype on 68851/68030,
 * flushing the TLB (and caches, on the latter) in the process.
 */
ENTRY(loadustp)
	movl	sp@(4),d0		| new USTP
	moveq	#PGSHIFT,d1
	lsll	d1,d0			| convert to addr
#if defined(M68040)
	cmpl	#MMU_68040,_C_LABEL(mmutype)	| 68040?
	jne	LmotommuC		| no, skip
	.word	0xf518			| pflusha
	.long	0x4e7b0806		| movec d0, URP (hand-assembled)
	rts
LmotommuC:
#endif
	pflusha				| flush entire TLB
	lea	_C_LABEL(protorp),a0	| CRP prototype
	movl	d0,a0@(4)		| stash USTP
	pmove	a0@,crp			| load root pointer
	movl	#CACHE_CLR,d0
	movc	d0,cacr			| invalidate cache(s)
	rts
1388:
/*
 * Set processor priority level calls. Most are implemented with
 * inline asm expansions. However, spl0 requires special handling
 * as we need to check for our emulated software interrupts.
 * Returns the old SR in d0.  If an SIR is pending, a fake exception
 * frame is built on the stack so Lgotsir (above) can rte back here.
 */

ENTRY(spl0)
	moveq	#0,d0
	movw	sr,d0			| get old SR for return
	movw	#PSL_LOWIPL,sr		| restore new SR
	tstb	_C_LABEL(ssir)		| software interrupt pending?
	jeq	Lspldone		| no, all done
	subql	#4,sp			| make room for RTE frame
	movl	sp@(4),sp@(2)		| position return address
	clrw	sp@(6)			| set frame type 0
	movw	#PSL_LOWIPL,sp@		| and new SR
	jra	Lgotsir			| go handle it
Lspldone:
	rts
1408:
/*
 * m68881_save(fpframe *)
 *
 * Save 68881 state into the given save area: fsave the internal
 * state frame, then (unless it is a null frame, i.e. FPU unused)
 * the general and control registers.
 * Pretty awful looking since our assembler does not
 * recognize FP mnemonics.
 */
ENTRY(m68881_save)
	movl	sp@(4),a0		| save area pointer
	fsave	a0@			| save state
	tstb	a0@			| null state frame?
	jeq	Lm68881sdone		| yes, all done
	fmovem	fp0-fp7,a0@(FPF_REGS)		| save FP general registers
	fmovem	fpcr/fpsr/fpi,a0@(FPF_FPCR)	| save FP control registers
Lm68881sdone:
	rts
1423:
/*
 * m68881_restore(fpframe *)
 *
 * Restore 68881 state from the given save area: registers first
 * (unless the saved frame is null), then frestore the state frame.
 */
ENTRY(m68881_restore)
	movl	sp@(4),a0		| save area pointer
	tstb	a0@			| null state frame?
	jeq	Lm68881rdone		| yes, easy
	fmovem	a0@(FPF_FPCR),fpcr/fpsr/fpi	| restore FP control registers
	fmovem	a0@(FPF_REGS),fp0-fp7		| restore FP general registers
Lm68881rdone:
	frestore a0@			| restore state
	rts
1433:
/*
 * delay() - delay for a specified number of microseconds
 * _delay() - calibrator helper for delay()
 *
 * Notice that delay_factor is scaled up by a factor of 128 to avoid loss
 * of precision for small delays. As a result of this we need to avoid
 * overflow.
 *
 * The branch target for the loops must be aligned on a half-line (8-byte)
 * boundary to minimize cache effects. This guarantees both that there
 * will be no prefetch stalls due to cache line burst operations and that
 * the loops will run from a single cache half-line.
 */
	.balign	8			| align to half-line boundary

ALTENTRY(_delay, _delay)
ENTRY(delay)
	movl	sp@(4),d0		| get microseconds to delay
	cmpl	#0x40000,d0		| is it a "large" delay?
	bls	Ldelayshort		| no, normal calculation
	movql	#0x7f,d1		| adjust for scaled multiplier (to
	addl	d1,d0			|  avoid overflow)
	lsrl	#7,d0			| pre-divide by 128 before multiply
	mulul	_C_LABEL(delay_factor),d0 | calculate number of loop iterations
	bra	Ldelaysetup		| go do it!
Ldelayshort:
	mulul	_C_LABEL(delay_factor),d0 | calculate number of loop iterations
	lsrl	#7,d0			| adjust for scaled multiplier
Ldelaysetup:
	jeq	Ldelayexit		| bail out if nothing to do
	movql	#0,d1			| put bits 15-0 in d1 for the
	movw	d0,d1			| inner loop, and move bits
	movw	#0,d0			| 31-16 to the low-order word
	subql	#1,d1			| of d0 for the outer loop
	swap	d0
Ldelay:
	tstl	_C_LABEL(delay_flag)	| this never changes for delay()!
	dbeq	d1,Ldelay		| (used only for timing purposes)
	dbeq	d0,Ldelay
	addqw	#1,d1			| adjust end count and
	swap	d0			| return the longword result
	orl	d1,d0
Ldelayexit:
	rts
1478:
/*
 * Handle the nitty-gritty of rebooting the machine.
 * Basically we just turn off the MMU and jump to the appropriate ROM routine.
 * Note that we must be running in an address range that is mapped one-to-one
 * logical to physical so that the PC is still valid immediately after the MMU
 * is turned off. We have conveniently mapped the last page of physical
 * memory this way.
 *
 * The code between Lbootcode and Lebootcode is copied to that last page
 * and executed from there.
 */
ENTRY_NOPROFILE(doboot)
#if defined(M68040)
	cmpl	#MMU_68040,_C_LABEL(mmutype)	| 68040?
	jeq	Lnocache5		| yes, skip (cacr bits differ on 040)
#endif
	movl	#CACHE_OFF,d0
	movc	d0,cacr			| disable on-chip cache(s)
Lnocache5:
	movl	_C_LABEL(maxaddr),a0	| last page of physical memory
	lea	Lbootcode,a1		| start of boot code
	lea	Lebootcode,a3		| end of boot code
Lbootcopy:
	movw	a1@+,a0@+		| copy a word
	cmpl	a3,a1			| done yet?
	jcs	Lbootcopy		| no, keep going
#if defined(M68040)
	cmpl	#MMU_68040,_C_LABEL(mmutype)	| 68040?
	jne	LmotommuE		| no, skip
	.word	0xf4f8			| cpusha bc: push copied code to memory
LmotommuE:
#endif
	movl	_C_LABEL(maxaddr),a0
	jmp	a0@			| jump to last page

Lbootcode:
	lea	a0@(0x800),sp		| physical SP in case of NMI
	movl	_C_LABEL(MacOSROMBase),a1	| Load MacOS ROMBase

#if defined(M68040)
	cmpl	#MMU_68040,_C_LABEL(mmutype)	| 68040?
	jne	LmotommuF		| no, skip
	movl	#0,d0
	movc	d0,cacr			| caches off
	.long	0x4e7b0003		| movc d0,tc (disable MMU)
	jra	Ldoboot1
LmotommuF:
#endif
	movl	#0,a3@			| value for pmove to TC (turn off MMU)
	pmove	a3@,tc			| disable MMU

Ldoboot1:
	lea	a1@(0x90),a1		| offset of ROM reset routine
	jmp	a1@			| and jump to ROM to reset machine
Lebootcode:
1531:
/*
 * u_long ptest040(caddr_t addr, u_int fc);
 *
 * ptest040() does an 040 PTESTR (addr) and returns the 040 MMUSR iff
 * translation is enabled. This allows us to find the physical address
 * corresponding to a MacOS logical address for get_physical().
 * Returns 0 (in d0) if the MMU is disabled or M68040 is not configured.
 * sar 01-oct-1996
 */
ENTRY_NOPROFILE(ptest040)
#if defined(M68040)
	.long	0x4e7a0003		| movc tc,d0 (hand-assembled)
	andw	#0x8000,d0		| TC enable bit set?
	jeq	Lget_phys1		| MMU is disabled
	movc	dfc,d1			| Save DFC
	movl	sp@(8),d0		| Set FC for ptestr
	movc	d0,dfc
	movl	sp@(4),a0		| logical address to look up
	.word	0xf568			| ptestr (a0)
	.long	0x4e7a0805		| movc mmusr,d0
	movc	d1,dfc			| Restore DFC
	rts
Lget_phys1:
#endif
	movql	#0,d0			| return failure
	rts
1557:
/*
 * LAK: (7/24/94) This routine was added so that the
 * C routine that runs at startup can figure out how MacOS
 * had mapped memory. We want to keep the same mapping so
 * that when we set our MMU pointer, the PC doesn't point
 * in the middle of nowhere.
 *
 * long get_pte(void *addr, unsigned long pte[2], unsigned short *psr)
 *
 * Takes "addr" and looks it up in the current MMU pages. Puts
 * the PTE of that address in "pte" and the result of the
 * search in "psr". "pte" should be 2 longs in case it is
 * a long-format entry.
 *
 * One possible problem here is that setting the TT register
 * may screw something up if we access user data space in a
 * called function or in an interrupt service routine.
 *
 * Returns -1 on error, 0 if pte is a short-format pte, or
 * 1 if pte is a long-format pte.
 *
 * Be sure to only call this routine if the MMU is enabled. This
 * routine is probably more general than it needs to be -- it
 * could simply return the physical address (replacing
 * get_physical() in machdep).
 *
 * "gas" does not understand the tt0 register, so we must hand-
 * assemble the instructions.
 */
ENTRY_NOPROFILE(get_pte)
	subql	#4,sp			| make temporary space (psr scratch)

	| Enable a transparent translation window so we can read the
	| MacOS page tables through physical addresses.
	lea	_ASM_LABEL(longscratch),a0
	movl	#0x00ff8710,a0@		| Set up FC 1 r/w access
	.long	0xf0100800		| pmove a0@,tt0

	movl	sp@(8),a0		| logical address to look up
	movl	#0,a1			| clear in case of failure
	ptestr	#FC_USERD,a0@,#7,a1	| search for logical address
	pmove	psr,sp@			| store processor status register
	movw	sp@,d1
	movl	sp@(16),a0		| where to store the psr
	movw	d1,a0@			| send back to caller
	andw	#0xc400,d1		| if bus error, exceeded limit, or invalid
	jne	get_pte_fail1		| leave now
	tstl	a1			| check address we got back
	jeq	get_pte_fail2		| if 0, then was not set -- fail

	movl	a1,d0
	movl	d0,_ASM_LABEL(pte_tmp)	| save for later

	| send first long back to user
	movl	sp@(12),a0		| address of where to put pte
	movsl	a1@,d0			|
	movl	d0,a0@			| first long

	andl	#3,d0			| dt bits of pte
	cmpl	#1,d0			| should be 1 if page descriptor
	jne	get_pte_fail3		| if not, get out now

	| Re-run the table search one level short to find the parent
	| descriptor, which tells us whether the pte is short or long.
	movl	sp@(16),a0		| addr of stored psr
	movw	a0@,d0			| get psr again
	andw	#7,d0			| number of levels it found
	addw	#-1,d0			| find previous level
	movl	sp@(8),a0		| logical address to look up
	movl	#0,a1			| clear in case of failure

	| ptestr takes only an immediate level, hence this dispatch.
	cmpl	#0,d0
	jeq	pte_level_zero
	cmpl	#1,d0
	jeq	pte_level_one
	cmpl	#2,d0
	jeq	pte_level_two
	cmpl	#3,d0
	jeq	pte_level_three
	cmpl	#4,d0
	jeq	pte_level_four
	cmpl	#5,d0
	jeq	pte_level_five
	cmpl	#6,d0
	jeq	pte_level_six
	jra	get_pte_fail4		| really should have been one of these...

pte_level_zero:
	| must get CRP to get length of entries at first level
	lea	_ASM_LABEL(longscratch),a0 | space for two longs
	pmove	crp,a0@			| save root pointer
	movl	a0@,d0			| load high long
	jra	pte_got_parent
pte_level_one:
	ptestr	#FC_USERD,a0@,#1,a1	| search for logical address
	pmove	psr,sp@			| store processor status register
	movw	sp@,d1
	jra	pte_got_it
pte_level_two:
	ptestr	#FC_USERD,a0@,#2,a1	| search for logical address
	pmove	psr,sp@			| store processor status register
	movw	sp@,d1
	jra	pte_got_it
pte_level_three:
	ptestr	#FC_USERD,a0@,#3,a1	| search for logical address
	pmove	psr,sp@			| store processor status register
	movw	sp@,d1
	jra	pte_got_it
pte_level_four:
	ptestr	#FC_USERD,a0@,#4,a1	| search for logical address
	pmove	psr,sp@			| store processor status register
	movw	sp@,d1
	jra	pte_got_it
pte_level_five:
	ptestr	#FC_USERD,a0@,#5,a1	| search for logical address
	pmove	psr,sp@			| store processor status register
	movw	sp@,d1
	jra	pte_got_it
pte_level_six:
	ptestr	#FC_USERD,a0@,#6,a1	| search for logical address
	pmove	psr,sp@			| store processor status register
	movw	sp@,d1

pte_got_it:
	andw	#0xc400,d1		| if bus error, exceeded limit, or invalid
	jne	get_pte_fail5		| leave now
	tstl	a1			| check address we got back
	jeq	get_pte_fail6		| if 0, then was not set -- fail

	movsl	a1@,d0			| get pte of parent
	movl	d0,_C_LABEL(macos_tt0)	| XXX for later analysis (kill me)
pte_got_parent:
	andl	#3,d0			| dt bits of pte
	cmpl	#2,d0			| child is short-format descriptor
	jeq	short_format
	cmpl	#3,d0			| child is long-format descriptor
	jne	get_pte_fail7

	| long_format -- we must go back, change the tt, and get the
	| second long. The reason we didn't do this in the first place
	| is that the first long might have been the last long of RAM.

	movl	_ASM_LABEL(pte_tmp),a1	| get address of our original pte
	addql	#4,a1			| address of its second long

	| send second long back to user
	movl	sp@(12),a0		| address of where to put pte
	movsl	a1@,d0			|
	movl	d0,a0@(4)		| write in second long

	movql	#1,d0			| return long-format
	jra	get_pte_success

short_format:
	movql	#0,d0			| return short-format
	jra	get_pte_success

#ifndef DEBUG
| Without DEBUG all failure paths collapse into a single exit.
get_pte_fail1:
get_pte_fail2:
get_pte_fail3:
get_pte_fail4:
get_pte_fail5:
get_pte_fail6:
get_pte_fail7:
get_pte_fail8:
get_pte_fail9:
get_pte_fail10:
#endif
get_pte_fail:
	movql	#-1,d0			| return failure

get_pte_success:
	lea	_ASM_LABEL(longscratch),a0 | disable tt
	movl	#0,a0@
	.long	0xf0100800		| pmove a0@,tt0

	addql	#4,sp			| return temporary space
	rts

#ifdef DEBUG
| Each failure entry prints N stars (fail1 prints 10, fail10 prints 1)
| by falling through the chain, then exits via get_pte_fail.
get_pte_fail10:
	jbsr	_C_LABEL(printstar)
get_pte_fail9:
	jbsr	_C_LABEL(printstar)
get_pte_fail8:
	jbsr	_C_LABEL(printstar)
get_pte_fail7:
	jbsr	_C_LABEL(printstar)
get_pte_fail6:
	jbsr	_C_LABEL(printstar)
get_pte_fail5:
	jbsr	_C_LABEL(printstar)
get_pte_fail4:
	jbsr	_C_LABEL(printstar)
get_pte_fail3:
	jbsr	_C_LABEL(printstar)
get_pte_fail2:
	jbsr	_C_LABEL(printstar)
get_pte_fail1:
	jbsr	_C_LABEL(printstar)
	jra	get_pte_fail
#endif
1757:
/*
 * Misc. global variables.
 */
	.data
GLOBAL(machineid)
	.long	0			| machine identifier (set outside this file)

GLOBAL(mmutype)
	.long	MMU_68851		| default to 68851 PMMU

GLOBAL(cputype)
	.long	CPU_68020		| default to 68020 CPU

GLOBAL(fputype)
	.long	FPU_68882		| default to 68882 FPU

GLOBAL(protorp)
	.long	0,0			| prototype root pointer

GLOBAL(cold)
	.long	1			| cold start flag

GLOBAL(want_resched)
	.long	0			| nonzero when a reschedule is wanted

GLOBAL(proc0paddr)
	.long	0			| KVA of proc0 u-area

GLOBAL(intiolimit)
	.long	0			| KVA of end of internal IO space

GLOBAL(load_addr)
	.long	0			| Physical address of kernel

ASLOCAL(lastpage)
	.long	0			| LAK: to store the addr of last page in mem

GLOBAL(MacOSROMBase)
	.long	0x40800000		| default MacOS ROM base address

| NOTE(review): presumably a count and fixed-size vector of video
| resource ids filled in during bootstrap -- confirm against users.
GLOBAL(mac68k_vrsrc_cnt)
	.long	0
GLOBAL(mac68k_vrsrc_vec)
	.word	0, 0, 0, 0, 0, 0
CVSweb