Annotation of sys/arch/amd64/amd64/locore.S, Revision 1.1.1.1
1.1 nbrk 1: /* $OpenBSD: locore.S,v 1.22 2007/05/27 08:58:31 art Exp $ */
2: /* $NetBSD: locore.S,v 1.13 2004/03/25 18:33:17 drochner Exp $ */
3:
4: /*
5: * Copyright-o-rama!
6: */
7:
8: /*
9: * Copyright (c) 2001 Wasabi Systems, Inc.
10: * All rights reserved.
11: *
12: * Written by Frank van der Linden for Wasabi Systems, Inc.
13: *
14: * Redistribution and use in source and binary forms, with or without
15: * modification, are permitted provided that the following conditions
16: * are met:
17: * 1. Redistributions of source code must retain the above copyright
18: * notice, this list of conditions and the following disclaimer.
19: * 2. Redistributions in binary form must reproduce the above copyright
20: * notice, this list of conditions and the following disclaimer in the
21: * documentation and/or other materials provided with the distribution.
22: * 3. All advertising materials mentioning features or use of this software
23: * must display the following acknowledgement:
24: * This product includes software developed for the NetBSD Project by
25: * Wasabi Systems, Inc.
26: * 4. The name of Wasabi Systems, Inc. may not be used to endorse
27: * or promote products derived from this software without specific prior
28: * written permission.
29: *
30: * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
31: * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
32: * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
33: * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
34: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
35: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
36: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
37: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
38: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
39: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
40: * POSSIBILITY OF SUCH DAMAGE.
41: */
42:
43:
44: /*-
45: * Copyright (c) 1998, 2000 The NetBSD Foundation, Inc.
46: * All rights reserved.
47: *
48: * This code is derived from software contributed to The NetBSD Foundation
49: * by Charles M. Hannum.
50: *
51: * Redistribution and use in source and binary forms, with or without
52: * modification, are permitted provided that the following conditions
53: * are met:
54: * 1. Redistributions of source code must retain the above copyright
55: * notice, this list of conditions and the following disclaimer.
56: * 2. Redistributions in binary form must reproduce the above copyright
57: * notice, this list of conditions and the following disclaimer in the
58: * documentation and/or other materials provided with the distribution.
59: * 3. All advertising materials mentioning features or use of this software
60: * must display the following acknowledgement:
61: * This product includes software developed by the NetBSD
62: * Foundation, Inc. and its contributors.
63: * 4. Neither the name of The NetBSD Foundation nor the names of its
64: * contributors may be used to endorse or promote products derived
65: * from this software without specific prior written permission.
66: *
67: * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
68: * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
69: * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
70: * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
71: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
72: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
73: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
74: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
75: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
76: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
77: * POSSIBILITY OF SUCH DAMAGE.
78: */
79:
80: /*-
81: * Copyright (c) 1990 The Regents of the University of California.
82: * All rights reserved.
83: *
84: * This code is derived from software contributed to Berkeley by
85: * William Jolitz.
86: *
87: * Redistribution and use in source and binary forms, with or without
88: * modification, are permitted provided that the following conditions
89: * are met:
90: * 1. Redistributions of source code must retain the above copyright
91: * notice, this list of conditions and the following disclaimer.
92: * 2. Redistributions in binary form must reproduce the above copyright
93: * notice, this list of conditions and the following disclaimer in the
94: * documentation and/or other materials provided with the distribution.
95: * 3. Neither the name of the University nor the names of its contributors
96: * may be used to endorse or promote products derived from this software
97: * without specific prior written permission.
98: *
99: * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
100: * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
101: * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
102: * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
103: * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
104: * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
105: * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
106: * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
107: * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
108: * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
109: * SUCH DAMAGE.
110: *
111: * @(#)locore.s 7.3 (Berkeley) 5/13/91
112: */
113:
114: #include "assym.h"
115: #include "lapic.h"
116: #include "ioapic.h"
117: #include "ksyms.h"
118:
119: #include <sys/errno.h>
120: #include <sys/syscall.h>
121:
122: #include <machine/param.h>
123: #include <machine/segments.h>
124: #include <machine/specialreg.h>
125: #include <machine/trap.h>
126: #include <machine/frameasm.h>
127:
128: #if NLAPIC > 0
129: #include <machine/i82489reg.h>
130: #endif
131:
132: /*
133: * override user-land alignment before including asm.h
134: */
/* 8-byte data alignment; text aligned to 16 with 0x90 (NOP) fill bytes. */
135: #define ALIGN_DATA .align 8
136: #define ALIGN_TEXT .align 16,0x90
137: #define _ALIGN_TEXT ALIGN_TEXT
138:
139: #include <machine/asm.h>
140:
/*
 * SET_CURPROC(proc,cpu): make `proc' the current process on this CPU.
 * Loads the per-CPU self pointer into the `cpu' register (clobbering it),
 * publishes `proc' in CPUVAR(CURPROC), and back-links proc->p_cpu.
 */
141: #define SET_CURPROC(proc,cpu) \
142: movq CPUVAR(SELF),cpu ; \
143: movq proc,CPUVAR(CURPROC) ; \
144: movq cpu,P_CPU(proc)
145:
/* Fetch / publish the current PCB from/to per-CPU storage. */
146: #define GET_CURPCB(reg) movq CPUVAR(CURPCB),reg
147: #define SET_CURPCB(reg) movq reg,CPUVAR(CURPCB)
148:
149:
150: /* XXX temporary kluge; these should not be here */
151: /* Get definitions for IOM_BEGIN, IOM_END, and IOM_SIZE */
152: #include <dev/isa/isareg.h>
153:
154:
155: /*
156: * Initialization
157: */
158: .data
159:
160: #if NLAPIC > 0
/*
 * One page laid out to match local-APIC register offsets (LAPIC_ID,
 * LAPIC_TPRI, ...), so the labels below address individual registers.
 * NOTE(review): presumably this page is later remapped onto the LAPIC
 * itself -- that mapping is not visible in this file chunk; verify.
 */
161: .align NBPG
162: .globl _C_LABEL(local_apic), _C_LABEL(lapic_id), _C_LABEL(lapic_tpr)
163: _C_LABEL(local_apic):
164: .space LAPIC_ID
165: _C_LABEL(lapic_id):
166: .long 0x00000000
167: .space LAPIC_TPRI-(LAPIC_ID+4)
168: _C_LABEL(lapic_tpr):
169: .space LAPIC_PPRI-LAPIC_TPRI
170: _C_LABEL(lapic_ppr):
171: .space LAPIC_ISR-LAPIC_PPRI
172: _C_LABEL(lapic_isr):
173: .space NBPG-LAPIC_ISR
174: #endif
175:
/* Variables filled in by the bootstrap code below and read from C. */
176: .globl _C_LABEL(cpu_id),_C_LABEL(cpu_vendor), _C_LABEL(cpu_brand_id)
177: .globl _C_LABEL(cpuid_level),_C_LABEL(cpu_feature)
178: .globl _C_LABEL(cpu_ecxfeature)
179: .globl _C_LABEL(esym),_C_LABEL(boothowto),_C_LABEL(bootdev)
180: .globl _C_LABEL(bootinfo), _C_LABEL(bootinfo_size), _C_LABEL(atdevbase)
181: .globl _C_LABEL(proc0paddr),_C_LABEL(PTDpaddr)
182: .globl _C_LABEL(biosbasemem),_C_LABEL(biosextmem)
183: .globl _C_LABEL(bootapiver)
184: _C_LABEL(cpu): .long 0 # are we 386, 386sx, or 486,
185: # or Pentium, or..
186: _C_LABEL(cpu_id): .long 0 # saved from `cpuid' instruction
187: _C_LABEL(cpu_feature): .long 0 # feature flags from 'cpuid'
188: # instruction
189: _C_LABEL(cpu_ecxfeature):.long 0 # extended feature flags from 'cpuid'
190: _C_LABEL(cpuid_level): .long -1 # max. level accepted by 'cpuid'
191: # instruction
192: _C_LABEL(cpu_vendor): .space 16 # vendor string returned by `cpuid'
193: # instruction
194: _C_LABEL(cpu_brand_id): .long 0 # brand ID from 'cpuid' instruction
195: _C_LABEL(esym): .quad 0 # ptr to end of syms
196: _C_LABEL(atdevbase): .quad 0 # location of start of iomem in virtual
197: _C_LABEL(bootapiver): .long 0 # /boot API version
198: _C_LABEL(bootdev): .long 0 # device we booted from
199: _C_LABEL(proc0paddr): .quad 0 # physical... no: virtual addr of proc0 u-area (set in longmode_hi)
200: _C_LABEL(PTDpaddr): .quad 0 # paddr of PTD, for libkvm
201: #ifndef REALBASEMEM
202: _C_LABEL(biosbasemem): .long 0 # base memory reported by BIOS
203: #else
204: _C_LABEL(biosbasemem): .long REALBASEMEM # compile-time override
205: #endif
206: #ifndef REALEXTMEM
207: _C_LABEL(biosextmem): .long 0 # extended memory reported by BIOS
208: #else
209: _C_LABEL(biosextmem): .long REALEXTMEM # compile-time override
210: #endif
211:
/* RELOC(x): virtual -> physical translation, used while paging is off. */
212: #define _RELOC(x) ((x) - KERNBASE)
213: #define RELOC(x) _RELOC(_C_LABEL(x))
214:
215: .globl gdt64
216:
/*
 * lgdt pseudo-descriptor for the temporary bootstrap GDT: 16-bit limit
 * followed by the (physical) base address.
 * NOTE(review): the GDT limit is conventionally size-1; this stores the
 * full size (no `-1') -- harmless (limit one byte too large) but verify
 * against upstream.
 */
217: gdt64:
218: .word gdt64_end-gdt64_start
219: .quad _RELOC(gdt64_start)
220: .align 64
221:
222: gdt64_start:
223: .quad 0x0000000000000000 /* always empty */
224: .quad 0x00af9a000000ffff /* kernel CS */
225: .quad 0x00cf92000000ffff /* kernel DS */
226: gdt64_end:
227:
/* Far pointer (32-bit offset + selector) for the ljmp into 64-bit code. */
228: farjmp64:
229: .long longmode-KERNBASE
230: .word GSEL(GCODE_SEL, SEL_KPL)
231:
/* 512-byte temporary bootstrap stack; label marks the top (grows down). */
232: .space 512
233: tmpstk:
234:
235: .globl _C_LABEL(cpu_private)
236: .comm _C_LABEL(cpu_private),NBPG,NBPG # one page, page-aligned
237:
238: /*
239: * Some hackage to deal with 64bit symbols in 32 bit mode.
240: * This may not be needed if things are cleaned up a little.
241: */
242:
243:
244: .text
245: .globl _C_LABEL(kernel_text)
246: .set _C_LABEL(kernel_text),KERNTEXTOFF
247:
248: .code32
249:
/*
 * Bootstrap entry point.  /boot enters here in 32-bit protected mode
 * with paging off and its arguments on the stack (list below).  This
 * code records the boot parameters, probes the CPU via cpuid, builds
 * the initial 4-level page tables after the kernel image, enables PAE
 * and long mode (EFER.LME), turns on paging, far-jumps into a 64-bit
 * code segment, relocates to the high (KERNBASE) mapping, tears down
 * the low identity mapping, and finally calls init_x86_64() and main()
 * on proc0's bootstrap stack.  Until paging is enabled all symbol
 * references must go through RELOC() to get physical addresses.
 */
250: .globl start
251: start: movw $0x1234,0x472 # warm boot
252:
253: /*
254: * Load parameters from stack
255: * (howto, bootdev, bootapiver, esym, extmem, cnvmem, ac, av)
256: */
257: movl 4(%esp),%eax
258: movl %eax, RELOC(boothowto)
259: movl 8(%esp),%eax
260: movl %eax, RELOC(bootdev)
261:
262: movl 16(%esp), %eax
263: testl %eax,%eax
264: jz 1f
/* esym is stored as a 64-bit virtual address: low half = phys + KERNBASE_LO, high half = KERNBASE_HI */
265: addl $KERNBASE_LO,%eax
266: movl $RELOC(esym),%ebp
267: movl %eax,(%ebp)
268: movl $KERNBASE_HI,4(%ebp)
269: 1:
270: movl 20(%esp), %eax
271: movl %eax, RELOC(biosextmem)
272: movl 24(%esp), %eax
273: movl %eax, RELOC(biosbasemem)
274:
275: movl 12(%esp), %eax
276: movl %eax, RELOC(bootapiver)
277:
278: /*
279: * Copy the boot arguments to bootinfo[] in machdep.c.
280: *
281: * We are passed the size of bootinfo[] in bootinfo_size, and
282: * we report how much data /boot passed us back in the same variable.
283: *
284: * machdep.c can then take action if bootinfo_size >= bootinfo[]
285: * (which would mean that we may have been passed too much data).
286: */
287: movl 28(%esp), %eax
288: movl %eax, %ecx
/*
 * NOTE(review): `jnc' (= jae) skips the clamp exactly when
 * %ecx >= bootinfo_size, which looks inverted relative to the
 * "Only copy this much" intent -- verify against upstream (jb?).
 */
289: cmpl RELOC(bootinfo_size), %ecx /* Too much? */
290: jnc bi_size_ok
291: movl RELOC(bootinfo_size), %ecx /* Only copy this much */
292: bi_size_ok:
293: movl %eax, RELOC(bootinfo_size) /* Report full amount */
294:
295: movl $RELOC(bootinfo), %edi /* Destination */
296: movl 32(%esp), %esi /* Source */
297: rep movsb /* Copy this many bytes */ # NOTE(review): assumes DF is clear (no cld until later)
298:
299: /* First, reset the PSL. */
300: pushl $PSL_MBO
301: popfl
302:
/* cpuid leaf 0: max supported leaf in %eax, 12-byte vendor string in %ebx/%edx/%ecx */
303: xorl %eax,%eax
304: cpuid
305: movl %eax,RELOC(cpuid_level)
306: movl $RELOC(cpu_vendor),%ebp
307: movl %ebx,(%ebp)
308: movl %edx,4(%ebp)
309: movl %ecx,8(%ebp)
310: movl $0, 12(%ebp) # NUL-terminate the vendor string
311:
/* cpuid leaf 1: processor signature and feature flags */
312: movl $1,%eax
313: cpuid
314: movl %eax,RELOC(cpu_id)
315: movl %ecx,RELOC(cpu_ecxfeature)
316: movl %edx,RELOC(cpu_feature)
317:
/* cpuid extended leaf 0x80000001: fold only the NX bit into cpu_feature */
318: movl $0x80000001, %eax
319: cpuid
320: andl $CPUID_NXE, %edx /* other bits may clash */
321: orl %edx, RELOC(cpu_feature)
322:
323: /* Brand ID is bits 0-7 of %ebx */
324: andl $255,%ebx
325: movl %ebx,RELOC(cpu_brand_id)
326:
327: /*
328: * Finished with old stack; load new %esp now instead of later so we
329: * can trace this code without having to worry about the trace trap
330: * clobbering the memory test or the zeroing of the bss+bootstrap page
331: * tables.
332: *
333: * The boot program should check:
334: * text+data <= &stack_variable - more_space_for_stack
335: * text+data+bss+pad+space_for_page_tables <= end_of_memory
336: * Oops, the gdt is in the carcass of the boot program so clearing
337: * the rest of memory is still not possible.
338: */
339: movl $RELOC(tmpstk),%esp
340:
341: /*
342: * Virtual address space of kernel:
343: *
344: * text | data | bss | [syms] | page dir | proc0 kstack | L1 ptp | L2 ptp | L3
345: * 0 1 2 3
346: */
347:
348: #if L2_SLOT_KERNBASE > 0
349: #define TABLE_L2_ENTRIES (2 * (NKL2_KIMG_ENTRIES + 1))
350: #else
351: #define TABLE_L2_ENTRIES (NKL2_KIMG_ENTRIES + 1)
352: #endif
353:
354: #if L3_SLOT_KERNBASE > 0
355: #define TABLE_L3_ENTRIES (2 * NKL3_KIMG_ENTRIES)
356: #else
357: #define TABLE_L3_ENTRIES NKL3_KIMG_ENTRIES
358: #endif
359:
360:
/* Byte offsets of each piece of the bootstrap area placed after the image. */
361: #define PROC0_PML4_OFF 0
362: #define PROC0_STK_OFF (PROC0_PML4_OFF + NBPG)
363: #define PROC0_PTP3_OFF (PROC0_STK_OFF + UPAGES * NBPG)
364: #define PROC0_PTP2_OFF (PROC0_PTP3_OFF + NKL4_KIMG_ENTRIES * NBPG)
365: #define PROC0_PTP1_OFF (PROC0_PTP2_OFF + TABLE_L3_ENTRIES * NBPG)
366: #define PROC0_DMP3_OFF (PROC0_PTP1_OFF + TABLE_L2_ENTRIES * NBPG)
367: #define PROC0_DMP2_OFF (PROC0_DMP3_OFF + NDML3_ENTRIES * NBPG)
368: #define TABLESIZE \
369: ((NKL4_KIMG_ENTRIES + TABLE_L3_ENTRIES + TABLE_L2_ENTRIES + 1 + UPAGES + \
370: NDML3_ENTRIES + NDML2_ENTRIES) * NBPG)
371:
/*
 * fillkpt: write a run of 8-byte page-table entries.
 *   %eax = physical address OR'ed with PG_* flags for the first entry
 *   %ebx = address of the first entry to fill
 *   %ecx = number of entries (must be non-zero; `loop' decrements it)
 * The upper 32 bits of every entry are written as zero.  Clobbers
 * %eax (advanced by NBPG per entry), %ebx (by 8), and %ecx.
 */
372: #define fillkpt \
373: 1: movl %eax,(%ebx) ; /* store phys addr */ \
374: movl $0,4(%ebx) ; /* upper 32 bits 0 */ \
375: addl $8,%ebx ; /* next pte/pde */ \
376: addl $NBPG,%eax ; /* next phys page */ \
377: loop 1b ; \
378:
379:
380: /* Find end of kernel image. */
381: movl $RELOC(end),%edi
382: #if (NKSYMS || defined(DDB) || defined(LKM)) && !defined(SYMTAB_SPACE)
383: /* Save the symbols (if loaded). */
384: movl RELOC(esym),%eax
385: testl %eax,%eax
386: jz 1f
387: subl $KERNBASE_LO,%eax /* XXX */
388: movl %eax,%edi
389: 1:
390: #endif
391: /* Clear tables */
/* Round the image end up to a page boundary; %esi = phys base of tables. */
392: movl %edi,%esi
393: addl $PGOFSET,%esi
394: andl $~PGOFSET,%esi
395:
396: movl %esi,%edi
397: xorl %eax,%eax
398: cld
399: movl $TABLESIZE,%ecx
400: shrl $2,%ecx # zero TABLESIZE bytes, 4 at a time
401: rep
402: stosl
403:
404: leal (PROC0_PTP1_OFF)(%esi), %ebx
405:
406: /*
407: * Compute etext - KERNBASE. This can't be > 4G, or we can't deal
408: * with it anyway, since we can't load it in 32 bit mode. So use
409: * the bottom 32 bits.
410: */
411: movl $RELOC(etext),%edx
412: addl $PGOFSET,%edx
413: andl $~PGOFSET,%edx
414:
415: /*
416: * Skip the first MB.
417: */
418: movl $(KERNTEXTOFF_LO - KERNBASE_LO),%eax
419: movl %eax,%ecx
420: shrl $(PGSHIFT-3),%ecx /* ((n >> PGSHIFT) << 3) for # pdes */
421: addl %ecx,%ebx
422:
423: /* Map kernel text read-only */
424: movl %edx,%ecx
425: subl %eax,%ecx
426: shrl $PGSHIFT,%ecx # %ecx = number of text pages
427: orl $(PG_V|PG_KR),%eax
428: fillkpt
429:
430: /* Map the data, BSS, and bootstrap tables read-write. */
431: leal (PG_V|PG_KW)(%edx),%eax # etext + flags; page-aligned, so add == or
432: movl $TABLESIZE,%ecx
433: addl %esi,%ecx /* %ecx = &end[TABLESIZE] */
434: subl %edx,%ecx /* %ecx = %ecx - etext */
435: shrl $PGSHIFT,%ecx
436: fillkpt
437:
438: /* Map ISA I/O mem (later atdevbase) */
439: movl $(IOM_BEGIN|PG_V|PG_KW/*|PG_N*/),%eax
440: movl $(IOM_SIZE>>PGSHIFT),%ecx
441: fillkpt
442:
443: /* Set up level 2 pages */
444: leal (PROC0_PTP2_OFF)(%esi),%ebx
445: leal (PROC0_PTP1_OFF)(%esi),%eax
446: orl $(PG_V|PG_KW), %eax
447: movl $(NKL2_KIMG_ENTRIES+1),%ecx
448: fillkpt
449:
450: #if L2_SLOT_KERNBASE > 0
451: /* If needed, set up level 2 entries for actual kernel mapping */
452: leal (PROC0_PTP2_OFF+ L2_SLOT_KERNBASE*8)(%esi),%ebx
453: leal (PROC0_PTP1_OFF)(%esi),%eax
454: orl $(PG_V|PG_KW), %eax
455: movl $(NKL2_KIMG_ENTRIES+1),%ecx
456: fillkpt
457: #endif
458:
459: /* Set up level 3 pages */
460: leal (PROC0_PTP3_OFF)(%esi),%ebx
461: leal (PROC0_PTP2_OFF)(%esi),%eax
462: orl $(PG_V|PG_KW), %eax
463: movl $NKL3_KIMG_ENTRIES,%ecx
464: fillkpt
465:
466: #if L3_SLOT_KERNBASE > 0
467: /* If needed, set up level 3 entries for actual kernel mapping */
468: leal (PROC0_PTP3_OFF+ L3_SLOT_KERNBASE*8)(%esi),%ebx
469: leal (PROC0_PTP2_OFF)(%esi),%eax
470: orl $(PG_V|PG_KW), %eax
471: movl $NKL3_KIMG_ENTRIES,%ecx
472: fillkpt
473: #endif
474:
475: /* Set up top level entries for identity mapping */
476: leal (PROC0_PML4_OFF)(%esi),%ebx
477: leal (PROC0_PTP3_OFF)(%esi),%eax
478: orl $(PG_V|PG_KW), %eax
479: movl $NKL4_KIMG_ENTRIES,%ecx
480: fillkpt
481:
482: /* Set up top level entries for actual kernel mapping */
483: leal (PROC0_PML4_OFF + L4_SLOT_KERNBASE*8)(%esi),%ebx
484: leal (PROC0_PTP3_OFF)(%esi),%eax
485: orl $(PG_V|PG_KW), %eax
486: movl $NKL4_KIMG_ENTRIES,%ecx
487: fillkpt
488:
489: /*
490: * Map the first 4 GB with the direct map. We'll map the rest
491: * in pmap_bootstrap. But we always need the first 4GB during
492: * bootstrap.
493: */
494: leal (PROC0_DMP2_OFF)(%esi), %ebx
495: xorl %eax, %eax
496: orl $(PG_V|PG_KW|PG_PS|PG_G), %eax
497: movl $(NDML2_ENTRIES * NPDPG), %ecx
/* open-coded fillkpt variant: PG_PS entries advance by NBPD_L2 (2MB), not NBPG */
498: 1: movl %eax, (%ebx)
499: movl $0, 4(%ebx)
500: addl $8, %ebx
501: addl $NBPD_L2, %eax
502: loop 1b
503:
504: leal (PROC0_DMP3_OFF)(%esi), %ebx
505: leal (PROC0_DMP2_OFF)(%esi), %eax
506: orl $(PG_V|PG_KW), %eax
507: movl $NDML2_ENTRIES, %ecx
508: fillkpt
509:
510: leal (PROC0_PML4_OFF + PDIR_SLOT_DIRECT * 8)(%esi), %ebx
511: leal (PROC0_DMP3_OFF)(%esi), %eax
512: orl $(PG_V|PG_KW), %eax
513: movl $NDML3_ENTRIES, %ecx
514: fillkpt
515:
516: /* Install recursive top level PDE */
517: leal (PROC0_PML4_OFF + PDIR_SLOT_PTE*8)(%esi),%ebx
518: leal (PROC0_PML4_OFF)(%esi),%eax
519: orl $(PG_V|PG_KW),%eax
520: movl %eax,(%ebx)
521: movl $0, 4(%ebx)
522:
523:
524: /* Save phys. addr of PTD, for libkvm. */
525: movl $RELOC(PTDpaddr),%ebp
526: movl %esi,(%ebp)
527: movl $0,4(%ebp)
528:
529: /*
530: * Startup checklist:
531: * 1. Enable PAE (and SSE while here).
532: */
533: movl %cr4,%eax
534: orl $(CR4_DEFAULT),%eax
535: movl %eax,%cr4
536:
537: /*
538: * 2. Set Long Mode Enable in EFER. Also enable the
539: * syscall extensions.
540: */
541: movl $MSR_EFER,%ecx
542: rdmsr
543: xorl %eax,%eax /* XXX */ # discards whatever EFER bits rdmsr returned
544: orl $(EFER_LME|EFER_SCE),%eax
545: wrmsr
546:
547: /*
548: * 3. Load %cr3 with pointer to PML4.
549: */
550: movl %esi,%eax
551: movl %eax,%cr3
552:
553: /*
554: * 4. Enable paging and the rest of it.
555: */
556: movl %cr0,%eax
557: orl $(CR0_PE|CR0_PG|CR0_NE|CR0_TS|CR0_MP|CR0_WP),%eax
558: movl %eax,%cr0
559: jmp compat # near jump flushes the prefetch queue after enabling paging
560: compat:
561:
562: /*
563: * 5.
564: * Not quite done yet, we're now in a compatibility segment,
565: * in legacy mode. We must jump to a long mode segment.
566: * Need to set up a temporary GDT with a long mode segment
567: * in it to do that.
568: */
569:
570: movl $RELOC(gdt64),%eax
571: lgdt (%eax)
572: movl $RELOC(farjmp64),%eax
573: ljmp *(%eax) # far jump through farjmp64 loads the 64-bit CS
574:
575: .code64
576: longmode:
577: /*
578: * 6.
579: * Finally, we're in long mode. However, we're still
580: * in the identity mapped area (could not jump out
581: * of that earlier because it would have been a > 32bit
582: * jump). We can do that now, so here we go.
583: */
584: movabsq $longmode_hi,%rax
585: jmp *%rax
586: longmode_hi:
587: /*
588: * We have arrived.
589: * There's no need anymore for the identity mapping in low
590: * memory, remove it.
591: */
592: movq $KERNBASE,%r8
593:
594: #if L2_SLOT_KERNBASE > 0
595: movq $(NKL2_KIMG_ENTRIES+1),%rcx
596: leaq (PROC0_PTP2_OFF)(%rsi),%rbx
597: addq %r8, %rbx # access the tables through the KERNBASE mapping now
598: 1: movq $0,(%rbx)
599: addq $8,%rbx
600: loop 1b
601: #endif
602:
603: #if L3_SLOT_KERNBASE > 0
604: movq $NKL3_KIMG_ENTRIES,%rcx
605: leaq (PROC0_PTP3_OFF)(%rsi),%rbx
606: addq %r8, %rbx
607: 1: movq $0,(%rbx)
608: addq $8,%rbx
609: loop 1b
610: #endif
611:
612: movq $NKL4_KIMG_ENTRIES,%rcx
613: leaq (PROC0_PML4_OFF)(%rsi),%rbx # old, phys address of PML4
614: addq %r8, %rbx # new, virtual address of PML4
615: 1: movq $0,(%rbx)
616: addq $8,%rbx
617: loop 1b
618:
619: /* Relocate atdevbase. */
620: movq $(TABLESIZE+KERNBASE),%rdx
621: addq %rsi,%rdx
622: movq %rdx,_C_LABEL(atdevbase)(%rip)
623:
624: /* Set up bootstrap stack. */
625: leaq (PROC0_STK_OFF)(%rsi),%rax
626: addq %r8,%rax # %rax = virtual address of proc0's u-area
627: movq %rax,_C_LABEL(proc0paddr)(%rip)
628: leaq (USPACE-FRAMESIZE)(%rax),%rsp
629: movq %rsi,PCB_CR3(%rax) # pcb->pcb_cr3
630: xorq %rbp,%rbp # mark end of frames
631:
632: xorw %ax,%ax
633: movw %ax,%gs # null %fs/%gs until per-CPU state is set up in C
634: movw %ax,%fs
635:
636: /* XXX merge these */
637: leaq TABLESIZE(%rsi),%rdi # 1st arg: phys addr just past the bootstrap tables
638: call _C_LABEL(init_x86_64)
639:
640: call _C_LABEL(main)
641:
642: /*****************************************************************************/
643:
644: /*
645: * Signal trampoline; copied to top of user stack.
646: *
647: * Entered with the handler address in %rax (set up by the sendsig
648: * path; not visible in this file).  After the handler returns, the
649: * fake return address pushed below brings control back here, where
650: * we invoke SYS_sigreturn via int $0x80 and, should that ever
651: * return, SYS_exit via the syscall instruction.  esigcode marks the
652: * end of the trampoline so the kernel knows how many bytes to copy
653: * out to the user stack.  These bytes execute in user mode -- do
654: * not change them without updating the sendsig code.
655: */
647: NENTRY(sigcode)
648: call *%rax
649:
650: movq %rsp,%rdi # signal context pointer is at the top of the stack
651: pushq %rdi /* fake return address */
652: movq $SYS_sigreturn,%rax
653: int $0x80
654: movq $SYS_exit,%rax
655: syscall
656: .globl _C_LABEL(esigcode)
657: _C_LABEL(esigcode):
658:
659: /*
660: * void lgdt(struct region_descriptor *rdp);
661: * Change the global descriptor table.
662: * Reloads all data segment selectors and then %cs via an
663: * inter-segment return so the new GDT takes full effect.
664: */
663: NENTRY(lgdt)
664: /* Reload the descriptor table. */
665: movq %rdi,%rax
666: lgdt (%rax)
667: /* Flush the prefetch q. */
668: jmp 1f
669: nop
670: 1: /* Reload "stale" selectors. */
671: movl $GSEL(GDATA_SEL, SEL_KPL),%eax
672: movl %eax,%ds
673: movl %eax,%es
674: movl %eax,%ss
675: /* Reload code selector by doing intersegment return. */
676: popq %rax # real return address
677: pushq $GSEL(GCODE_SEL, SEL_KPL)
678: pushq %rax
679: lretq # returns to caller with the new %cs
680:
681: ENTRY(setjmp)
682: /*
683: * Only save registers that must be preserved across function
684: * calls according to the ABI (%rbx, %rsp, %rbp, %r12-%r15)
685: * and %rip.
686: *
687: * %rdi points to the jmp_buf: seven quads laid out as
688: * rbx, rsp, rbp, r12, r13, r14, r15, rip (offsets 0..56).
689: * Returns 0 on the direct call; longjmp() resumes here with 1.
690: */
687: movq %rdi,%rax
688: movq %rbx,(%rax)
689: movq %rsp,8(%rax)
690: movq %rbp,16(%rax)
691: movq %r12,24(%rax)
692: movq %r13,32(%rax)
693: movq %r14,40(%rax)
694: movq %r15,48(%rax)
695: movq (%rsp),%rdx # caller's return address
696: movq %rdx,56(%rax)
697: xorl %eax,%eax # return 0 from the direct call
698: ret
699:
700: ENTRY(longjmp)
/*
 * Restore the context saved by setjmp() from the jmp_buf in %rdi,
 * overwrite the return address on the restored stack with the saved
 * %rip, and "return" 1 so the setjmp() call site takes the longjmp
 * path.  (No value argument -- always returns 1, unlike userland.)
 */
701: movq %rdi,%rax
702: movq (%rax),%rbx
703: movq 8(%rax),%rsp
704: movq 16(%rax),%rbp
705: movq 24(%rax),%r12
706: movq 32(%rax),%r13
707: movq 40(%rax),%r14
708: movq 48(%rax),%r15
709: movq 56(%rax),%rdx # saved %rip
710: movq %rdx,(%rsp) # replace return address on the restored stack
711: xorl %eax,%eax
712: incl %eax # return 1
713: ret
714:
715: /*****************************************************************************/
716:
717: /*
718: * The following primitives manipulate the run queues.
719: * _whichqs tells which of the 32 queues _qs
720: * have processes in them. Setrq puts processes into queues, Remrq
721: * removes them from queues. The running process is on no queue,
722: * other processes are on a queue related to p->p_pri, divided by 4
723: * actually to shrink the 0-127 range of priorities into the 32 available
724: * queues.
725: */
726: .globl _C_LABEL(whichqs),_C_LABEL(qs)
727: .globl _C_LABEL(uvmexp),_C_LABEL(panic)
728:
729: #if NAPM > 0
730: .globl _C_LABEL(apm_cpu_idle),_C_LABEL(apm_cpu_busy)
731: #endif
732:
733: #ifdef DIAGNOSTIC
/*
 * Panic helpers for the consistency checks in cpu_switch()/cpu_switchto().
 * Each loads the address of its message (stored inline right after the
 * call, which never returns) and panics.
 */
734: NENTRY(switch_error1)
735: movabsq $1f,%rdi
736: call _C_LABEL(panic)
737: /* NOTREACHED */
738: 1: .asciz "cpu_switch 1"
739: NENTRY(switch_error2)
740: movabsq $1f,%rdi
741: call _C_LABEL(panic)
742: /* NOTREACHED */
743: 1: .asciz "cpu_switch 2"
744: NENTRY(switch_error3)
745: movabsq $1f,%rdi
746: call _C_LABEL(panic)
747: /* NOTREACHED */
748: 1: .asciz "cpu_switch 3"
749: #endif /* DIAGNOSTIC */
750:
751: /*
752: * int cpu_switch(struct proc *)
753: * Find a runnable process and switch to it. Wait if necessary. If the new
754: * proc is the same as the old one, we short-circuit the context save and
755: * restore.
756: *
757: * Callee-saved registers are pushed on entry and popped on exit; %r13
758: * holds the old proc, %r12 the new one throughout.  Falls into an
759: * idle loop (hlt / page zeroing) when no queue has a runnable proc.
760: */
757: ENTRY(cpu_switch)
758: pushq %rbx
759: pushq %rbp
760: pushq %r12
761: pushq %r13
762: pushq %r14
763: pushq %r15
764:
765: movq %rdi,%r13
766:
767: /*
768: * Clear curproc so that we don't accumulate system time while idle.
769: * This also insures that schedcpu() will move the old proc to
770: * the correct queue if it happens to get called from the spllower()
771: * below and changes the priority. (See corresponding comment in
772: * userret()).
773: */
774: movq $0,CPUVAR(CURPROC)
775:
776:
777: /*
778: * First phase: find new proc.
779: *
780: * Registers:
781: * %rax - queue head, scratch, then zero
782: * %r8 - queue number
783: * %ecx - cached value of whichqs
784: * %rdx - next process in queue
785: * %r13 - old proc
786: * %r12 - new proc
787: */
788:
789: /* Look for new proc. */
790: cli # splhigh doesn't do a cli
791: movl _C_LABEL(whichqs)(%rip),%ecx
792: bsfl %ecx,%r8d # find a full q
793: jnz switch_dequeue # bsf clears ZF iff some queue bit was set
794:
795: /*
796: * idling: save old context
797: *
798: * Registers:
799: * %rax, %rcx - scratch
800: * %r13 - old proc, then old pcb
801: * %r12 - idle pcb
802: */
803:
804: /* old proc still in %rdi */
805: call _C_LABEL(pmap_deactivate)
806:
807: movq P_ADDR(%r13),%r13 # %r13 = old proc's PCB
808:
809: /* Save stack pointers */
810:
811: movq %rsp,PCB_RSP(%r13)
812: movq %rbp,PCB_RBP(%r13)
813:
814: /* Find idle PCB for this CPU */
815: #ifndef MULTIPROCESSOR
816: leaq _C_LABEL(proc0)(%rip),%rcx
817: movq P_ADDR(%rcx),%r12
818: movl P_MD_TSS_SEL(%rcx),%edx
819: #else
820: movq CPUVAR(IDLE_PCB),%r12
821: movl CPUVAR(IDLE_TSS_SEL),%edx
822: #endif
823: movq $0,CPUVAR(CURPROC)
824:
825: /* Restore the idle context (avoid interrupts) */
826: cli
827:
828: /* Restore stack pointers. */
829: movq PCB_RSP(%r12),%rsp
830: movq PCB_RBP(%r12),%rbp
831:
832: /* Switch address space. */
833: movq PCB_CR3(%r12),%rcx
834: movq %rcx,%cr3
835:
836: #ifdef MULTIPROCESSOR
837: movq CPUVAR(GDT),%rax
838: #else
839: movq _C_LABEL(gdtstore)(%rip),%rax
840: #endif
841:
842: /* Switch TSS. Reset "task busy" flag before */
843: andl $~0x0200,4(%rax,%rdx, 1) # clear busy bit in TSS descriptor so ltr won't fault
844: ltr %dx
845:
846: /* Restore cr0 (including FPU state). */
847: movl PCB_CR0(%r12),%ecx
848: movq %rcx,%cr0
849:
850: SET_CURPCB(%r12)
851:
852: xorq %r13,%r13 # no "old proc" while idling
853: sti
854: idle_unlock:
855: #if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
856: call _C_LABEL(sched_unlock_idle)
857: #endif
858: /* Interrupts are okay again. */
859: movl $IPL_NONE,%edi
860: call _C_LABEL(Xspllower)
861: jmp idle_start
862: idle_zero:
863: sti
864: call _C_LABEL(uvm_pageidlezero)
865: cli
866: cmpl $0,_C_LABEL(whichqs)(%rip)
867: jnz idle_exit
868: idle_loop:
869: /* Try to zero some pages. */
870: movl _C_LABEL(uvm)+UVM_PAGE_IDLE_ZERO(%rip),%ecx
871: testl %ecx,%ecx
872: jnz idle_zero
873: sti
874: hlt # nothing to do: wait for the next interrupt
875: NENTRY(mpidle)
876: idle_start:
877: cli
878: cmpl $0,_C_LABEL(whichqs)(%rip)
879: jz idle_loop
880: idle_exit:
881: movl $IPL_HIGH,CPUVAR(ILEVEL)
882: sti
883: #if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
884: call _C_LABEL(sched_lock_idle)
885: #endif
886: switch_search:
887: movl _C_LABEL(whichqs)(%rip),%ecx
888: bsfl %ecx,%r8d
889: jz idle_unlock
890:
891: switch_dequeue:
892:
893: sti
894: movq %r8,%r9
895:
896: shlq $4, %r9 # each queue head is 16 bytes (two pointers)
897: leaq _C_LABEL(qs)(%rip),%rax
898: addq %r9,%rax
899: /* movq (%rax),%rax */
900:
901: movq P_FORW(%rax),%r12 # unlink from front of process q
902: #ifdef DIAGNOSTIC
903: cmpq %r12,%rax # linked to self (i.e. nothing queued)?
904: je _C_LABEL(switch_error1) # not possible
905: #endif /* DIAGNOSTIC */
906: movq P_FORW(%r12),%rdx
907: movq %rdx,P_FORW(%rax)
908: movq %rax,P_BACK(%rdx)
909:
910: cmpq %rdx,%rax # q empty?
911: jne 3f
912:
913: btrl %r8d,%ecx # yes, clear to indicate empty
914: movl %ecx,_C_LABEL(whichqs)(%rip) # update q status
915:
916: 3: /* We just did it. */
917: xorq %rax,%rax
918: movl %eax,CPUVAR(RESCHED) # clear pending-reschedule flag
919: switch_resume:
920: #ifdef DIAGNOSTIC
921: cmpq %rax,P_WCHAN(%r12) # new proc must not be sleeping on anything
922: jne _C_LABEL(switch_error2)
923: cmpb $SRUN,P_STAT(%r12) # and must be runnable
924: jne _C_LABEL(switch_error3)
925: #endif
926:
927: /* Isolate proc. XXX Is this necessary? */
928: movq %rax,P_BACK(%r12)
929:
930: /* Record new proc. */
931: movb $SONPROC,P_STAT(%r12) # p->p_stat = SONPROC
932: SET_CURPROC(%r12,%rcx)
933:
934: /* Skip context switch if same proc. */
935: xorl %ebx,%ebx # %ebx = return value (callee-saved, survives calls below)
936: cmpq %r12,%r13
937: je switch_return
938:
939: /* If old proc exited, don't bother. */
940: testq %r13,%r13
941: jz switch_exited
942:
943: /*
944: * Second phase: save old context.
945: *
946: * Registers:
947: * %rax, %rcx - scratch
948: * %r13 - old proc, then old pcb
949: * %r12 - new proc
950: */
951:
952: movq %r13,%rdi
953: call pmap_deactivate
954:
955: movq P_ADDR(%r13),%r13
956:
957: /* Save stack pointers. */
958: movq %rsp,PCB_RSP(%r13)
959: movq %rbp,PCB_RBP(%r13)
960:
961: switch_exited:
962: /*
963: * Third phase: restore saved context.
964: *
965: * Registers:
966: * %rax, %rcx, %rdx - scratch
967: * %r13 - new pcb
968: * %r12 - new process
969: */
970:
971: /* No interrupts while loading new state. */
972: cli
973: movq P_ADDR(%r12),%r13
974:
975: /* Restore stack pointers. */
976: movq PCB_RSP(%r13),%rsp
977: movq PCB_RBP(%r13),%rbp
978:
979: #if 0
980: /* Don't bother with the rest if switching to a system process. */
981: testl $P_SYSTEM,P_FLAG(%r12)
982: jnz switch_restored
983: #endif
984:
985: /* Load TSS info. */
986: #ifdef MULTIPROCESSOR
987: movq CPUVAR(GDT),%rax
988: #else
989: movq _C_LABEL(gdtstore)(%rip),%rax
990: #endif
991: movl P_MD_TSS_SEL(%r12),%edx
992:
993: /* Switch TSS. Reset "task busy" flag before */
994: andl $~0x0200,4(%rax,%rdx, 1)
995: ltr %dx
996:
997: movq %r12,%rdi
998: call _C_LABEL(pmap_activate)
999:
1000: #if 0
1001: switch_restored:
1002: #endif
1003: /* Restore cr0 (including FPU state). */
1004: movl PCB_CR0(%r13),%ecx
1005: #ifdef MULTIPROCESSOR
1006: movq PCB_FPCPU(%r13),%r8
1007: cmpq CPUVAR(SELF),%r8 # FPU state owned by another CPU?
1008: jz 1f
1009: orl $CR0_TS,%ecx # yes: set TS so first FPU use traps
1010: 1:
1011: #endif
1012: movq %rcx,%cr0
1013:
1014: SET_CURPCB(%r13)
1015:
1016: /* Interrupts are okay again. */
1017: sti
1018:
1019: switch_return:
1020: #if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
1021: call _C_LABEL(sched_unlock_idle)
1022: #endif
1023:
1024: movl $IPL_NONE,%edi
1025: call _C_LABEL(Xspllower)
1026: movl $IPL_HIGH,CPUVAR(ILEVEL)
1027:
1028: movl %ebx,%eax # return value accumulated in %ebx
1029:
1030: popq %r15
1031: popq %r14
1032: popq %r13
1033: popq %r12
1034: popq %rbp
1035: popq %rbx
1036: ret
1037:
/*
 * cpu_switchto(struct proc *old, struct proc *new)
 * Switch directly to `new' without consulting the run queues.
 * Saves the same callee-saved registers as cpu_switch(), clears
 * curproc, zeroes %rax (expected by the DIAGNOSTIC checks and the
 * P_BACK store at switch_resume) and joins cpu_switch()'s
 * switch_resume path with %r13 = old, %r12 = new.
 */
1038: ENTRY(cpu_switchto)
1039: pushq %rbx
1040: pushq %rbp
1041: pushq %r12
1042: pushq %r13
1043: pushq %r14
1044: pushq %r15
1045:
1046: movq %rdi,%r13 # old proc
1047: movq %rsi,%r12 # new proc
1048:
1049: movq $0,CPUVAR(CURPROC)
1050:
1051: xorq %rax,%rax
1052: jmp switch_resume
1053:
1054:
1055: /*
1056: * void switch_exit(struct proc *l, void (*exit)(struct proc *));
1057: * Switch to proc0's saved context and deallocate the address space and kernel
1058: * stack for p. Then jump into cpu_switch(), as if we were in proc0 all along.
1059: */
1060: .globl _C_LABEL(proc0)
1061: ENTRY(switch_exit)
/* Pick up the idle/proc0 PCB and TSS selector to run on while p dies. */
1062: #ifdef MULTIPROCESSOR
1063: movq CPUVAR(IDLE_PCB),%r8
1064: movl CPUVAR(IDLE_TSS_SEL),%edx
1065: #else
1066: leaq _C_LABEL(proc0)(%rip),%r9
1067: movq P_ADDR(%r9),%r8
1068: movl P_MD_TSS_SEL(%r9),%edx
1069: #endif
1070:
1071: /* In case we fault... */
1072: movq $0,CPUVAR(CURPROC)
1073:
1074: cli
1075:
1076: /* Restore stack pointers. */
1077: movq PCB_RSP(%r8),%rsp
1078: movq PCB_RBP(%r8),%rbp
1079:
1080: /* Load TSS info. */
1081: #ifdef MULTIPROCESSOR
1082: movq CPUVAR(GDT),%rax
1083: #else
1084: movq _C_LABEL(gdtstore)(%rip),%rax
1085: #endif
1086:
1087: /* Switch address space. */
1088: movq PCB_CR3(%r8),%rcx
1089: movq %rcx,%cr3
1090:
1091: /* Switch TSS. */
/*
 * NOTE(review): here the busy-bit clear uses offset 4-SEL_KPL while
 * cpu_switch() uses plain offset 4 -- presumably compensating for RPL
 * bits in this selector; verify both touch the same descriptor byte.
 */
1092: andl $~0x0200,4-SEL_KPL(%rax,%rdx,1)
1093: ltr %dx
1094:
1095: /* We're always in the kernel, so we don't need the LDT. */
1096:
1097: /* Restore cr0 (including FPU state). */
1098: movl PCB_CR0(%r8),%ecx
1099: movq %rcx,%cr0
1100:
1101: /* Record new pcb. */
1102: SET_CURPCB(%r8)
1103:
1104: /* Interrupts are okay again. */
1105: sti
1106:
1107: /*
1108: * Schedule the dead process's vmspace and stack to be freed.
1109: * {lpw_}exit2(l). Function still in %rsi (2nd arg), proc in
1110: * %rdi (first arg).
1111: */
1112:
1113: call *%rsi
1114:
1115: /* Jump into cpu_switch() with the right state. */
1116: xorq %r13,%r13 # no old proc to save
1117: movq %r13, CPUVAR(CURPROC)
1118: jmp switch_search
1119:
1120: /*
1121: * savectx(struct pcb *pcb);
1122: * Update pcb, saving current processor state.
1123: */
1124: ENTRY(savectx)
1125: /* Save stack pointers. */
1126: movq %rsp,PCB_RSP(%rdi)
1127: movq %rbp,PCB_RBP(%rdi)
1128:
1129: ret
1130:
/*
 * Entry for the 32-bit `syscall' instruction (compat mode): not
 * supported — bounce straight back to user space without doing anything.
 */
1131: IDTVEC(syscall32)
1132: 	sysret		/* go away please */
1133:
1134: /*
1135:  * syscall insn entry. This currently isn't much faster, but
1136:  * it can be made faster in the future.
1137:  *
1138:  * The `syscall' instruction leaves the user %rip in %rcx and the user
1139:  * rflags in %r11, and does not switch stacks, so we must build the
1140:  * trapframe by hand on the kernel stack taken from the current pcb.
1141:  */
1142: IDTVEC(syscall)
1143: 	swapgs				/* switch to kernel GS base so CPUVAR() works */
1144: 	movq	%r15,CPUVAR(SCRATCH)	/* free up %r15 for scratch */
1145: 	movq	CPUVAR(CURPCB),%r15
1146: 	movq	PCB_RSP0(%r15),%r15	/* %r15 = top of this proc's kernel stack */
1147: 	xchgq	%r15,%rsp		/* now on kernel stack; %r15 = user %rsp */
1148: 	sti
1149: 
1150: 	/*
1151: 	 * XXX don't need this whole frame, split of the
1152: 	 * syscall frame and trapframe is needed.
1153: 	 * First, leave some room for the trapno, error,
1154: 	 * ss:rsp, etc, so that all GP registers can be
1155: 	 * saved. Then, fill in the rest.
1156: 	 */
1157: 	pushq	$(LSEL(LUDATA_SEL, SEL_UPL))	/* tf_ss = user data selector */
1158: 	pushq	%r15				/* tf_rsp = saved user %rsp */
1159: 	subq	$(TF_RSP-TF_TRAPNO),%rsp	/* reserve trapno..rsp slots */
1160: 	movq	CPUVAR(SCRATCH),%r15		/* restore caller's %r15 */
1161: 	subq	$32,%rsp
1162: 	INTR_SAVE_GPRS				/* save all GP registers into the frame */
1163: 	movw	%fs,TF_FS(%rsp)
1164: 	movw	%gs,TF_GS(%rsp)
1165: 	movw	%es,TF_ES(%rsp)
1166: 	movw	$(LSEL(LUDATA_SEL, SEL_UPL)),TF_DS(%rsp)
1167: 	movq	%r11, TF_RFLAGS(%rsp)	/* old rflags from syscall insn */
1168: 	movq	$(LSEL(LUCODE_SEL, SEL_UPL)), TF_CS(%rsp)
1169: 	movq	%rcx,TF_RIP(%rsp)	/* user %rip, stashed in %rcx by syscall insn */
1170: 	movq	$2,TF_ERR(%rsp)		/* size of syscall insn, for restart */
1171: 	movq	$T_ASTFLT, TF_TRAPNO(%rsp)	/* frame doubles as an AST trapframe */
1172: 
1173: 	movq	CPUVAR(CURPROC),%r14
1174: 	movq	%rsp,P_MD_REGS(%r14)	# save pointer to frame
1175: 	andl	$~MDP_IRET,P_MD_FLAGS(%r14)	/* default: return via sysret */
1176: 	movq	%rsp,%rdi		/* syscall(frame) */
1177: 	call	_C_LABEL(syscall)
1178: 1:	/* Check for ASTs on exit to user mode. */
1179: 	cli
1180: 	CHECK_ASTPENDING(%r11)
1181: 	je	2f
1182: 	/* Always returning to user mode here. */
1183: 	CLEAR_ASTPENDING(%r11)
1184: 	sti
1185: 	/* Pushed T_ASTFLT into tf_trapno on entry. */
1186: 	movq	%rsp,%rdi
1187: 	call	_C_LABEL(trap)
1188: 	jmp	1b			/* re-check: trap() may have raised another AST */
1189: 2:
1190: 	sti
1191: 	testl	$MDP_IRET, P_MD_FLAGS(%r14)	/* did the syscall demand a full iret? */
1192: 	jne	iret_return;
1193: syscall_return:
1194: #ifdef DIAGNOSTIC
1195: 	cmpl	$IPL_NONE,CPUVAR(ILEVEL)	/* must leave with spl fully lowered */
1196: 	jne	3f
1197: #endif
1198: 	/*
1199: 	 * XXX interrupts off longer than they should be here.
1200: 	 */
1201: 	cli
1202: 	swapgs				/* back to user GS base */
1203: 	movw	TF_ES(%rsp),%es
1204: 	movw	TF_FS(%rsp),%fs
1205: 	movw	TF_GS(%rsp),%gs
1206: 	INTR_RESTORE_GPRS
1207: 	movw	$(LSEL(LUDATA_SEL, SEL_UPL)),%r11	/* NOTE(review): word move into 64-bit %r11 — assembler presumably uses %r11w; confirm */
1208: 	movw	%r11,%ds
1209: 	addq	$48,%rsp		/* discard trapno..err slots */
1210: 	popq	%rcx			/* return rip (sysret takes it from %rcx) */
1211: 	addq	$8,%rsp			/* skip saved cs */
1212: 	popq	%r11			/* flags as set by sysret insn */
1213: 	movq	%ss:(%rsp),%rsp		/* restore user stack pointer */
1214: 	sysretq
1215: 
1216: #ifdef DIAGNOSTIC
1217: 3:	movabsq	$4f, %rdi		/* spl not lowered: complain, fix up, retry */
1218: 	movl	TF_RAX(%rsp),%esi
1219: 	movl	TF_RDI(%rsp),%edx
1220: 	movl	%ebx,%ecx
1221: 	movl	CPUVAR(ILEVEL),%r8d
1222: 	xorq	%rax,%rax		/* no vector args for variadic printf */
1223: 	call	_C_LABEL(printf)
1224: #ifdef DDB
1225: 	int	$3
1226: #endif /* DDB */
1227: 	movl	$IPL_NONE,CPUVAR(ILEVEL)
1228: 	jmp	1b
1229: 4:	.asciz	"WARNING: SPL NOT LOWERED ON SYSCALL %d %d EXIT %x %x\n"
1230: #endif
1227:
1228:
/*
 * First entry point of a newly created kernel context.  The switch code
 * arranged %r12 = function to run and %r13 = its argument (see
 * cpu_switchto's register assignments); call func(arg) and then return
 * to user mode through the trapframe.
 */
1229: NENTRY(proc_trampoline)
1230: #ifdef MULTIPROCESSOR
1231: 	call	_C_LABEL(proc_trampoline_mp)
1232: #endif
1233: 	movl	$IPL_NONE,CPUVAR(ILEVEL)	/* start life at spl0 */
1234: 	movq	%r13,%rdi			/* arg for the function */
1235: 	call	*%r12				/* (*func)(arg) */
1236: 	INTRFASTEXIT				/* out to user via the trapframe */
1237: 	/* NOTREACHED */
1238:
/*
 * Like proc_trampoline, but for the child of a fork: after running
 * func(arg), leave through the sysret path (syscall_return) instead of
 * a full iret, matching the syscall the parent was executing.
 */
1239: NENTRY(child_trampoline)
1240: #ifdef MULTIPROCESSOR
1241: 	call	_C_LABEL(proc_trampoline_mp)
1242: #endif
1243: 	movl	$IPL_NONE,CPUVAR(ILEVEL)	/* start life at spl0 */
1244: 	movq	%r13,%rdi			/* arg for the function */
1245: 	call	*%r12				/* (*func)(arg) */
1246: 	jmp	syscall_return
1247:
1248: 	.globl	_C_LABEL(osyscall_return)
1249: 
1250: /* XXX - can we zap the following two? */
1251: 
1252: /*
1253:  * Old call gate entry for syscall. only needed if we're
1254:  * going to support running old NetBSD or ibcs2 binaries, etc,
1255:  * on NetBSD/amd64.
1256:  *
1257:  * A call gate does not push rflags, so fake it into the frame slot
1258:  * before joining the common int$80 path below.
1259:  */
1260: IDTVEC(oosyscall)
1261: 	/* Set rflags in trap frame. */
1262: 	pushfq
1263: 	popq	8(%rsp)			/* overwrite the gate's cs slot area with rflags */
1264: 	pushq	$7		# size of instruction for restart
1265: 	jmp	osyscall1
1266: 
1264: /*
1265:  * Trap gate entry for int $80 syscall, also used by sigreturn.
1266:  * Builds a full trapframe via INTRENTRY, dispatches to the C
1267:  * syscall() handler, then loops handling ASTs until none remain,
1268:  * finally falling into the iret_return path below (label 1f).
1269:  */
1270: IDTVEC(osyscall)
1271: 	pushq	$2		# size of instruction for restart
1272: osyscall1:
1273: 	pushq	$T_ASTFLT	# trap # for doing ASTs
1274: 	INTRENTRY
1275: 	sti
1276: 	movq	CPUVAR(CURPROC),%rdx
1277: 	movq	%rsp,P_MD_REGS(%rdx)	# save pointer to frame
1278: 	movq	%rsp,%rdi		/* syscall(frame) */
1279: 	call	_C_LABEL(syscall)
1280: _C_LABEL(osyscall_return):
1281: 2:	/* Check for ASTs on exit to user mode. */
1282: 	cli
1283: 	CHECK_ASTPENDING(%r11)
1284: 	je	1f			/* none pending: exit via iret_return's 1: */
1285: 	/* Always returning to user mode here. */
1286: 	CLEAR_ASTPENDING(%r11)
1287: 	sti
1288: 	/* Pushed T_ASTFLT into tf_trapno on entry. */
1289: 	movq	%rsp,%rdi
1290: 	call	_C_LABEL(trap)
1291: 	jmp	2b			/* re-check for further ASTs */
1289:
/*
 * Full iret exit to user mode (used by int$80 syscalls, and by
 * `syscall' paths that set MDP_IRET).  Under DIAGNOSTIC, first verify
 * the spl was lowered to IPL_NONE and warn if not.
 */
1290: iret_return:
1291: #ifndef DIAGNOSTIC
1292: 1:	INTRFASTEXIT
1293: #else /* DIAGNOSTIC */
1294: 1:	cmpl	$IPL_NONE,CPUVAR(ILEVEL)
1295: 	jne	3f
1296: 	INTRFASTEXIT
1297: 3:	sti
1298: 	movabsq	$4f, %rdi
1299: 	xorq	%rax,%rax		/* no vector args for variadic printf */
1300: 	call	_C_LABEL(printf)
1301: #ifdef DDB
1302: 	int	$3
1303: #endif /* DDB */
1304: 	movl	$IPL_NONE,CPUVAR(ILEVEL)	/* force the level down and retry the exit */
1305: 	jmp	2b
1306: 4:	.asciz	"WARNING: SPL NOT LOWERED ON SYSCALL EXIT\n"
1307: #endif /* DIAGNOSTIC */
1308:
1309:
/*
 * pagezero(void *page)
 * Zero one PAGE_SIZE page with non-temporal stores (movnti) to avoid
 * polluting the cache.  %rdx counts up from -PAGE_SIZE to 0 while %rdi
 * points one page past the start, so (%rdi,%rdx) walks the page.
 * sfence orders the weakly-ordered NT stores before return.
 */
1310: ENTRY(pagezero)
1311: 	movq	$-PAGE_SIZE,%rdx
1312: 	subq	%rdx,%rdi		/* %rdi = page + PAGE_SIZE */
1313: 	xorq	%rax,%rax		/* zero source */
1314: 1:
1315: 	movnti	%rax,(%rdi,%rdx)	/* 32 bytes per iteration */
1316: 	movnti	%rax,8(%rdi,%rdx)
1317: 	movnti	%rax,16(%rdi,%rdx)
1318: 	movnti	%rax,24(%rdi,%rdx)
1319: 	addq	$32,%rdx
1320: 	jne	1b			/* until %rdx wraps to 0 */
1321: 	sfence				/* make NT stores globally visible */
1322: 	ret
CVSweb