Annotation of sys/arch/i386/i386/locore.s, Revision 1.1.1.1
1.1 nbrk 1: /* $OpenBSD: locore.s,v 1.114 2007/05/29 23:02:02 tom Exp $ */
2: /* $NetBSD: locore.s,v 1.145 1996/05/03 19:41:19 christos Exp $ */
3:
4: /*-
5: * Copyright (c) 1993, 1994, 1995 Charles M. Hannum. All rights reserved.
6: * Copyright (c) 1990 The Regents of the University of California.
7: * All rights reserved.
8: *
9: * This code is derived from software contributed to Berkeley by
10: * William Jolitz.
11: *
12: * Redistribution and use in source and binary forms, with or without
13: * modification, are permitted provided that the following conditions
14: * are met:
15: * 1. Redistributions of source code must retain the above copyright
16: * notice, this list of conditions and the following disclaimer.
17: * 2. Redistributions in binary form must reproduce the above copyright
18: * notice, this list of conditions and the following disclaimer in the
19: * documentation and/or other materials provided with the distribution.
20: * 3. Neither the name of the University nor the names of its contributors
21: * may be used to endorse or promote products derived from this software
22: * without specific prior written permission.
23: *
24: * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25: * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26: * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27: * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28: * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29: * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30: * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31: * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32: * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33: * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34: * SUCH DAMAGE.
35: *
36: * @(#)locore.s 7.3 (Berkeley) 5/13/91
37: */
38:
39: #include "npx.h"
40: #include "assym.h"
41: #include "apm.h"
42: #include "lapic.h"
43: #include "ioapic.h"
44: #include "pctr.h"
45: #include "ksyms.h"
46:
47: #include <sys/errno.h>
48: #include <sys/syscall.h>
49: #ifdef COMPAT_SVR4
50: #include <compat/svr4/svr4_syscall.h>
51: #endif
52: #ifdef COMPAT_LINUX
53: #include <compat/linux/linux_syscall.h>
54: #endif
55: #ifdef COMPAT_FREEBSD
56: #include <compat/freebsd/freebsd_syscall.h>
57: #endif
58:
59: #include <machine/cputypes.h>
60: #include <machine/param.h>
61: #include <machine/pte.h>
62: #include <machine/segments.h>
63: #include <machine/specialreg.h>
64: #include <machine/trap.h>
65:
66: #include <dev/isa/isareg.h>
67:
68: #if NLAPIC > 0
69: #include <machine/i82489reg.h>
70: #endif
71:
72: /*
73: * override user-land alignment before including asm.h
74: */
75:
76: #define ALIGN_DATA .align 4
77: #define ALIGN_TEXT .align 4,0x90 /* 4-byte boundaries, NOP-filled */
78: #define SUPERALIGN_TEXT .align 16,0x90 /* 16-byte boundaries better for 486 */
79: #define _ALIGN_TEXT ALIGN_TEXT
80: #include <machine/asm.h>
81:
/*
 * CPL aliases the local APIC task-priority register storage (lapic_tpr,
 * defined below in .data): with NLAPIC > 0 it is the mapped TPR word,
 * otherwise a plain .long used as the software interrupt-priority level.
 */
82: #define CPL _C_LABEL(lapic_tpr)
83:
/* Load/store the per-CPU current-PCB pointer (CPUVAR(CURPCB)) in `reg'. */
84: #define GET_CURPCB(reg) \
85: movl CPUVAR(CURPCB), reg
86:
87: #define SET_CURPCB(reg) \
88: movl reg, CPUVAR(CURPCB)
89:
/*
 * Sets flags for the caller to test: loads curproc into treg, and when
 * non-NULL, compares its P_MD_ASTPENDING field against 0.  When curproc
 * is NULL the compare is skipped (flags are then from the `cmpl $0,treg').
 */
90: #define CHECK_ASTPENDING(treg) \
91: movl CPUVAR(CURPROC),treg ; \
92: cmpl $0, treg ; \
93: je 1f ; \
94: cmpl $0,P_MD_ASTPENDING(treg) ; \
95: 1:
96:
/* cpreg must already hold curproc (e.g. from CHECK_ASTPENDING). */
97: #define CLEAR_ASTPENDING(cpreg) \
98: movl $0,P_MD_ASTPENDING(cpreg)
99:
100: /*
101: * These are used on interrupt or trap entry or exit.
102: */
/*
 * INTRENTRY: build the trap frame.  Pushes the eight GPRs then
 * %ds/%es/%gs/%fs (note %fs last, so INTRFASTEXIT pops it first),
 * then loads kernel data selectors; %fs gets the per-CPU segment
 * (GCPU_SEL) so CPUVAR() references work.  Clobbers %eax.
 */
103: #define INTRENTRY \
104: pushl %eax ; \
105: pushl %ecx ; \
106: pushl %edx ; \
107: pushl %ebx ; \
108: pushl %ebp ; \
109: pushl %esi ; \
110: pushl %edi ; \
111: pushl %ds ; \
112: pushl %es ; \
113: pushl %gs ; \
114: movl $GSEL(GDATA_SEL, SEL_KPL),%eax ; \
115: movw %ax,%ds ; \
116: movw %ax,%es ; \
117: movw %ax,%gs ; \
118: pushl %fs ; \
119: movl $GSEL(GCPU_SEL, SEL_KPL),%eax ; \
120: movw %ax,%fs
/*
 * INTRFASTEXIT: unwind the INTRENTRY frame in exact reverse order,
 * then discard the trapno/error-code pair (addl $8) and iret.  The
 * `sti' only opens a one-instruction window; iret restores the saved
 * eflags (and thus the saved IF) anyway.
 */
121: #define INTRFASTEXIT \
122: popl %fs ; \
123: popl %gs ; \
124: popl %es ; \
125: popl %ds ; \
126: popl %edi ; \
127: popl %esi ; \
128: popl %ebp ; \
129: popl %ebx ; \
130: popl %edx ; \
131: popl %ecx ; \
132: popl %eax ; \
133: sti ; \
134: addl $8,%esp ; \
135: iret
136:
137:
138: /*
139: * PTmap is recursive pagemap at top of virtual address space.
140: * Within PTmap, the page directory can be found (third indirection).
141: */
/*
 * These are link-time constants (not storage): virtual addresses derived
 * from the recursive PDE slot PDSLOT_PTE installed during bootstrap below.
 * PTD is the page directory seen through that self-map; PTDpde is the
 * directory entry for the self-map itself.
 */
142: .globl _C_LABEL(PTmap), _C_LABEL(PTD), _C_LABEL(PTDpde)
143: .set _C_LABEL(PTmap), (PDSLOT_PTE << PDSHIFT)
144: .set _C_LABEL(PTD), (_C_LABEL(PTmap) + PDSLOT_PTE * NBPG)
145: .set _C_LABEL(PTDpde), (_C_LABEL(PTD) + PDSLOT_PTE * 4) # XXX 4 == sizeof pde
146:
147: /*
148: * APTmap, APTD is the alternate recursive pagemap.
149: * It's used when modifying another process's page tables.
150: */
/* APTDpde lives inside the *current* PTD (hence PTD, not APTD, below). */
151: .globl _C_LABEL(APTmap), _C_LABEL(APTD), _C_LABEL(APTDpde)
152: .set _C_LABEL(APTmap), (PDSLOT_APTE << PDSHIFT)
153: .set _C_LABEL(APTD), (_C_LABEL(APTmap) + PDSLOT_APTE * NBPG)
154: # XXX 4 == sizeof pde
155: .set _C_LABEL(APTDpde), (_C_LABEL(PTD) + PDSLOT_APTE * 4)
156:
157: /*
158: * Initialization
159: */
160: .data
161:
/*
 * Variables filled in by the startup code below (cpuid results, boot
 * arguments) or consumed by MI code and libkvm.  All are exported.
 */
162: .globl _C_LABEL(cpu), _C_LABEL(cpu_id), _C_LABEL(cpu_vendor)
163: .globl _C_LABEL(cpu_brandstr)
164: .globl _C_LABEL(cpuid_level)
165: .globl _C_LABEL(cpu_miscinfo)
166: .globl _C_LABEL(cpu_feature), _C_LABEL(cpu_ecxfeature)
167: .globl _C_LABEL(cpu_cache_eax), _C_LABEL(cpu_cache_ebx)
168: .globl _C_LABEL(cpu_cache_ecx), _C_LABEL(cpu_cache_edx)
169: .globl _C_LABEL(cold), _C_LABEL(cnvmem), _C_LABEL(extmem)
170: .globl _C_LABEL(esym)
171: .globl _C_LABEL(boothowto), _C_LABEL(bootdev), _C_LABEL(atdevbase)
172: .globl _C_LABEL(proc0paddr), _C_LABEL(PTDpaddr), _C_LABEL(PTDsize)
173: .globl _C_LABEL(gdt)
174: .globl _C_LABEL(bootapiver), _C_LABEL(bootargc), _C_LABEL(bootargv)
175: .globl _C_LABEL(lapic_tpr)
176:
/*
 * With a local APIC configured, reserve a page-aligned page whose layout
 * mirrors the APIC register file, so lapic_tpr/lapic_ppr/lapic_isr fall at
 * the hardware register offsets once the APIC is mapped over this page.
 * Without one, lapic_tpr is just a software CPL word (see CPL above).
 */
177: #if NLAPIC > 0
178: #ifdef __ELF__
179: .align NBPG
180: #else
181: .align 12
182: #endif
183: .globl _C_LABEL(local_apic), _C_LABEL(lapic_id)
184: _C_LABEL(local_apic):
185: .space LAPIC_ID
186: _C_LABEL(lapic_id):
187: .long 0x00000000
188: .space LAPIC_TPRI-(LAPIC_ID+4)
189: _C_LABEL(lapic_tpr):
190: .space LAPIC_PPRI-LAPIC_TPRI
191: _C_LABEL(lapic_ppr):
192: .space LAPIC_ISR-LAPIC_PPRI
193: _C_LABEL(lapic_isr):
194: .space NBPG-LAPIC_ISR
195: #else
196: _C_LABEL(lapic_tpr):
197: .long 0
198: #endif
199:
200: _C_LABEL(cpu): .long 0 # are we 386, 386sx, 486, 586 or 686
201: _C_LABEL(cpu_id): .long 0 # saved from 'cpuid' instruction
202: _C_LABEL(cpu_miscinfo): .long 0 # misc info (apic/brand id) from 'cpuid'
203: _C_LABEL(cpu_feature): .long 0 # feature flags from 'cpuid' instruction
204: _C_LABEL(cpu_ecxfeature):.long 0 # extended feature flags from 'cpuid'
205: _C_LABEL(cpuid_level): .long -1 # max. lvl accepted by 'cpuid' insn
206: _C_LABEL(cpu_cache_eax):.long 0
207: _C_LABEL(cpu_cache_ebx):.long 0
208: _C_LABEL(cpu_cache_ecx):.long 0
209: _C_LABEL(cpu_cache_edx):.long 0
210: _C_LABEL(cpu_vendor): .space 16 # vendor string returned by 'cpuid' instruction
211: _C_LABEL(cpu_brandstr): .space 48 # brand string returned by 'cpuid'
212: _C_LABEL(cold): .long 1 # cold till we are not
213: _C_LABEL(esym): .long 0 # ptr to end of syms
214: _C_LABEL(cnvmem): .long 0 # conventional memory size
215: _C_LABEL(extmem): .long 0 # extended memory size
216: _C_LABEL(atdevbase): .long 0 # location of start of iomem in virtual
217: _C_LABEL(bootapiver): .long 0 # /boot API version
218: _C_LABEL(bootargc): .long 0 # /boot argc
219: _C_LABEL(bootargv): .long 0 # /boot argv
220: _C_LABEL(bootdev): .long 0 # device we booted from
221: _C_LABEL(proc0paddr): .long 0
222: _C_LABEL(PTDpaddr): .long 0 # paddr of PTD, for libkvm
223: _C_LABEL(PTDsize): .long NBPG # size of PTD, for libkvm
224:
/* 512-byte bootstrap stack; tmpstk labels its top (stack grows down). */
225: .space 512
226: tmpstk:
227:
228:
/*
 * Kernel entry point from /boot.  Paging is off here, so all symbol
 * references must be translated to physical with RELOC() until paging
 * is enabled (see `begin' below).
 */
229: #define RELOC(x) ((x) - KERNBASE)
230:
231: .text
232: .globl start
233: .globl _C_LABEL(kernel_text)
234: _C_LABEL(kernel_text) = KERNTEXTOFF
/* 0x472 is the BIOS reset-flag word; 0x1234 requests a warm boot on reset. */
235: start: movw $0x1234,0x472 # warm boot
236:
237: /*
238: * Load parameters from stack (howto, bootdev, unit, bootapiver, esym).
239: * note: (%esp) is return address of boot
240: * (If we want to hold onto /boot, it's physical %esp up to _end.)
241: */
242: movl 4(%esp),%eax
243: movl %eax,RELOC(_C_LABEL(boothowto))
244: movl 8(%esp),%eax
245: movl %eax,RELOC(_C_LABEL(bootdev))
/* esym (arg at 16(%esp)) is physical; rebase to KERNBASE only if non-NULL. */
246: movl 16(%esp),%eax
247: testl %eax,%eax
248: jz 1f
249: addl $KERNBASE,%eax
250: 1: movl %eax,RELOC(_C_LABEL(esym))
251:
252: movl 12(%esp),%eax
253: movl %eax,RELOC(_C_LABEL(bootapiver))
254: movl 28(%esp), %eax
255: movl %eax, RELOC(_C_LABEL(bootargc))
256: movl 32(%esp), %eax
257: movl %eax, RELOC(_C_LABEL(bootargv))
258:
259: /* First, reset the PSL. */
260: pushl $PSL_MBO
261: popfl
262:
263: /* Clear segment registers; null until proc0 setup */
264: xorl %eax,%eax
265: movw %ax,%fs
266: movw %ax,%gs
267:
268: /* Find out our CPU type. */
269:
270: try386: /* Try to toggle alignment check flag; does not exist on 386. */
271: pushfl
272: popl %eax
273: movl %eax,%ecx
274: orl $PSL_AC,%eax
275: pushl %eax
276: popfl
277: pushfl
278: popl %eax
279: xorl %ecx,%eax
280: andl $PSL_AC,%eax
/* Restore the original eflags before branching on the toggle result. */
281: pushl %ecx
282: popfl
283:
284: testl %eax,%eax
285: jnz try486
286:
287: /*
288: * Try the test of a NexGen CPU -- ZF will not change on a DIV
289: * instruction on a NexGen, it will on an i386. Documented in
290: * Nx586 Processor Recognition Application Note, NexGen, Inc.
291: */
292: movl $0x5555,%eax
293: xorl %edx,%edx
294: movl $2,%ecx
295: divl %ecx
296: jnz is386
297:
298: isnx586:
299: /*
300: * Don't try cpuid, as Nx586s reportedly don't support the
301: * PSL_ID bit.
302: */
303: movl $CPU_NX586,RELOC(_C_LABEL(cpu))
304: jmp 2f
305:
306: is386:
307: movl $CPU_386,RELOC(_C_LABEL(cpu))
308: jmp 2f
309:
310: try486: /* Try to toggle identification flag; does not exist on early 486s. */
311: pushfl
312: popl %eax
313: movl %eax,%ecx
314: xorl $PSL_ID,%eax
315: pushl %eax
316: popfl
317: pushfl
318: popl %eax
319: xorl %ecx,%eax
320: andl $PSL_ID,%eax
321: pushl %ecx
322: popfl
323:
324: testl %eax,%eax
325: jnz try586
326: is486: movl $CPU_486,RELOC(_C_LABEL(cpu))
327:
328: /*
329: * Check Cyrix CPU
330: * Cyrix CPUs do not change the undefined flags following
331: * execution of the divide instruction which divides 5 by 2.
332: *
333: * Note: CPUID is enabled on M2, so it passes another way.
334: */
335: pushfl
336: movl $0x5555, %eax
337: xorl %edx, %edx
338: movl $2, %ecx
339: clc
340: divl %ecx
341: jnc trycyrix486
342: popfl
343: jmp 2f
344: trycyrix486:
345: movl $CPU_6x86,RELOC(_C_LABEL(cpu)) # set CPU type
346: /*
347: * Check for Cyrix 486 CPU by seeing if the flags change during a
348: * divide. This is documented in the Cx486SLC/e SMM Programmer's
349: * Guide.
350: */
351: xorl %edx,%edx
352: cmpl %edx,%edx # set flags to known state
353: pushfl
354: popl %ecx # store flags in ecx
355: movl $-1,%eax
356: movl $4,%ebx
357: divl %ebx # do a long division
358: pushfl
359: popl %eax
360: xorl %ecx,%eax # are the flags different?
361: testl $0x8d5,%eax # only check C|PF|AF|Z|N|V
362: jne 2f # yes; must not be Cyrix CPU
363: movl $CPU_486DLC,RELOC(_C_LABEL(cpu)) # set CPU type
364:
/*
 * Cyrix 486DLC configuration registers are reached by writing the register
 * index to I/O port 0x22 and reading/writing the value at port 0x23; each
 * data access must be preceded by its own index write.
 */
365: #ifndef CYRIX_CACHE_WORKS
366: /* Disable caching of the ISA hole only. */
367: invd
368: movb $CCR0,%al # Configuration Register index (CCR0)
369: outb %al,$0x22
370: inb $0x23,%al
371: orb $(CCR0_NC1|CCR0_BARB),%al
372: movb %al,%ah
373: movb $CCR0,%al
374: outb %al,$0x22
375: movb %ah,%al
376: outb %al,$0x23
377: invd
378: #else /* CYRIX_CACHE_WORKS */
379: /* Set cache parameters */
380: invd # Start with guaranteed clean cache
381: movb $CCR0,%al # Configuration Register index (CCR0)
382: outb %al,$0x22
383: inb $0x23,%al
384: andb $~CCR0_NC0,%al
385: #ifndef CYRIX_CACHE_REALLY_WORKS
386: orb $(CCR0_NC1|CCR0_BARB),%al
387: #else
388: orb $CCR0_NC1,%al
389: #endif
390: movb %al,%ah
391: movb $CCR0,%al
392: outb %al,$0x22
393: movb %ah,%al
394: outb %al,$0x23
395: /* clear non-cacheable region 1 */
396: movb $(NCR1+2),%al
397: outb %al,$0x22
398: movb $NCR_SIZE_0K,%al
399: outb %al,$0x23
400: /* clear non-cacheable region 2 */
401: movb $(NCR2+2),%al
402: outb %al,$0x22
403: movb $NCR_SIZE_0K,%al
404: outb %al,$0x23
405: /* clear non-cacheable region 3 */
406: movb $(NCR3+2),%al
407: outb %al,$0x22
408: movb $NCR_SIZE_0K,%al
409: outb %al,$0x23
410: /* clear non-cacheable region 4 */
411: movb $(NCR4+2),%al
412: outb %al,$0x22
413: movb $NCR_SIZE_0K,%al
414: outb %al,$0x23
415: /* enable caching in CR0 */
416: movl %cr0,%eax
417: andl $~(CR0_CD|CR0_NW),%eax
418: movl %eax,%cr0
419: invd
420: #endif /* CYRIX_CACHE_WORKS */
421:
422: jmp 2f
423:
/*
 * CPU supports the ID flag: harvest cpuid leaves.
 * Leaf 0: max standard leaf + 12-byte vendor string (ebx,edx,ecx order).
 * Leaf 1: stepping/model/family (cpu_id) and feature words.
 * Leaf 2 (if available): cache/TLB descriptor words.
 * Leaves 0x80000002-4 (if available): 48-byte brand string.
 */
424: try586: /* Use the `cpuid' instruction. */
425: xorl %eax,%eax
426: cpuid
427: movl %eax,RELOC(_C_LABEL(cpuid_level))
428: movl %ebx,RELOC(_C_LABEL(cpu_vendor)) # store vendor string
429: movl %edx,RELOC(_C_LABEL(cpu_vendor))+4
430: movl %ecx,RELOC(_C_LABEL(cpu_vendor))+8
431: movl $0, RELOC(_C_LABEL(cpu_vendor))+12 # NUL-terminate the 12-char vendor
432:
433: movl $1,%eax
434: cpuid
435: movl %eax,RELOC(_C_LABEL(cpu_id)) # store cpu_id and features
436: movl %ebx,RELOC(_C_LABEL(cpu_miscinfo))
437: movl %edx,RELOC(_C_LABEL(cpu_feature))
438: movl %ecx,RELOC(_C_LABEL(cpu_ecxfeature))
439:
440: movl RELOC(_C_LABEL(cpuid_level)),%eax
441: cmp $2,%eax
442: jl 1f
443:
444: movl $2,%eax
445: cpuid
446: /*
447: cmp $1,%al
448: jne 1f
449: */
450:
451: movl %eax,RELOC(_C_LABEL(cpu_cache_eax))
452: movl %ebx,RELOC(_C_LABEL(cpu_cache_ebx))
453: movl %ecx,RELOC(_C_LABEL(cpu_cache_ecx))
454: movl %edx,RELOC(_C_LABEL(cpu_cache_edx))
455:
456: 1:
457: /* Check if brand identification string is supported */
458: movl $0x80000000,%eax
459: cpuid
460: cmpl $0x80000000,%eax
461: jbe 2f
462: movl $0x80000002,%eax
463: cpuid
464: movl %eax,RELOC(_C_LABEL(cpu_brandstr))
465: movl %ebx,RELOC(_C_LABEL(cpu_brandstr))+4
466: movl %ecx,RELOC(_C_LABEL(cpu_brandstr))+8
467: movl %edx,RELOC(_C_LABEL(cpu_brandstr))+12
468: movl $0x80000003,%eax
469: cpuid
470: movl %eax,RELOC(_C_LABEL(cpu_brandstr))+16
471: movl %ebx,RELOC(_C_LABEL(cpu_brandstr))+20
472: movl %ecx,RELOC(_C_LABEL(cpu_brandstr))+24
473: movl %edx,RELOC(_C_LABEL(cpu_brandstr))+28
474: movl $0x80000004,%eax
475: cpuid
476: movl %eax,RELOC(_C_LABEL(cpu_brandstr))+32
477: movl %ebx,RELOC(_C_LABEL(cpu_brandstr))+36
478: movl %ecx,RELOC(_C_LABEL(cpu_brandstr))+40
/* Force the final byte of the 48-byte brand string to NUL. */
479: andl $0x00ffffff,%edx /* Shouldn't be necessary */
480: movl %edx,RELOC(_C_LABEL(cpu_brandstr))+44
481:
482: 2:
483: /*
484: * Finished with old stack; load new %esp now instead of later so we
485: * can trace this code without having to worry about the trace trap
486: * clobbering the memory test or the zeroing of the bss+bootstrap page
487: * tables.
488: *
489: * The boot program should check:
490: * text+data <= &stack_variable - more_space_for_stack
491: * text+data+bss+pad+space_for_page_tables <= end_of_memory
492: * Oops, the gdt is in the carcass of the boot program so clearing
493: * the rest of memory is still not possible.
494: */
495: movl $RELOC(tmpstk),%esp # bootstrap stack end location
496:
497: /*
498: * Virtual address space of kernel:
499: *
500: * text | data | bss | [syms] | proc0 stack | page dir | Sysmap
501: * 0 1 2 3
502: */
503: #define PROC0STACK ((0) * NBPG)
504: #define PROC0PDIR (( UPAGES) * NBPG)
505: #define SYSMAP ((1+UPAGES) * NBPG)
506: #define TABLESIZE ((1+UPAGES) * NBPG) /* + _C_LABEL(nkpde) * NBPG */
507:
508: /* Find end of kernel image. */
509: movl $RELOC(_C_LABEL(end)),%edi
510: #if (defined(DDB) || NKSYMS > 0) && !defined(SYMTAB_SPACE)
511: /* Save the symbols (if loaded). */
512: movl RELOC(_C_LABEL(esym)),%eax
513: testl %eax,%eax
514: jz 1f
515: subl $KERNBASE,%eax
516: movl %eax,%edi
517: 1:
518: #endif
519:
520: /* Calculate where to start the bootstrap tables. */
/* From here on, %esi = physical base of the bootstrap tables (page aligned). */
521: movl %edi,%esi # edi = esym ? esym : end
522: addl $PGOFSET, %esi # page align up
523: andl $~PGOFSET, %esi
524:
525: /*
526: * Calculate the size of the kernel page table directory, and
527: * how many entries it will have.
528: */
/* Clamp nkpde into [NKPTP_MIN, NKPTP_MAX]. */
529: movl RELOC(_C_LABEL(nkpde)),%ecx # get nkpde
530: cmpl $NKPTP_MIN,%ecx # larger than min?
531: jge 1f
532: movl $NKPTP_MIN,%ecx # set at min
533: jmp 2f
534: 1: cmpl $NKPTP_MAX,%ecx # larger than max?
535: jle 2f
536: movl $NKPTP_MAX,%ecx
537: 2: movl %ecx,RELOC(_C_LABEL(nkpde)) # and store it back
538:
539: /* Clear memory for bootstrap tables. */
/* Zero from %edi (end of image) through end of tables, a longword at a time. */
540: shll $PGSHIFT,%ecx
541: addl $TABLESIZE,%ecx
542: addl %esi,%ecx # end of tables
543: subl %edi,%ecx # size of tables
544: shrl $2,%ecx
545: xorl %eax, %eax
546: cld
547: rep
548: stosl
549:
550: /*
551: * fillkpt
552: * eax = pte (page frame | control | status)
553: * ebx = page table address
554: * ecx = number of pages to map
555: */
556: #define fillkpt \
557: 1: movl %eax,(%ebx) ; \
558: addl $NBPG,%eax ; /* increment physical address */ \
559: addl $4,%ebx ; /* next pte */ \
560: loop 1b ;
561:
562: /*
563: * Build initial page tables.
564: */
565: /* Calculate end of text segment, rounded to a page. */
566: leal (RELOC(_C_LABEL(etext))+PGOFSET),%edx
567: andl $~PGOFSET,%edx
568:
/* Start filling Sysmap at the PTE index of KERNTEXTOFF (physical). */
569: /* Skip over the first 2MB. */
570: movl $RELOC(KERNTEXTOFF),%eax
571: movl %eax,%ecx
572: shrl $PGSHIFT,%ecx
573: leal (SYSMAP)(%esi,%ecx,4),%ebx
574:
/* Text is writable under DDB so breakpoints can be patched in. */
575: /* Map the kernel text read-only. */
576: movl %edx,%ecx
577: subl %eax,%ecx
578: shrl $PGSHIFT,%ecx
579: #ifdef DDB
580: orl $(PG_V|PG_KW),%eax
581: #else
582: orl $(PG_V|PG_KR),%eax
583: #endif
584: fillkpt
585:
586: /* Map the data, BSS, and bootstrap tables read-write. */
587: leal (PG_V|PG_KW)(%edx),%eax
588: movl RELOC(_C_LABEL(nkpde)),%ecx
589: shll $PGSHIFT,%ecx
590: addl $TABLESIZE,%ecx
591: addl %esi,%ecx # end of tables
592: subl %edx,%ecx # subtract end of text
593: shrl $PGSHIFT,%ecx
594: fillkpt
595:
596: /* Map ISA I/O memory. */
597: movl $(IOM_BEGIN|PG_V|PG_KW/*|PG_N*/),%eax # having these bits set
598: movl $(IOM_SIZE>>PGSHIFT),%ecx # for this many pte s,
599: fillkpt
600:
601: /*
602: * Construct a page table directory.
603: */
/*
 * Low PDE slots get a temporary identity-ish mapping of the kernel page
 * tables so execution survives the instant paging turns on; it is torn
 * down at `begin'.  (NB: `+' binds tighter than `|', so SYSMAP+PG_V
 * is computed first — both bit ORs below rely on this.)
 */
604: movl RELOC(_C_LABEL(nkpde)),%ecx # count of pde s,
605: leal (PROC0PDIR+0*4)(%esi),%ebx # where temp maps!
606: leal (SYSMAP+PG_V|PG_KW)(%esi),%eax # pte for KPT in proc 0
607: fillkpt
608:
609: /*
610: * Map kernel PDEs: this is the real mapping used
611: * after the temp mapping outlives its usefulness.
612: */
613: movl RELOC(_C_LABEL(nkpde)),%ecx # count of pde s,
614: leal (PROC0PDIR+PDSLOT_KERN*4)(%esi),%ebx # map them high
615: leal (SYSMAP+PG_V|PG_KW)(%esi),%eax # pte for KPT in proc 0
616: fillkpt
617:
618: /* Install a PDE recursively mapping page directory as a page table! */
619: leal (PROC0PDIR+PG_V|PG_KW)(%esi),%eax # pte for ptd
620: movl %eax,(PROC0PDIR+PDSLOT_PTE*4)(%esi) # recursive PD slot
621:
622: /* Save phys. addr of PTD, for libkvm. */
623: leal (PROC0PDIR)(%esi),%eax # phys address of ptd in proc 0
624: movl %eax,RELOC(_C_LABEL(PTDpaddr))
625:
626: /* Load base of page directory and enable mapping. */
627: movl %eax,%cr3 # load ptd addr into mmu
628: movl %cr0,%eax # get control word
629: # enable paging & NPX emulation
630: orl $(CR0_PE|CR0_PG|CR0_NE|CR0_TS|CR0_EM|CR0_MP),%eax
631: movl %eax,%cr0 # and let's page NOW!
632:
/* push/ret transfers to the KERNBASE-relative address of `begin'. */
633: pushl $begin # jump to high mem
634: ret
635:
636: begin:
637: /* Now running relocated at KERNBASE. Remove double mapping. */
638: movl _C_LABEL(nkpde),%ecx # for this many pde s,
639: leal (PROC0PDIR+0*4)(%esi),%ebx # which is where temp maps!
640: addl $(KERNBASE), %ebx # now use relocated address
641: 1: movl $0,(%ebx)
642: addl $4,%ebx # next pde
643: loop 1b
644:
645: /* Relocate atdevbase. */
/* ISA I/O mappings sit just past the bootstrap tables (see fillkpt above). */
646: movl _C_LABEL(nkpde),%edx
647: shll $PGSHIFT,%edx
648: addl $(TABLESIZE+KERNBASE),%edx
649: addl %esi,%edx
650: movl %edx,_C_LABEL(atdevbase)
651:
652: /* Set up bootstrap stack. */
653: leal (PROC0STACK+KERNBASE)(%esi),%eax
654: movl %eax,_C_LABEL(proc0paddr)
655: leal (USPACE-FRAMESIZE)(%eax),%esp
656: leal (PROC0PDIR)(%esi),%ebx # phys address of ptd in proc 0
657: movl %ebx,PCB_CR3(%eax) # pcb->pcb_cr3
658: xorl %ebp,%ebp # mark end of frames
659:
/* Pass first-free physical address (past stack + tables) to init386(). */
660: movl _C_LABEL(nkpde),%eax
661: shll $PGSHIFT,%eax
662: addl $TABLESIZE,%eax
663: addl %esi,%eax # skip past stack and page tables
664: pushl %eax
665: call _C_LABEL(init386) # wire 386 chip for unix operation
666: addl $4,%esp
667:
668: call _C_LABEL(main)
669:
/*
 * First code run by a newly forked process: lowers CPL, calls the
 * function in %esi with the single argument in %ebx (register contents
 * presumably arranged by cpu_fork — TODO confirm against machdep code),
 * then drops to user mode through the trap-frame exit path.
 */
670: NENTRY(proc_trampoline)
671: #ifdef MULTIPROCESSOR
672: call _C_LABEL(proc_trampoline_mp)
673: #endif
674: movl $IPL_NONE,CPL
675: pushl %ebx
676: call *%esi
677: addl $4,%esp
678: INTRFASTEXIT
679: /* NOTREACHED */
680:
681: /*****************************************************************************/
682:
683: /*
684: * Signal trampoline; copied to top of user stack.
685: */
/*
 * Plain-FPU variant: fnsave/frstor around the handler call when the
 * kernel supplied an FPU save area (SIGF_FPSTATE non-NULL).  Falls
 * through the sigcode_xmm label's tail at `2:' to issue sigreturn.
 */
686: NENTRY(sigcode)
687: movl SIGF_FPSTATE(%esp),%esi # FPU state area if need saving
688: testl %esi,%esi
689: jz 1f
690: fnsave (%esi)
691: 1: call *SIGF_HANDLER(%esp)
692: testl %esi,%esi
693: jz 2f
694: frstor (%esi)
695: jmp 2f
696:
/* SSE variant: same flow, but fxsave/fxrstor (fninit after fxsave,
 * since fxsave does not reinitialize the x87 state). */
697: .globl _C_LABEL(sigcode_xmm)
698: _C_LABEL(sigcode_xmm):
699: movl SIGF_FPSTATE(%esp),%esi # FPU state area if need saving
700: testl %esi,%esi
701: jz 1f
702: fxsave (%esi)
703: fninit
704: 1: call *SIGF_HANDLER(%esp)
705: testl %esi,%esi
706: jz 2f
707: fxrstor (%esi)
708:
709: 2: leal SIGF_SC(%esp),%eax # scp (the call may have clobbered the
710: # copy at SIGF_SCP(%esp))
711: pushl %eax
712: pushl %eax # junk to fake return address
713: movl $SYS_sigreturn,%eax
714: int $0x80 # enter kernel with args on stack
715: movl $SYS_exit,%eax
716: int $0x80 # exit if sigreturn fails
/* esigcode marks the end of the code copied to the user stack. */
717: .globl _C_LABEL(esigcode)
718: _C_LABEL(esigcode):
719:
720: /*****************************************************************************/
721:
/*
 * Emulation signal trampolines.  Same pattern as native sigcode: call
 * the handler address from the signal frame, then invoke the emulated
 * sigreturn/setcontext syscall; _exit if that ever returns.  Each is
 * bracketed by its NENTRY label and *_esigcode so the kernel can copy
 * the code span to the user stack.
 */
722: #ifdef COMPAT_SVR4
723: NENTRY(svr4_sigcode)
724: call *SVR4_SIGF_HANDLER(%esp)
725: leal SVR4_SIGF_UC(%esp),%eax # ucp (the call may have clobbered the
726: # copy at SIGF_UCP(%esp))
727: pushl %eax
728: pushl $1 # setcontext(p) == syscontext(1, p)
729: pushl %eax # junk to fake return address
730: movl $SVR4_SYS_context,%eax
731: int $0x80 # enter kernel with args on stack
732: movl $SVR4_SYS_exit,%eax
733: int $0x80 # exit if sigreturn fails
734: .globl _C_LABEL(svr4_esigcode)
735: _C_LABEL(svr4_esigcode):
736: #endif
737:
738: /*****************************************************************************/
739:
740: #ifdef COMPAT_LINUX
741: /*
742: * Signal trampoline; copied to top of user stack.
743: */
/* Linux sigreturn takes scp in %ebx, not on the stack. */
744: NENTRY(linux_sigcode)
745: call *LINUX_SIGF_HANDLER(%esp)
746: leal LINUX_SIGF_SC(%esp),%ebx # scp (the call may have clobbered the
747: # copy at SIGF_SCP(%esp))
748: pushl %eax # junk to fake return address
749: movl $LINUX_SYS_sigreturn,%eax
750: int $0x80 # enter kernel with args on stack
751: movl $LINUX_SYS_exit,%eax
752: int $0x80 # exit if sigreturn fails
753: .globl _C_LABEL(linux_esigcode)
754: _C_LABEL(linux_esigcode):
755: #endif
756:
757: /*****************************************************************************/
758:
759: #ifdef COMPAT_FREEBSD
760: /*
761: * Signal trampoline; copied to top of user stack.
762: */
763: NENTRY(freebsd_sigcode)
764: call *FREEBSD_SIGF_HANDLER(%esp)
765: leal FREEBSD_SIGF_SC(%esp),%eax # scp (the call may have clobbered
766: # the copy at SIGF_SCP(%esp))
767: pushl %eax
768: pushl %eax # junk to fake return address
769: movl $FREEBSD_SYS_sigreturn,%eax
770: int $0x80 # enter kernel with args on stack
771: movl $FREEBSD_SYS_exit,%eax
772: int $0x80 # exit if sigreturn fails
773: .globl _C_LABEL(freebsd_esigcode)
774: _C_LABEL(freebsd_esigcode):
775: #endif
776:
777: /*****************************************************************************/
778:
779: /*
780: * The following primitives are used to fill and copy regions of memory.
781: */
782:
783: /*
784: * fillw(short pattern, caddr_t addr, size_t len);
785: * Write len copies of pattern at addr.
786: */
/*
 * Replicates the 16-bit pattern into both halves of %eax, stores
 * len/2 longwords with stosl, then one trailing word if len is odd.
 * Clobbers %eax/%ecx/%edi per the C calling convention.
 */
787: ENTRY(fillw)
788: pushl %edi
789: movl 8(%esp),%eax
790: movl 12(%esp),%edi
791: movw %ax,%cx
792: rorl $16,%eax
793: movw %cx,%ax
794: cld
795: movl 16(%esp),%ecx
796: shrl %ecx # do longwords
797: rep
798: stosl
799: movl 16(%esp),%ecx
800: andl $1,%ecx # do remainder
801: rep
802: stosw
803: popl %edi
804: ret
805:
806:
807: /* Frame pointer reserve on stack. */
/* FPADD compensates argument offsets for the %ebp push done under DDB. */
808: #ifdef DDB
809: #define FPADD 4
810: #else
811: #define FPADD 0
812: #endif
813:
814: /*
815: * kcopy(caddr_t from, caddr_t to, size_t len);
816: * Copy len bytes, abort on fault.
817: */
/*
 * Like bcopy, but runs with pcb_onfault pointing at copy_fault so a
 * page fault during the copy returns EFAULT instead of panicking.
 * The previous pcb_onfault is pushed and restored on the way out.
 * Returns 0 in %eax on success.
 */
818: ENTRY(kcopy)
819: #ifdef DDB
820: pushl %ebp
821: movl %esp,%ebp
822: #endif
823: pushl %esi
824: pushl %edi
825: GET_CURPCB(%eax) # load curpcb into eax and set on-fault
826: pushl PCB_ONFAULT(%eax)
827: movl $_C_LABEL(copy_fault), PCB_ONFAULT(%eax)
828:
829: movl 16+FPADD(%esp),%esi
830: movl 20+FPADD(%esp),%edi
831: movl 24+FPADD(%esp),%ecx
/* If (to - from) < len (unsigned) the regions overlap: copy backward. */
832: movl %edi,%eax
833: subl %esi,%eax
834: cmpl %ecx,%eax # overlapping?
835: jb 1f
836: cld # nope, copy forward
837: shrl $2,%ecx # copy by 32-bit words
838: rep
839: movsl
840: movl 24+FPADD(%esp),%ecx
841: andl $3,%ecx # any bytes left?
842: rep
843: movsb
844:
845: GET_CURPCB(%edx) # XXX save curpcb?
846: popl PCB_ONFAULT(%edx)
847: popl %edi
848: popl %esi
849: xorl %eax,%eax
850: #ifdef DDB
851: leave
852: #endif
853: ret
854:
/* Backward copy: trailing odd bytes first (movsb), then aligned longwords. */
855: ALIGN_TEXT
856: 1: addl %ecx,%edi # copy backward
857: addl %ecx,%esi
858: std
859: andl $3,%ecx # any fractional bytes?
860: decl %edi
861: decl %esi
862: rep
863: movsb
864: movl 24+FPADD(%esp),%ecx # copy remainder by 32-bit words
865: shrl $2,%ecx
866: subl $3,%esi
867: subl $3,%edi
868: rep
869: movsl
870: cld
871:
872: GET_CURPCB(%edx)
873: popl PCB_ONFAULT(%edx)
874: popl %edi
875: popl %esi
876: xorl %eax,%eax
877: #ifdef DDB
878: leave
879: #endif
880: ret
881:
882: /*
883: * bcopy(caddr_t from, caddr_t to, size_t len);
884: * Copy len bytes.
885: */
/* Overlap-safe (memmove semantics); same forward/backward logic as kcopy
 * but with no fault handler. */
886: ALTENTRY(ovbcopy)
887: ENTRY(bcopy)
888: pushl %esi
889: pushl %edi
890: movl 12(%esp),%esi
891: movl 16(%esp),%edi
892: movl 20(%esp),%ecx
893: movl %edi,%eax
894: subl %esi,%eax
895: cmpl %ecx,%eax # overlapping?
896: jb 1f
897: cld # nope, copy forward
898: shrl $2,%ecx # copy by 32-bit words
899: rep
900: movsl
901: movl 20(%esp),%ecx
902: andl $3,%ecx # any bytes left?
903: rep
904: movsb
905: popl %edi
906: popl %esi
907: ret
908:
909: ALIGN_TEXT
910: 1: addl %ecx,%edi # copy backward
911: addl %ecx,%esi
912: std
913: andl $3,%ecx # any fractional bytes?
914: decl %edi
915: decl %esi
916: rep
917: movsb
918: movl 20(%esp),%ecx # copy remainder by 32-bit words
919: shrl $2,%ecx
920: subl $3,%esi
921: subl $3,%edi
922: rep
923: movsl
924: popl %edi
925: popl %esi
926: cld
927: ret
928:
929: /*
930: * Emulate memcpy() by swapping the first two arguments and calling bcopy()
931: */
/* memcpy(dst, src, len): swap in-place to (src, dst, len) and tail-jump. */
932: ENTRY(memcpy)
933: movl 4(%esp),%ecx
934: xchg 8(%esp),%ecx
935: movl %ecx,4(%esp)
936: jmp _C_LABEL(bcopy)
937:
938: /*****************************************************************************/
939:
940: /*
941: * The following primitives are used to copy data in and out of the user's
942: * address space.
943: */
944:
945: /*
946: * copyout(caddr_t from, caddr_t to, size_t len);
947: * Copy len bytes into the user's address space.
948: */
/*
 * Returns 0 on success, EFAULT via copy_fault otherwise.  The `pushl $0'
 * reserves the stack slot that the exit path pops into PCB_ONFAULT,
 * clearing the fault handler on return.
 */
949: ENTRY(copyout)
950: #ifdef DDB
951: pushl %ebp
952: movl %esp,%ebp
953: #endif
954: pushl %esi
955: pushl %edi
956: pushl $0
957:
958: movl 16+FPADD(%esp),%esi
959: movl 20+FPADD(%esp),%edi
960: movl 24+FPADD(%esp),%eax
961:
962: /*
963: * We check that the end of the destination buffer is not past the end
964: * of the user's address space. If it's not, then we only need to
965: * check that each page is writable. The 486 will do this for us; the
966: * 386 will not. (We assume that pages in user space that are not
967: * writable by the user are not writable by the kernel either.)
968: */
/* jc catches 32-bit wraparound of to+len before the limit compare. */
969: movl %edi,%edx
970: addl %eax,%edx
971: jc _C_LABEL(copy_fault)
972: cmpl $VM_MAXUSER_ADDRESS,%edx
973: ja _C_LABEL(copy_fault)
974:
975: 3: GET_CURPCB(%edx)
976: movl $_C_LABEL(copy_fault),PCB_ONFAULT(%edx)
977:
978: /* bcopy(%esi, %edi, %eax); */
979: cld
980: movl %eax,%ecx
981: shrl $2,%ecx
982: rep
983: movsl
984: movl %eax,%ecx
985: andl $3,%ecx
986: rep
987: movsb
988:
989: popl PCB_ONFAULT(%edx)
990: popl %edi
991: popl %esi
992: xorl %eax,%eax
993: #ifdef DDB
994: leave
995: #endif
996: ret
997:
998: /*
999: * copyin(caddr_t from, caddr_t to, size_t len);
1000: * Copy len bytes from the user's address space.
1001: */
/* Mirror of copyout with the range check applied to the *source*. */
1002: ENTRY(copyin)
1003: #ifdef DDB
1004: pushl %ebp
1005: movl %esp,%ebp
1006: #endif
1007: pushl %esi
1008: pushl %edi
1009: GET_CURPCB(%eax)
1010: pushl $0
1011: movl $_C_LABEL(copy_fault),PCB_ONFAULT(%eax)
1012:
1013: movl 16+FPADD(%esp),%esi
1014: movl 20+FPADD(%esp),%edi
1015: movl 24+FPADD(%esp),%eax
1016:
1017: /*
1018: * We check that the end of the destination buffer is not past the end
1019: * of the user's address space. If it's not, then we only need to
1020: * check that each page is readable, and the CPU will do that for us.
1021: */
1022: movl %esi,%edx
1023: addl %eax,%edx
1024: jc _C_LABEL(copy_fault)
1025: cmpl $VM_MAXUSER_ADDRESS,%edx
1026: ja _C_LABEL(copy_fault)
1027:
1028: 3: /* bcopy(%esi, %edi, %eax); */
1029: cld
1030: movl %eax,%ecx
1031: shrl $2,%ecx
1032: rep
1033: movsl
1034: movb %al,%cl
1035: andb $3,%cl
1036: rep
1037: movsb
1038:
1039: GET_CURPCB(%edx)
1040: popl PCB_ONFAULT(%edx)
1041: popl %edi
1042: popl %esi
1043: xorl %eax,%eax
1044: #ifdef DDB
1045: leave
1046: #endif
1047: ret
1048:
/*
 * Common fault target for copyin/copyout/kcopy: the trap handler jumps
 * here with the faulting routine's stack intact, so the pops below match
 * the pushes done on entry (saved onfault value, %edi, %esi).
 */
1049: ENTRY(copy_fault)
1050: GET_CURPCB(%edx)
1051: popl PCB_ONFAULT(%edx)
1052: popl %edi
1053: popl %esi
1054: movl $EFAULT,%eax
1055: #ifdef DDB
1056: leave
1057: #endif
1058: ret
1059:
1060: /*
1061: * copyoutstr(caddr_t from, caddr_t to, size_t maxlen, size_t *lencopied);
1062: * Copy a NUL-terminated string, at most maxlen characters long, into the
1063: * user's address space. Return the number of characters copied (including the
1064: * NUL) in *lencopied. If the string is too long, return ENAMETOOLONG; else
1065: * return 0 or EFAULT.
1066: */
1067: ENTRY(copyoutstr)
1068: #ifdef DDB
1069: pushl %ebp
1070: movl %esp,%ebp
1071: #endif
1072: pushl %esi
1073: pushl %edi
1074:
1075: movl 12+FPADD(%esp),%esi # esi = from
1076: movl 16+FPADD(%esp),%edi # edi = to
1077: movl 20+FPADD(%esp),%edx # edx = maxlen
1078:
1079: 5: GET_CURPCB(%eax)
1080: movl $_C_LABEL(copystr_fault),PCB_ONFAULT(%eax)
1081: /*
1082: * Get min(%edx, VM_MAXUSER_ADDRESS-%edi).
1083: */
1084: movl $VM_MAXUSER_ADDRESS,%eax
1085: subl %edi,%eax
1086: jbe _C_LABEL(copystr_fault) # die if CF == 1 || ZF == 1
1087: # i.e. make sure that %edi
1088: # is below VM_MAXUSER_ADDRESS
1089:
1090: cmpl %edx,%eax
1091: jae 1f
1092: movl %eax,%edx
1093: movl %eax,20+FPADD(%esp)
1094:
1095: 1: incl %edx
1096: cld
1097:
1098: 1: decl %edx
1099: jz 2f
1100: lodsb
1101: stosb
1102: testb %al,%al
1103: jnz 1b
1104:
1105: /* Success -- 0 byte reached. */
1106: decl %edx
1107: xorl %eax,%eax
1108: jmp copystr_return
1109:
1110: 2: /* edx is zero -- return EFAULT or ENAMETOOLONG. */
1111: cmpl $VM_MAXUSER_ADDRESS,%edi
1112: jae _C_LABEL(copystr_fault)
1113: movl $ENAMETOOLONG,%eax
1114: jmp copystr_return
1115:
1116: /*
1117: * copyinstr(caddr_t from, caddr_t to, size_t maxlen, size_t *lencopied);
1118: * Copy a NUL-terminated string, at most maxlen characters long, from the
1119: * user's address space. Return the number of characters copied (including the
1120: * NUL) in *lencopied. If the string is too long, return ENAMETOOLONG; else
1121: * return 0 or EFAULT.
1122: */
ENTRY(copyinstr)
#ifdef DDB
	pushl	%ebp
	movl	%esp,%ebp
#endif
	pushl	%esi
	pushl	%edi
	GET_CURPCB(%ecx)
	# Arm the pcb fault handler: a fault while reading the user
	# string unwinds into copystr_fault, which returns EFAULT.
	movl	$_C_LABEL(copystr_fault),PCB_ONFAULT(%ecx)

	movl	12+FPADD(%esp),%esi	# %esi = from (user source)
	movl	16+FPADD(%esp),%edi	# %edi = to (kernel destination)
	movl	20+FPADD(%esp),%edx	# %edx = maxlen

	/*
	 * Get min(%edx, VM_MAXUSER_ADDRESS-%esi).
	 */
	movl	$VM_MAXUSER_ADDRESS,%eax
	subl	%esi,%eax
	jbe	_C_LABEL(copystr_fault)	# Error if CF == 1 || ZF == 1
					# i.e. make sure that %esi
					# is below VM_MAXUSER_ADDRESS
	cmpl	%edx,%eax
	jae	1f
	movl	%eax,%edx		# clamp count to room left in user space
	movl	%eax,20+FPADD(%esp)	# stash clamped maxlen for copystr_return

1:	incl	%edx			# pre-bias; loop decrements before testing
	cld				# string ops go forward

1:	decl	%edx
	jz	2f			# count exhausted before NUL seen
	lodsb				# %al = *%esi++
	stosb				# *%edi++ = %al
	testb	%al,%al
	jnz	1b			# loop until the NUL has been copied

	/* Success -- 0 byte reached. */
	decl	%edx			# undo the pre-bias increment
	xorl	%eax,%eax		# return 0
	jmp	copystr_return

2:	/* edx is zero -- return EFAULT or ENAMETOOLONG. */
	cmpl	$VM_MAXUSER_ADDRESS,%esi
	jae	_C_LABEL(copystr_fault)	# stopped at top of user space
	movl	$ENAMETOOLONG,%eax	# ran out of caller-supplied maxlen
	jmp	copystr_return
1170:
/*
 * Common fault target and exit path for copyinstr/copyoutstr.
 * On entry to copystr_return: %eax = error code (0, EFAULT, ENAMETOOLONG),
 * %edx = characters remaining of the (possibly clamped) maxlen stored
 * back at 20+FPADD(%esp) by the copy routines.
 */
ENTRY(copystr_fault)
	movl	$EFAULT,%eax		# faulted touching user space

copystr_return:
	/* Set *lencopied and return %eax. */
	GET_CURPCB(%ecx)
	movl	$0,PCB_ONFAULT(%ecx)	# disarm the fault handler
	movl	20+FPADD(%esp),%ecx	# %ecx = (clamped) maxlen
	subl	%edx,%ecx		# copied = maxlen - remaining
	movl	24+FPADD(%esp),%edx	# %edx = lencopied pointer
	testl	%edx,%edx
	jz	8f			# NULL: caller doesn't want the count
	movl	%ecx,(%edx)		# *lencopied = count (incl. NUL)

8:	popl	%edi
	popl	%esi
#ifdef DDB
	leave
#endif
	ret
1191:
1192: /*
1193: * copystr(caddr_t from, caddr_t to, size_t maxlen, size_t *lencopied);
1194: * Copy a NUL-terminated string, at most maxlen characters long. Return the
1195: * number of characters copied (including the NUL) in *lencopied. If the
1196: * string is too long, return ENAMETOOLONG; else return 0.
1197: */
ENTRY(copystr)
#ifdef DDB
	pushl	%ebp
	movl	%esp,%ebp
#endif
	pushl	%esi
	pushl	%edi

	movl	12+FPADD(%esp),%esi	# esi = from (kernel source)
	movl	16+FPADD(%esp),%edi	# edi = to (kernel destination)
	movl	20+FPADD(%esp),%edx	# edx = maxlen
	incl	%edx			# pre-bias; loop decrements before testing
	cld				# string ops go forward

1:	decl	%edx
	jz	4f			# count exhausted before NUL seen
	lodsb				# %al = *%esi++
	stosb				# *%edi++ = %al
	testb	%al,%al
	jnz	1b			# loop until the NUL has been copied

	/* Success -- 0 byte reached. */
	decl	%edx			# undo the pre-bias increment
	xorl	%eax,%eax		# return 0
	jmp	6f

4:	/* edx is zero -- return ENAMETOOLONG. */
	movl	$ENAMETOOLONG,%eax

6:	/* Set *lencopied and return %eax. */
	movl	20+FPADD(%esp),%ecx	# %ecx = maxlen
	subl	%edx,%ecx		# copied = maxlen - remaining
	movl	24+FPADD(%esp),%edx	# %edx = lencopied pointer
	testl	%edx,%edx
	jz	7f			# NULL: caller doesn't want the count
	movl	%ecx,(%edx)		# *lencopied = count (incl. NUL)

7:	popl	%edi
	popl	%esi
#ifdef DDB
	leave
#endif
	ret
1241:
1242: /*****************************************************************************/
1243:
1244: /*
1245: * The following is i386-specific nonsense.
1246: */
1247:
1248: /*
1249: * void lgdt(struct region_descriptor *rdp);
1250: * Change the global descriptor table.
1251: */
NENTRY(lgdt)
	/* Reload the descriptor table. */
	movl	4(%esp),%eax		# %eax = region descriptor pointer
	lgdt	(%eax)
	/* Flush the prefetch q. */
	jmp	1f
	nop
1:	/* Reload "stale" selectors. */
	movl	$GSEL(GDATA_SEL, SEL_KPL),%eax
	movw	%ax,%ds
	movw	%ax,%es
	movw	%ax,%ss
	movl	$GSEL(GCPU_SEL, SEL_KPL),%eax
	movw	%ax,%fs			# %fs points at per-CPU cpu_info
	/* Reload code selector by doing intersegment return. */
	popl	%eax			# %eax = caller's return address
	pushl	$GSEL(GCODE_SEL, SEL_KPL)
	pushl	%eax
	lret				# far return reloads %cs
1271:
/*
 * setjmp(label_t *l) -- kernel-internal: save the callee-saved
 * registers, stack pointers and return address into *l.
 */
ENTRY(setjmp)
	movl	4(%esp),%eax		# %eax = save area
	movl	%ebx,(%eax)		# save ebx
	movl	%esp,4(%eax)		# save esp
	movl	%ebp,8(%eax)		# save ebp
	movl	%esi,12(%eax)		# save esi
	movl	%edi,16(%eax)		# save edi
	movl	(%esp),%edx		# get rta
	movl	%edx,20(%eax)		# save eip
	xorl	%eax,%eax		# return (0);
	ret
1283:
/*
 * longjmp(label_t *l) -- kernel-internal: restore the context saved
 * by setjmp() and resume there, making that setjmp() return 1.
 */
ENTRY(longjmp)
	movl	4(%esp),%eax		# %eax = saved context
	movl	(%eax),%ebx		# restore ebx
	movl	4(%eax),%esp		# restore esp
	movl	8(%eax),%ebp		# restore ebp
	movl	12(%eax),%esi		# restore esi
	movl	16(%eax),%edi		# restore edi
	movl	20(%eax),%edx		# get rta
	movl	%edx,(%esp)		# put in return frame
	xorl	%eax,%eax		# return (1);
	incl	%eax
	ret
1296:
1297: /*****************************************************************************/
1298:
1299: /*
1300: * The following primitives manipulate the run queues.
1301: * whichqs tells which of the 32 queues qs have processes in them.
1302: * Setrq puts processes into queues, Remrq removes them from queues.
1303: * The running process is on no queue, other processes are on a queue
1304: * related to p->p_pri, divided by 4 actually to shrink the 0-127 range
1305: * of priorities into the 32 available queues.
1306: */
1307: .globl _C_LABEL(whichqs),_C_LABEL(qs),_C_LABEL(uvmexp),_C_LABEL(panic)
1308: /*
1309: * setrunqueue(struct proc *p);
1310: * Insert a process on the appropriate queue. Should be called at splclock().
1311: */
NENTRY(setrunqueue)
	movl	4(%esp),%eax		# %eax = p
#ifdef DIAGNOSTIC
	cmpl	$0,P_BACK(%eax)		# should not be on q already
	jne	1f
	cmpl	$0,P_WCHAN(%eax)	# should not be sleeping on anything
	jne	1f
	cmpb	$SRUN,P_STAT(%eax)	# must be runnable
	jne	1f
#endif /* DIAGNOSTIC */
	movzbl	P_PRIORITY(%eax),%edx
	shrl	$2,%edx			# priority 0-127 -> queue 0-31
	btsl	%edx,_C_LABEL(whichqs)	# set q full bit
	leal	_C_LABEL(qs)(,%edx,8),%edx	# locate q hdr
	movl	P_BACK(%edx),%ecx	# %ecx = old tail of queue
	movl	%edx,P_FORW(%eax)	# link process on tail of q
	movl	%eax,P_BACK(%edx)
	movl	%eax,P_FORW(%ecx)
	movl	%ecx,P_BACK(%eax)
	ret
#ifdef DIAGNOSTIC
1:	pushl	$2f
	call	_C_LABEL(panic)
	/* NOTREACHED */
2:	.asciz	"setrunqueue"
#endif /* DIAGNOSTIC */
1338:
1339: /*
1340: * remrunqueue(struct proc *p);
1341: * Remove a process from its queue. Should be called at splclock().
1342: */
NENTRY(remrunqueue)
	movl	4(%esp),%ecx		# %ecx = p
	movzbl	P_PRIORITY(%ecx),%eax
#ifdef DIAGNOSTIC
	shrl	$2,%eax			# priority 0-127 -> queue 0-31
	btl	%eax,_C_LABEL(whichqs)	# queue must be marked non-empty
	jnc	1f
#endif /* DIAGNOSTIC */
	movl	P_BACK(%ecx),%edx	# unlink process
	movl	$0,P_BACK(%ecx)		# zap reverse link to indicate off list
	movl	P_FORW(%ecx),%ecx
	movl	%ecx,P_FORW(%edx)
	movl	%edx,P_BACK(%ecx)
	cmpl	%ecx,%edx		# q still has something?
	jne	2f
#ifndef DIAGNOSTIC
	shrl	$2,%eax			# (not yet shifted in this config)
#endif
	btrl	%eax,_C_LABEL(whichqs)	# no; clear bit
2:	ret
#ifdef DIAGNOSTIC
1:	pushl	$3f
	call	_C_LABEL(panic)
	/* NOTREACHED */
3:	.asciz	"remrunqueue"
#endif /* DIAGNOSTIC */
1369:
1370: #if NAPM > 0
1371: .globl _C_LABEL(apm_cpu_idle),_C_LABEL(apm_cpu_busy)
1372: #endif
1373: /*
1374: * When no processes are on the runq, cpu_switch() branches to here to wait for
1375: * something to come ready.
1376: */
ENTRY(idle)
	/* Skip context saving if we have none. */
	testl	%esi,%esi		# %esi = old proc (0 if none)
	jz	1f

	/*
	 * idling: save old context.
	 *
	 * Registers:
	 *   %eax, %ebx, %ecx - scratch
	 *   %esi - old proc, then old pcb
	 *   %edi - idle pcb
	 *   %edx - idle TSS selector
	 */

	pushl	%esi
	call	_C_LABEL(pmap_deactivate)	# pmap_deactivate(oldproc)
	addl	$4,%esp

	movl	P_ADDR(%esi),%esi	# %esi = old proc's pcb

	/* Save stack pointers. */
	movl	%esp,PCB_ESP(%esi)
	movl	%ebp,PCB_EBP(%esi)

	/* Find idle PCB for this CPU */
#ifndef MULTIPROCESSOR
	movl	$_C_LABEL(proc0),%ebx	# UP: idle context is proc0's
	movl	P_ADDR(%ebx),%edi
	movl	P_MD_TSS_SEL(%ebx),%edx
#else
	movl	CPUVAR(IDLE_PCB), %edi	# MP: per-CPU idle pcb/TSS
	movl	CPUVAR(IDLE_TSS_SEL), %edx
#endif

	/* Restore the idle context (avoid interrupts) */
	cli

	/* Restore stack pointers. */
	movl	PCB_ESP(%edi),%esp
	movl	PCB_EBP(%edi),%ebp


	/* Switch address space. */
	movl	PCB_CR3(%edi),%ecx
	movl	%ecx,%cr3

	/* Switch TSS. Reset "task busy" flag before loading. */
	movl	CPUVAR(GDT), %eax
	andl	$~0x0200,4-SEL_KPL(%eax,%edx,1)	# clear busy bit in descriptor
	ltr	%dx

	/* We're always in the kernel, so we don't need the LDT. */

	/* Restore cr0 (including FPU state). */
	movl	PCB_CR0(%edi),%ecx
	movl	%ecx,%cr0

	/* Record new pcb. */
	SET_CURPCB(%edi)

	xorl	%esi,%esi		# no old context to save next time
	sti

1:
#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	call	_C_LABEL(sched_unlock_idle)
#endif

	movl	$IPL_NONE,CPL		# spl0()
	call	_C_LABEL(Xspllower)	# process pending interrupts
	jmp	_C_LABEL(idle_start)
1449:
/*
 * idle_loop/idle_start/idle_exit: spin (or halt) until whichqs shows a
 * runnable process, then re-enter the scheduler at switch_search.
 */
ENTRY(idle_loop)
#if NAPM > 0
	call	_C_LABEL(apm_cpu_idle)	# let APM idle the CPU
#else
#if NPCTR > 0
	addl	$1,_C_LABEL(pctr_idlcnt)	# 64-bit idle counter:
	adcl	$0,_C_LABEL(pctr_idlcnt)+4	# low word, then carry into high
#endif
	sti				# enable interrupts, then
	hlt				# halt until one arrives
#endif
ENTRY(idle_start)
	cli				# check queues with interrupts off
	cmpl	$0,_C_LABEL(whichqs)
	jz	_C_LABEL(idle_loop)	# nothing runnable; idle some more

ENTRY(idle_exit)
	movl	$IPL_HIGH,CPL		# splhigh
	sti
#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	call	_C_LABEL(sched_lock_idle)
#endif
#if NAPM > 0
	call	_C_LABEL(apm_cpu_busy)	# tell APM we're busy again
#endif
	jmp	switch_search
1476:
#ifdef DIAGNOSTIC
/* Panic target for impossible run-queue states seen in cpu_switch(). */
NENTRY(switch_error)
	pushl	$1f
	call	_C_LABEL(panic)
	/* NOTREACHED */
1:	.asciz	"cpu_switch"
#endif /* DIAGNOSTIC */
1484:
1485: /*
1486: * cpu_switch(void);
1487: * Find a runnable process and switch to it. Wait if necessary. If the new
1488: * process is the same as the old one, we short-circuit the context save and
1489: * restore.
1490: */
ENTRY(cpu_switch)
	pushl	%ebx			# save callee-saved registers
	pushl	%esi
	pushl	%edi
	pushl	CPL			# save current spl for switch_return

	movl	CPUVAR(CURPROC), %esi	# %esi = old (outgoing) proc

	/*
	 * Clear curproc so that we don't accumulate system time while idle.
	 * This also insures that schedcpu() will move the old process to
	 * the correct queue if it happens to get called from the spllower()
	 * below and changes the priority.  (See corresponding comment in
	 * userret()).
	 */
	movl	$0, CPUVAR(CURPROC)

switch_search:
	/*
	 * First phase: find new process.
	 *
	 * Registers:
	 *   %eax - queue head, scratch, then zero
	 *   %ebx - queue number
	 *   %ecx - cached value of whichqs
	 *   %edx - next process in queue
	 *   %esi - old process
	 *   %edi - new process
	 */

	/* Wait for new process. */
	movl	_C_LABEL(whichqs),%ecx
	bsfl	%ecx,%ebx		# find a full q
	jz	_C_LABEL(idle)		# if none, idle
	leal	_C_LABEL(qs)(,%ebx,8),%eax	# select q
	movl	P_FORW(%eax),%edi	# unlink from front of process q
#ifdef DIAGNOSTIC
	cmpl	%edi,%eax		# linked to self (i.e. nothing queued)?
	je	_C_LABEL(switch_error)	# not possible
#endif /* DIAGNOSTIC */
	movl	P_FORW(%edi),%edx
	movl	%edx,P_FORW(%eax)
	movl	%eax,P_BACK(%edx)

	cmpl	%edx,%eax		# q empty?
	jne	3f

	btrl	%ebx,%ecx		# yes, clear to indicate empty
	movl	%ecx,_C_LABEL(whichqs)	# update q status

3:	xorl	%eax, %eax
	/* We just did it. */
	movl	$0, CPUVAR(RESCHED)	# cancel any pending resched request

#ifdef DIAGNOSTIC
	cmpl	%eax,P_WCHAN(%edi)	# Waiting for something?
	jne	_C_LABEL(switch_error)	# Yes; shouldn't be queued.
	cmpb	$SRUN,P_STAT(%edi)	# In run state?
	jne	_C_LABEL(switch_error)	# No; shouldn't be queued.
#endif /* DIAGNOSTIC */

	/* Isolate process. XXX Is this necessary? */
	movl	%eax,P_BACK(%edi)	# mark as off the run queues

	/* Record new process. */
	movb	$SONPROC,P_STAT(%edi)	# p->p_stat = SONPROC
	movl	CPUVAR(SELF), %ecx
	movl	%edi, CPUVAR(CURPROC)
	movl	%ecx, P_CPU(%edi)	# p->p_cpu = this cpu

	/* Skip context switch if same process. */
	cmpl	%edi,%esi
	je	switch_return

	/* If old process exited, don't bother. */
	testl	%esi,%esi
	jz	switch_exited

	/*
	 * Second phase: save old context.
	 *
	 * Registers:
	 *   %eax, %ecx - scratch
	 *   %esi - old process, then old pcb
	 *   %edi - new process
	 */

	pushl	%esi
	call	_C_LABEL(pmap_deactivate)	# pmap_deactivate(oldproc)
	addl	$4,%esp

	movl	P_ADDR(%esi),%esi	# %esi = old proc's pcb

	/* Save stack pointers. */
	movl	%esp,PCB_ESP(%esi)
	movl	%ebp,PCB_EBP(%esi)

switch_exited:
	/*
	 * Third phase: restore saved context.
	 *
	 * Registers:
	 *   %eax, %ecx, %edx - scratch
	 *   %esi - new pcb
	 *   %edi - new process
	 */

	/* No interrupts while loading new state. */
	cli
	movl	P_ADDR(%edi),%esi	# %esi = new proc's pcb

	/* Restore stack pointers. */
	movl	PCB_ESP(%esi),%esp
	movl	PCB_EBP(%esi),%ebp

#if 0
	/* Don't bother with the rest if switching to a system process. */
	testl	$P_SYSTEM,P_FLAG(%edi)
	jnz	switch_restored
#endif

	/*
	 * Activate the address space.  We're curproc, so %cr3 will
	 * be reloaded, but we're not yet curpcb, so the LDT won't
	 * be reloaded, although the PCB copy of the selector will
	 * be refreshed from the pmap.
	 */
	pushl	%edi
	call	_C_LABEL(pmap_activate)	# pmap_activate(newproc)
	addl	$4,%esp

	/* Load TSS info. */
	movl	CPUVAR(GDT),%eax
	movl	P_MD_TSS_SEL(%edi),%edx

	/* Switch TSS. */
	andl	$~0x0200,4-SEL_KPL(%eax,%edx,1)	# clear busy bit before ltr
	ltr	%dx

#ifdef USER_LDT
	/*
	 * Switch LDT.
	 *
	 * XXX
	 * Always do this, because the LDT could have been swapped into a
	 * different selector after a process exited.  (See gdt_compact().)
	 */
	movl	PCB_LDT_SEL(%esi),%edx
	lldt	%dx
#endif /* USER_LDT */

switch_restored:
	/* Restore cr0 (including FPU state). */
	movl	PCB_CR0(%esi),%ecx
#ifdef MULTIPROCESSOR
	/*
	 * If our floating point registers are on a different CPU,
	 * clear CR0_TS so we'll trap rather than reuse bogus state.
	 */
	movl	CPUVAR(SELF), %ebx
	cmpl	PCB_FPCPU(%esi),%ebx
	jz	1f
	orl	$CR0_TS,%ecx
1:
#endif
	movl	%ecx,%cr0

	/* Record new pcb. */
	SET_CURPCB(%esi)

	/* Interrupts are okay again. */
	sti

switch_return:
#if 0
	pushl	%edi			# debug: printf("%s: scheduled %x\n")
	movl	CPUVAR(NAME), %ebx
	leal	CPU_INFO_NAME(%ebx),%ebx
	pushl	%ebx
	pushl	$1f
	call	_C_LABEL(printf)
	addl	$0xc,%esp
#endif
#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	call	_C_LABEL(sched_unlock_idle)
#endif
	/*
	 * Restore old cpl from stack.  Note that this is always an increase,
	 * due to the spl0() on entry.
	 */
	popl	CPL

	movl	%edi,%eax		# return (p);
	popl	%edi
	popl	%esi
	popl	%ebx
	ret
1:	.asciz	"%s: scheduled %x\n"
1689: /*
1690: * switch_exit(struct proc *p);
1691: * Switch to the appropriate idle context (proc0's if uniprocessor; the cpu's if
1692: * multiprocessor) and deallocate the address space and kernel stack for p.
1693: * Then jump into cpu_switch(), as if we were in the idle proc all along.
1694: */
1695: #ifndef MULTIPROCESSOR
1696: .globl _C_LABEL(proc0)
1697: #endif
ENTRY(switch_exit)
	movl	4(%esp),%edi		# old process
#ifndef MULTIPROCESSOR
	movl	$_C_LABEL(proc0),%ebx	# UP: idle context is proc0's
	movl	P_ADDR(%ebx),%esi
	movl	P_MD_TSS_SEL(%ebx),%edx
#else
	movl	CPUVAR(IDLE_PCB), %esi	# MP: per-CPU idle pcb/TSS
	movl	CPUVAR(IDLE_TSS_SEL), %edx
#endif

	/* In case we fault... */
	movl	$0, CPUVAR(CURPROC)

	/* Restore the idle context. */
	cli

	/* Restore stack pointers. */
	movl	PCB_ESP(%esi),%esp	# now running on the idle stack
	movl	PCB_EBP(%esi),%ebp

	/* Load TSS info. */
	movl	CPUVAR(GDT), %eax

	/* Switch address space. */
	movl	PCB_CR3(%esi),%ecx	# dead proc's space can now be freed
	movl	%ecx,%cr3

	/* Switch TSS. */
	andl	$~0x0200,4-SEL_KPL(%eax,%edx,1)	# clear busy bit before ltr
	ltr	%dx

	/* We're always in the kernel, so we don't need the LDT. */

	/* Clear segment registers; always null in proc0. */
	xorl	%ecx,%ecx
	movw	%cx,%gs

	/* Point to cpu_info */
	movl	$GSEL(GCPU_SEL, SEL_KPL),%ecx
	movw	%cx,%fs

	/* Restore cr0 (including FPU state). */
	movl	PCB_CR0(%esi),%ecx
	movl	%ecx,%cr0

	/* Record new pcb. */
	SET_CURPCB(%esi)

	/* Interrupts are okay again. */
	sti

	/*
	 * Schedule the dead process's vmspace and stack to be freed.
	 */
	pushl	%edi			/* exit2(p) */
	call	_C_LABEL(exit2)
	addl	$4,%esp

	/* Jump into cpu_switch() with the right state. */
	xorl	%esi,%esi		# no old proc to save
	movl	$0, CPUVAR(CURPROC)
	jmp	switch_search
1761:
1762: /*
1763: * savectx(struct pcb *pcb);
1764: * Update pcb, saving current processor state.
1765: */
ENTRY(savectx)
	movl	4(%esp),%edx		# edx = p->p_addr (the pcb)

	/* Save stack pointers. */
	movl	%esp,PCB_ESP(%edx)
	movl	%ebp,PCB_EBP(%edx)

	ret
1774:
1775: /*****************************************************************************/
1776:
1777: /*
1778: * Trap and fault vector routines
1779: *
1780: * On exit from the kernel to user mode, we always need to check for ASTs. In
1781: * addition, we need to do this atomically; otherwise an interrupt may occur
1782: * which causes an AST, but it won't get processed until the next kernel entry
1783: * (possibly the next clock tick). Thus, we disable interrupt before checking,
1784: * and only enable them again on the final `iret' or before calling the AST
1785: * handler.
1786: *
1787: * XXX - debugger traps are now interrupt gates so at least bdb doesn't lose
1788: * control. The sti's give the standard losing behaviour for ddb and kgdb.
1789: */
/* Define an aligned, globally-visible IDT vector entry point. */
#define	IDTVEC(name)	ALIGN_TEXT; .globl X/**/name; X/**/name:

/* TRAP: trap number pushed, CPU supplied the error code. */
#define	TRAP(a)		pushl $(a) ; jmp _C_LABEL(alltraps)
/* ZTRAP: as TRAP, but push a zero for traps with no error code. */
#define	ZTRAP(a)	pushl $0 ; TRAP(a)
/* BPTTRAP: re-enable interrupts if they were on in the saved eflags
 * (PSL_I tested in the high byte at 13(%esp)), then trap. */
#define	BPTTRAP(a)	testb $(PSL_I>>8),13(%esp) ; jz 1f ; sti ; 1: ; \
			TRAP(a)
1796:
	.text
IDTVEC(div)
	ZTRAP(T_DIVIDE)			# 0: divide error
IDTVEC(dbg)
	subl	$4,%esp			# 1: debug trap
	pushl	%eax
	movl	%dr6,%eax		# pass debug status as "error code"
	movl	%eax,4(%esp)
	andb	$~0xf,%al		# clear B0-B3 breakpoint-hit bits
	movl	%eax,%dr6
	popl	%eax
	BPTTRAP(T_TRCTRAP)
IDTVEC(nmi)
	ZTRAP(T_NMI)			# 2: non-maskable interrupt
IDTVEC(bpt)
	pushl	$0			# 3: breakpoint (int $3)
	BPTTRAP(T_BPTFLT)
IDTVEC(ofl)
	ZTRAP(T_OFLOW)			# 4: overflow (into)
IDTVEC(bnd)
	ZTRAP(T_BOUND)			# 5: bound range exceeded
IDTVEC(ill)
	ZTRAP(T_PRIVINFLT)		# 6: invalid opcode
IDTVEC(dna)
#if NNPX > 0
	/* 7: device not available -- hand FPU to this proc via npxdna. */
	pushl	$0			# dummy error code
	pushl	$T_DNA
	INTRENTRY
#ifdef MULTIPROCESSOR
	pushl	CPUVAR(SELF)
#else
	pushl	$_C_LABEL(cpu_info_primary)
#endif
	call	*_C_LABEL(npxdna_func)
	addl	$4,%esp
	testl	%eax,%eax		# handled? (nonzero = yes)
	jz	calltrap
	INTRFASTEXIT
#else
	ZTRAP(T_DNA)
#endif
IDTVEC(dble)
	TRAP(T_DOUBLEFLT)		# 8: double fault
IDTVEC(fpusegm)
	ZTRAP(T_FPOPFLT)		# 9: coprocessor segment overrun
IDTVEC(tss)
	TRAP(T_TSSFLT)			# 10: invalid TSS
IDTVEC(missing)
	TRAP(T_SEGNPFLT)		# 11: segment not present
IDTVEC(stk)
	TRAP(T_STKFLT)			# 12: stack fault
IDTVEC(prot)
	TRAP(T_PROTFLT)			# 13: general protection fault
#ifdef I586_CPU
IDTVEC(f00f_redirect)
	/* Pentium F00F workaround: the IDT is mapped read-only, so the
	 * bogus-opcode lockup shows up as a kernel page fault on the
	 * int $6 descriptor; convert it back into T_PRIVINFLT. */
	pushl	$T_PAGEFLT
	INTRENTRY
	testb	$PGEX_U,TF_ERR(%esp)	# user-mode fault? real page fault
	jnz	calltrap
	movl	%cr2,%eax		# faulting address
	subl	_C_LABEL(idt),%eax
	cmpl	$(6*8),%eax		# fault on IDT entry 6?
	jne	calltrap
	movb	$T_PRIVINFLT,TF_TRAPNO(%esp)
	jmp	calltrap
#endif
IDTVEC(page)
	TRAP(T_PAGEFLT)			# 14: page fault
IDTVEC(rsvd)
	ZTRAP(T_RESERVED)		# 15: reserved
IDTVEC(mchk)
	ZTRAP(T_MACHK)			# 18: machine check
IDTVEC(simd)
	ZTRAP(T_XFTRAP)			# 19: SIMD floating point
IDTVEC(intrspurious)
	/*
	 * The Pentium Pro local APIC may erroneously call this vector for a
	 * default IR7.  Just ignore it.
	 *
	 * (The local APIC does this when CPL is raised while it's on the
	 * way to delivering an interrupt.. presumably enough has been set
	 * up that it's inconvenient to abort delivery completely..)
	 */
	iret
IDTVEC(fpu)
#if NNPX > 0
	/*
	 * Handle like an interrupt so that we can call npxintr to clear the
	 * error.  It would be better to handle npx interrupts as traps but
	 * this is difficult for nested interrupts.
	 */
	pushl	$0			# dummy error code
	pushl	$T_ASTFLT
	INTRENTRY
	pushl	CPL			# if_ppl in intrframe
	pushl	%esp			# push address of intrframe
	incl	_C_LABEL(uvmexp)+V_TRAP
	call	_C_LABEL(npxintr)
	addl	$8,%esp			# pop address and if_ppl
	INTRFASTEXIT
#else
	ZTRAP(T_ARITHTRAP)
#endif
IDTVEC(align)
	ZTRAP(T_ALIGNFLT)		# 17: alignment check
	/* 18 - 31 reserved for future exp */
1903:
1904: /*
1905: * If an error is detected during trap, syscall, or interrupt exit, trap() will
1906: * change %eip to point to one of these labels. We clean up the stack, if
1907: * necessary, and resume as if we were handling a general protection fault.
1908: * This will cause the process to get a SIGBUS.
1909: */
NENTRY(resume_iret)
	/* iret itself faulted; no stack cleanup needed. */
	ZTRAP(T_PROTFLT)
NENTRY(resume_pop_ds)
	/* %ds restore faulted; re-push %es and reload it, fall through. */
	pushl	%es
	movl	$GSEL(GDATA_SEL, SEL_KPL),%eax
	movw	%ax,%es
NENTRY(resume_pop_es)
	/* %es restore faulted; re-push %gs and reload it, fall through. */
	pushl	%gs
	movl	$GSEL(GDATA_SEL, SEL_KPL),%eax
	movw	%ax,%gs
NENTRY(resume_pop_gs)
	/* %gs restore faulted; re-push %fs and reload it, fall through. */
	pushl	%fs
	movl	$GSEL(GCPU_SEL, SEL_KPL),%eax
	movw	%ax,%fs
NENTRY(resume_pop_fs)
	/* Re-enter trap() as if a protection fault had occurred. */
	movl	$T_PROTFLT,TF_TRAPNO(%esp)
	jmp	calltrap
1927:
NENTRY(alltraps)
	INTRENTRY			# build the trapframe
calltrap:
#ifdef DIAGNOSTIC
	movl	CPL,%ebx		# remember spl to verify it on exit
#endif /* DIAGNOSTIC */
	call	_C_LABEL(trap)
2:	/* Check for ASTs on exit to user mode. */
	cli				# atomic wrt interrupts raising ASTs
	CHECK_ASTPENDING(%ecx)
	je	1f			# no AST pending
	testb	$SEL_RPL,TF_CS(%esp)	# returning to user mode?
#ifdef VM86
	jnz	5f
	testl	$PSL_VM,TF_EFLAGS(%esp)	# ...or to vm86 mode?
#endif
	jz	1f			# kernel mode: ASTs wait
5:	CLEAR_ASTPENDING(%ecx)
	sti
	movl	$T_ASTFLT,TF_TRAPNO(%esp)
	call	_C_LABEL(trap)		# deliver the AST
	jmp	2b			# re-check; trap() may post another
#ifndef DIAGNOSTIC
1:	INTRFASTEXIT
#else
1:	cmpl	CPL,%ebx		# spl unchanged across trap()?
	jne	3f
	INTRFASTEXIT
3:	sti
	pushl	$4f
	call	_C_LABEL(printf)
	addl	$4,%esp
#if defined(DDB) && 0
	int	$3
#endif /* DDB */
	movl	%ebx,CPL		# force spl back and retry exit
	jmp	2b
4:	.asciz	"WARNING: SPL NOT LOWERED ON TRAP EXIT\n"
#endif /* DIAGNOSTIC */
1967:
1968: /*
1969: * Old call gate entry for syscall
1970: */
IDTVEC(osyscall)
	/* Set eflags in trap frame. */
	pushfl				# call gates don't save eflags...
	popl	8(%esp)			# ...store them where iret expects them
	/* Turn off trace flag and nested task. */
	pushfl
	andb	$~((PSL_T|PSL_NT)>>8),1(%esp)
	popfl
	pushl	$7			# size of instruction for restart
	jmp	syscall1
IDTVEC(osyscall_end)
1982:
1983: /*
1984: * Trap gate entry for syscall
1985: */
IDTVEC(syscall)
	pushl	$2			# size of instruction for restart
syscall1:
	pushl	$T_ASTFLT		# trap # for doing ASTs
	INTRENTRY			# build the trapframe
	call	_C_LABEL(syscall)
2:	/* Check for ASTs on exit to user mode. */
	cli				# atomic wrt interrupts raising ASTs
	CHECK_ASTPENDING(%ecx)
	je	1f			# no AST pending
	/* Always returning to user mode here. */
	CLEAR_ASTPENDING(%ecx)
	sti
	/* Pushed T_ASTFLT into tf_trapno on entry. */
	call	_C_LABEL(trap)		# deliver the AST
	jmp	2b			# re-check; trap() may post another
1:	INTRFASTEXIT
2003:
2004: #include <i386/i386/vector.s>
2005: #include <i386/isa/icu.s>
2006:
2007: /*
2008: * bzero (void *b, size_t len)
2009: * write len zero bytes to the string b.
2010: */
2011:
ENTRY(bzero)
	pushl	%edi
	movl	8(%esp),%edi		# %edi = b
	movl	12(%esp),%edx		# %edx = len

	cld				/* set fill direction forward */
	xorl	%eax,%eax		/* set fill data to 0 */

	/*
	 * if the string is too short, it's really not worth the overhead
	 * of aligning to word boundaries, etc.  So we jump to a plain
	 * unaligned set.
	 */
	cmpl	$16,%edx
	jb	7f

	movl	%edi,%ecx		/* compute misalignment */
	negl	%ecx
	andl	$3,%ecx			/* 0-3 bytes to reach word alignment */
	subl	%ecx,%edx
	rep				/* zero until word aligned */
	stosb

#if defined(I486_CPU)
#if defined(I586_CPU) || defined(I686_CPU)
	cmpl	$CPUCLASS_486,_C_LABEL(cpu_class)
	jne	8f			/* only 486s like the unrolled loop */
#endif

	movl	%edx,%ecx
	shrl	$6,%ecx			/* count of 64-byte chunks */
	jz	8f
	andl	$63,%edx		/* remainder after the chunks */
1:	movl	%eax,(%edi)		/* 64 bytes per iteration */
	movl	%eax,4(%edi)
	movl	%eax,8(%edi)
	movl	%eax,12(%edi)
	movl	%eax,16(%edi)
	movl	%eax,20(%edi)
	movl	%eax,24(%edi)
	movl	%eax,28(%edi)
	movl	%eax,32(%edi)
	movl	%eax,36(%edi)
	movl	%eax,40(%edi)
	movl	%eax,44(%edi)
	movl	%eax,48(%edi)
	movl	%eax,52(%edi)
	movl	%eax,56(%edi)
	movl	%eax,60(%edi)
	addl	$64,%edi
	decl	%ecx
	jnz	1b
#endif

8:	movl	%edx,%ecx		/* zero by words */
	shrl	$2,%ecx
	andl	$3,%edx			/* 0-3 trailing bytes */
	rep
	stosl

7:	movl	%edx,%ecx		/* zero remainder bytes */
	rep
	stosb

	popl	%edi
	ret
2078:
2079: #if defined(I686_CPU) && !defined(SMALL_KERNEL)
/*
 * sse2_pagezero(void *page): zero one 4096-byte page using movnti
 * (non-temporal stores), avoiding cache pollution; sfence orders the
 * weakly-ordered stores before return.
 */
ENTRY(sse2_pagezero)
	pushl	%ebx
	movl	8(%esp),%ecx		# %ecx = page, used as cursor
	movl	%ecx,%eax
	addl	$4096,%eax		# %eax = end of page
	xor	%ebx,%ebx		# zero source value
1:
	movnti	%ebx,(%ecx)		# non-temporal 4-byte store
	addl	$4,%ecx
	cmpl	%ecx,%eax
	jne	1b
	sfence				# make the stores globally visible
	popl	%ebx
	ret
2094:
/*
 * i686_pagezero(void *page): zero one page, but scan first with
 * "repe scasl" and only write words that are not already zero.
 */
ENTRY(i686_pagezero)
	pushl	%edi
	pushl	%ebx

	movl	12(%esp), %edi		# %edi = page
	movl	$1024, %ecx		# 1024 words = 4096 bytes
	cld

	ALIGN_TEXT
1:
	xorl	%eax, %eax
	repe
	scasl				# scan forward while words == 0
	jnz	2f			# found a nonzero word

	popl	%ebx			# whole page already zero
	popl	%edi
	ret

	ALIGN_TEXT

2:
	incl	%ecx			# back up to the nonzero word
	subl	$4, %edi

	movl	%ecx, %edx		# %edx = words still to process
	cmpl	$16, %ecx

	jge	3f			# >= 16 left: just zero 16 words

	movl	%edi, %ebx		# near the end: zero only up to
	andl	$0x3f, %ebx		# the next 64-byte boundary
	shrl	%ebx
	shrl	%ebx			# byte offset -> word offset
	movl	$16, %ecx
	subl	%ebx, %ecx

3:
	subl	%ecx, %edx
	rep
	stosl				# zero the run of words

	movl	%edx, %ecx
	testl	%edx, %edx
	jnz	1b			# more words remain: resume scanning

	popl	%ebx
	popl	%edi
	ret
2144: #endif
2145:
2146: #if NLAPIC > 0
2147: #include <i386/i386/apicvec.s>
2148: #endif
2149:
2150: #include <i386/i386/mutex.S>
CVSweb