Annotation of sys/arch/i386/i386/machdep.c, Revision 1.1.1.1
1.1 nbrk 1: /* $OpenBSD: machdep.c,v 1.403 2007/07/20 17:04:14 mk Exp $ */
2: /* $NetBSD: machdep.c,v 1.214 1996/11/10 03:16:17 thorpej Exp $ */
3:
4: /*-
5: * Copyright (c) 1996, 1997 The NetBSD Foundation, Inc.
6: * All rights reserved.
7: *
8: * This code is derived from software contributed to The NetBSD Foundation
9: * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
10: * NASA Ames Research Center.
11: *
12: * Redistribution and use in source and binary forms, with or without
13: * modification, are permitted provided that the following conditions
14: * are met:
15: * 1. Redistributions of source code must retain the above copyright
16: * notice, this list of conditions and the following disclaimer.
17: * 2. Redistributions in binary form must reproduce the above copyright
18: * notice, this list of conditions and the following disclaimer in the
19: * documentation and/or other materials provided with the distribution.
20: * 3. All advertising materials mentioning features or use of this software
21: * must display the following acknowledgement:
22: * This product includes software developed by the NetBSD
23: * Foundation, Inc. and its contributors.
24: * 4. Neither the name of The NetBSD Foundation nor the names of its
25: * contributors may be used to endorse or promote products derived
26: * from this software without specific prior written permission.
27: *
28: * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
29: * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
30: * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
31: * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
32: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
33: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
34: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
35: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
36: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
37: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
38: * POSSIBILITY OF SUCH DAMAGE.
39: */
40:
41: /*-
42: * Copyright (c) 1993, 1994, 1995, 1996 Charles M. Hannum. All rights reserved.
43: * Copyright (c) 1992 Terrence R. Lambert.
44: * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
45: * All rights reserved.
46: *
47: * This code is derived from software contributed to Berkeley by
48: * William Jolitz.
49: *
50: * Redistribution and use in source and binary forms, with or without
51: * modification, are permitted provided that the following conditions
52: * are met:
53: * 1. Redistributions of source code must retain the above copyright
54: * notice, this list of conditions and the following disclaimer.
55: * 2. Redistributions in binary form must reproduce the above copyright
56: * notice, this list of conditions and the following disclaimer in the
57: * documentation and/or other materials provided with the distribution.
58: * 3. Neither the name of the University nor the names of its contributors
59: * may be used to endorse or promote products derived from this software
60: * without specific prior written permission.
61: *
62: * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
63: * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
64: * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
65: * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
66: * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
67: * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
68: * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
69: * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
70: * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
71: * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
72: * SUCH DAMAGE.
73: *
74: * @(#)machdep.c 7.4 (Berkeley) 6/3/91
75: */
76:
77: #include <sys/param.h>
78: #include <sys/systm.h>
79: #include <sys/signalvar.h>
80: #include <sys/kernel.h>
81: #include <sys/proc.h>
82: #include <sys/user.h>
83: #include <sys/exec.h>
84: #include <sys/buf.h>
85: #include <sys/reboot.h>
86: #include <sys/conf.h>
87: #include <sys/file.h>
88: #include <sys/timeout.h>
89: #include <sys/malloc.h>
90: #include <sys/mbuf.h>
91: #include <sys/msgbuf.h>
92: #include <sys/mount.h>
93: #include <sys/vnode.h>
94: #include <sys/device.h>
95: #include <sys/extent.h>
96: #include <sys/sysctl.h>
97: #include <sys/syscallargs.h>
98: #include <sys/core.h>
99: #include <sys/kcore.h>
100: #include <sys/sensors.h>
101: #ifdef SYSVMSG
102: #include <sys/msg.h>
103: #endif
104:
105: #ifdef KGDB
106: #include <sys/kgdb.h>
107: #endif
108:
109: #include <dev/cons.h>
110: #include <stand/boot/bootarg.h>
111:
112: #include <uvm/uvm_extern.h>
113:
114: #define _I386_BUS_DMA_PRIVATE
115: #include <machine/bus.h>
116:
117: #include <machine/cpu.h>
118: #include <machine/cpufunc.h>
119: #include <machine/cpuvar.h>
120: #include <machine/gdt.h>
121: #include <machine/pio.h>
122: #include <machine/bus.h>
123: #include <machine/psl.h>
124: #include <machine/reg.h>
125: #include <machine/specialreg.h>
126: #include <machine/biosvar.h>
127:
128: #include <dev/rndvar.h>
129: #include <dev/isa/isareg.h>
130: #include <dev/isa/isavar.h>
131: #include <dev/ic/i8042reg.h>
132: #include <dev/ic/mc146818reg.h>
133: #include <i386/isa/isa_machdep.h>
134: #include <i386/isa/nvram.h>
135:
136: #include "acpi.h"
137: #if NACPI > 0
138: #include <dev/acpi/acpivar.h>
139: #endif
140:
141: #include "apm.h"
142: #if NAPM > 0
143: #include <machine/apmvar.h>
144: #endif
145:
146: #ifdef DDB
147: #include <machine/db_machdep.h>
148: #include <ddb/db_access.h>
149: #include <ddb/db_sym.h>
150: #include <ddb/db_extern.h>
151: #endif
152:
153: #ifdef VM86
154: #include <machine/vm86.h>
155: #endif
156:
157: #include "isa.h"
158: #include "isadma.h"
159: #include "npx.h"
160: #if NNPX > 0
161: extern struct proc *npxproc;
162: #endif
163:
164: #include "bios.h"
165: #include "com.h"
166: #include "pccom.h"
167:
168: #if NPCCOM > 0
169: #include <sys/termios.h>
170: #include <dev/ic/comreg.h>
171: #if NCOM > 0
172: #include <dev/ic/comvar.h>
173: #elif NPCCOM > 0
174: #include <arch/i386/isa/pccomvar.h>
175: #endif
176: #endif /* NCOM > 0 || NPCCOM > 0 */
177:
178: /* the following is used externally (sysctl_hw) */
179: char machine[] = MACHINE;
180:
181: /*
182: * Declare these as initialized data so we can patch them.
183: */
184: #if NAPM > 0
185: int cpu_apmhalt = 0; /* sysctl'd to 1 for halt -p hack */
186: #endif
187:
188: #ifdef USER_LDT
189: int user_ldt_enable = 0; /* sysctl'd to 1 to enable */
190: #endif
191:
192: #ifndef BUFCACHEPERCENT
193: #define BUFCACHEPERCENT 10
194: #endif
195:
196: #ifdef BUFPAGES
197: int bufpages = BUFPAGES;
198: #else
199: int bufpages = 0;
200: #endif
201: int bufcachepercent = BUFCACHEPERCENT;
202:
203: extern int boothowto;
204: int physmem;
205:
206: struct dumpmem {
207: paddr_t start;
208: paddr_t end;
209: } dumpmem[VM_PHYSSEG_MAX];
210: u_int ndumpmem;
211:
212: /*
213: * These variables are needed by /sbin/savecore
214: */
215: u_long dumpmag = 0x8fca0101; /* magic number */
216: int dumpsize = 0; /* pages */
217: long dumplo = 0; /* blocks */
218:
219: int cpu_class;
220: int i386_fpu_present;
221: int i386_fpu_exception;
222: int i386_fpu_fdivbug;
223:
224: int i386_use_fxsave;
225: int i386_has_sse;
226: int i386_has_sse2;
227: int i386_has_xcrypt;
228:
229: bootarg_t *bootargp;
230: paddr_t avail_end;
231:
232: struct vm_map *exec_map = NULL;
233: struct vm_map *phys_map = NULL;
234:
235: #if !defined(SMALL_KERNEL) && defined(I686_CPU)
236: int p4_model;
237: int p3_early;
238: void (*update_cpuspeed)(void) = NULL;
239: #endif
240: int kbd_reset;
241:
242: #if !defined(SMALL_KERNEL)
243: int bus_clock;
244: #endif
245: void (*setperf_setup)(struct cpu_info *);
246: int setperf_prio = 0; /* for concurrent handlers */
247:
248: void (*cpusensors_setup)(struct cpu_info *);
249:
250: void (*delay_func)(int) = i8254_delay;
251: void (*initclock_func)(void) = i8254_initclocks;
252:
253: /*
254: * Extent maps to manage I/O and ISA memory hole space. Allocate
255: * storage for 8 regions in each, initially. Later, ioport_malloc_safe
256: * will indicate that it's safe to use malloc() to dynamically allocate
257: * region descriptors.
258: *
259: * N.B. At least two regions are _always_ allocated from the iomem
260: * extent map; (0 -> ISA hole) and (end of ISA hole -> end of RAM).
261: *
262: * The extent maps are not static! Machine-dependent ISA and EISA
263: * routines need access to them for bus address space allocation.
264: */
265: static long ioport_ex_storage[EXTENT_FIXED_STORAGE_SIZE(16) / sizeof(long)];
266: static long iomem_ex_storage[EXTENT_FIXED_STORAGE_SIZE(16) / sizeof(long)];
267: struct extent *ioport_ex;
268: struct extent *iomem_ex;
269: static int ioport_malloc_safe;
270:
271: caddr_t allocsys(caddr_t);
272: void setup_buffers(void);
273: void dumpsys(void);
274: int cpu_dump(void);
275: void init386(paddr_t);
276: void consinit(void);
277: void (*cpuresetfn)(void);
278:
279: int bus_mem_add_mapping(bus_addr_t, bus_size_t,
280: int, bus_space_handle_t *);
281: int _bus_dmamap_load_buffer(bus_dma_tag_t, bus_dmamap_t, void *,
282: bus_size_t, struct proc *, int, paddr_t *, int *, int);
283:
284: #ifdef KGDB
285: #ifndef KGDB_DEVNAME
286: #ifdef __i386__
287: #define KGDB_DEVNAME "pccom"
288: #else
289: #define KGDB_DEVNAME "com"
290: #endif
291: #endif /* KGDB_DEVNAME */
292: char kgdb_devname[] = KGDB_DEVNAME;
293: #if (NCOM > 0 || NPCCOM > 0)
294: #ifndef KGDBADDR
295: #define KGDBADDR 0x3f8
296: #endif
297: int comkgdbaddr = KGDBADDR;
298: #ifndef KGDBRATE
299: #define KGDBRATE TTYDEF_SPEED
300: #endif
301: int comkgdbrate = KGDBRATE;
302: #ifndef KGDBMODE
303: #define KGDBMODE ((TTYDEF_CFLAG & ~(CSIZE | CSTOPB | PARENB)) | CS8) /* 8N1 */
304: #endif
305: int comkgdbmode = KGDBMODE;
306: #endif /* NCOM || NPCCOM */
307: void kgdb_port_init(void);
308: #endif /* KGDB */
309:
310: #ifdef APERTURE
311: #ifdef INSECURE
312: int allowaperture = 1;
313: #else
314: int allowaperture = 0;
315: #endif
316: #endif
317:
318: void winchip_cpu_setup(struct cpu_info *);
319: void amd_family5_setperf_setup(struct cpu_info *);
320: void amd_family5_setup(struct cpu_info *);
321: void amd_family6_setperf_setup(struct cpu_info *);
322: void amd_family6_setup(struct cpu_info *);
323: void cyrix3_setperf_setup(struct cpu_info *);
324: void cyrix3_cpu_setup(struct cpu_info *);
325: void cyrix6x86_cpu_setup(struct cpu_info *);
326: void natsem6x86_cpu_setup(struct cpu_info *);
327: void intel586_cpu_setup(struct cpu_info *);
328: void intel686_cpusensors_setup(struct cpu_info *);
329: void intel686_setperf_setup(struct cpu_info *);
330: void intel686_common_cpu_setup(struct cpu_info *);
331: void intel686_cpu_setup(struct cpu_info *);
332: void intel686_p4_cpu_setup(struct cpu_info *);
333: void intelcore_update_sensor(void *);
334: void tm86_cpu_setup(struct cpu_info *);
335: char * intel686_cpu_name(int);
336: char * cyrix3_cpu_name(int, int);
337: char * tm86_cpu_name(int);
338: void cyrix3_get_bus_clock(struct cpu_info *);
339: void p4_get_bus_clock(struct cpu_info *);
340: void p3_get_bus_clock(struct cpu_info *);
341: void p4_update_cpuspeed(void);
342: void p3_update_cpuspeed(void);
343: int pentium_cpuspeed(int *);
344:
345: static __inline u_char
346: cyrix_read_reg(u_char reg)
347: {
348: outb(0x22, reg);
349: return inb(0x23);
350: }
351:
352: static __inline void
353: cyrix_write_reg(u_char reg, u_char data)
354: {
355: outb(0x22, reg);
356: outb(0x23, data);
357: }
358:
359: /*
360: * cpuid instruction. request in eax, result in eax, ebx, ecx, edx.
361: * requires caller to provide u_int32_t regs[4] array.
362: */
363: void
364: cpuid(u_int32_t ax, u_int32_t *regs)
365: {
366: __asm __volatile(
367: "cpuid\n\t"
368: "movl %%eax, 0(%2)\n\t"
369: "movl %%ebx, 4(%2)\n\t"
370: "movl %%ecx, 8(%2)\n\t"
371: "movl %%edx, 12(%2)\n\t"
372: :"=a" (ax)
373: :"0" (ax), "S" (regs)
374: :"bx", "cx", "dx");
375: }
376:
377: /*
378: * Machine-dependent startup code
379: */
380: void
381: cpu_startup()
382: {
383: unsigned i;
384: caddr_t v;
385: int sz;
386: vaddr_t minaddr, maxaddr, va;
387: paddr_t pa;
388:
389: /*
390: * Initialize error message buffer (at end of core).
391: * (space reserved in pmap_bootstrap)
392: */
393: pa = avail_end;
394: va = (vaddr_t)msgbufp;
395: for (i = 0; i < btoc(MSGBUFSIZE); i++) {
396: pmap_kenter_pa(va, pa, VM_PROT_READ|VM_PROT_WRITE);
397: va += PAGE_SIZE;
398: pa += PAGE_SIZE;
399: }
400: pmap_update(pmap_kernel());
401: initmsgbuf((caddr_t)msgbufp, round_page(MSGBUFSIZE));
402:
403: printf("%s", version);
404: startrtclock();
405:
406: /*
407: * We need to call identifycpu here early, so users have at least some
408: * basic information, if booting hangs later on.
409: */
410: strlcpy(curcpu()->ci_dev.dv_xname, "cpu0",
411: sizeof(curcpu()->ci_dev.dv_xname));
412: curcpu()->ci_signature = cpu_id;
413: curcpu()->ci_feature_flags = cpu_feature;
414: identifycpu(curcpu());
415:
416: printf("real mem = %llu (%lluMB)\n", ctob((unsigned long long)physmem),
417: ctob((unsigned long long)physmem)/1024U/1024U);
418:
419: /*
420: * Find out how much space we need, allocate it,
421: * and then give everything true virtual addresses.
422: */
423: sz = (int)allocsys((caddr_t)0);
424: if ((v = (caddr_t)uvm_km_zalloc(kernel_map, round_page(sz))) == 0)
425: panic("startup: no room for tables");
426: if (allocsys(v) - v != sz)
427: panic("startup: table size inconsistency");
428:
429: /*
430: * Now allocate buffers proper. They are different than the above
431: * in that they usually occupy more virtual memory than physical.
432: */
433: setup_buffers();
434:
435: /*
436: * Allocate a submap for exec arguments. This map effectively
437: * limits the number of processes exec'ing at any time.
438: */
439: minaddr = vm_map_min(kernel_map);
440: exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
441: 16*NCARGS, VM_MAP_PAGEABLE, FALSE, NULL);
442:
443: /*
444: * Allocate a submap for physio
445: */
446: phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
447: VM_PHYS_SIZE, 0, FALSE, NULL);
448:
449: printf("avail mem = %llu (%lluMB)\n",
450: ptoa((unsigned long long)uvmexp.free),
451: ptoa((unsigned long long)uvmexp.free)/1024U/1024U);
452:
453: /*
454: * Set up buffers, so they can be used to read disk labels.
455: */
456: bufinit();
457:
458: /*
459: * Configure the system.
460: */
461: if (boothowto & RB_CONFIG) {
462: #ifdef BOOT_CONFIG
463: user_config();
464: #else
465: printf("kernel does not support -c; continuing..\n");
466: #endif
467: }
468: ioport_malloc_safe = 1;
469: }
470:
471: /*
472: * Set up proc0's TSS and LDT.
473: */
474: void
475: i386_proc0_tss_ldt_init()
476: {
477: int x;
478: struct pcb *pcb;
479:
480: curpcb = pcb = &proc0.p_addr->u_pcb;
481:
482: pcb->pcb_tss.tss_ioopt =
483: ((caddr_t)pcb->pcb_iomap - (caddr_t)&pcb->pcb_tss) << 16;
484: for (x = 0; x < sizeof(pcb->pcb_iomap) / 4; x++)
485: pcb->pcb_iomap[x] = 0xffffffff;
486: pcb->pcb_iomap_pad = 0xff;
487:
488: pcb->pcb_ldt_sel = pmap_kernel()->pm_ldt_sel = GSEL(GLDT_SEL, SEL_KPL);
489: pcb->pcb_ldt = ldt;
490: pcb->pcb_cr0 = rcr0();
491: pcb->pcb_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
492: pcb->pcb_tss.tss_esp0 = (int)proc0.p_addr + USPACE - 16;
493: proc0.p_md.md_regs = (struct trapframe *)pcb->pcb_tss.tss_esp0 - 1;
494: proc0.p_md.md_tss_sel = tss_alloc(pcb);
495:
496: ltr(proc0.p_md.md_tss_sel);
497: lldt(pcb->pcb_ldt_sel);
498: }
499:
500: #ifdef MULTIPROCESSOR
501: void
502: i386_init_pcb_tss_ldt(struct cpu_info *ci)
503: {
504: int x;
505: struct pcb *pcb = ci->ci_idle_pcb;
506:
507: pcb->pcb_tss.tss_ioopt =
508: ((caddr_t)pcb->pcb_iomap - (caddr_t)&pcb->pcb_tss) << 16;
509: for (x = 0; x < sizeof(pcb->pcb_iomap) / 4; x++)
510: pcb->pcb_iomap[x] = 0xffffffff;
511: pcb->pcb_iomap_pad = 0xff;
512:
513: pcb->pcb_ldt_sel = pmap_kernel()->pm_ldt_sel = GSEL(GLDT_SEL, SEL_KPL);
514: pcb->pcb_ldt = ci->ci_ldt;
515: pcb->pcb_cr0 = rcr0();
516: ci->ci_idle_tss_sel = tss_alloc(pcb);
517: }
518: #endif /* MULTIPROCESSOR */
519:
520:
521: /*
522: * Allocate space for system data structures. We are given
523: * a starting virtual address and we return a final virtual
524: * address; along the way we set each data structure pointer.
525: *
526: * We call allocsys() with 0 to find out how much space we want,
527: * allocate that much and fill it with zeroes, and then call
528: * allocsys() again with the correct base virtual address.
529: */
530: caddr_t
531: allocsys(caddr_t v)
532: {
533:
534: #define valloc(name, type, num) \
535: v = (caddr_t)(((name) = (type *)v) + (num))
536:
537: #ifdef SYSVMSG
538: valloc(msgpool, char, msginfo.msgmax);
539: valloc(msgmaps, struct msgmap, msginfo.msgseg);
540: valloc(msghdrs, struct msg, msginfo.msgtql);
541: valloc(msqids, struct msqid_ds, msginfo.msgmni);
542: #endif
543:
544: return v;
545: }
546:
547: void
548: setup_buffers()
549: {
550: /*
551: * Determine how many buffers to allocate. We use bufcachepercent%
552: * of the memory below 4GB.
553: */
554: if (bufpages == 0)
555: bufpages = btoc(avail_end) * bufcachepercent / 100;
556:
557: /* Restrict to at most 25% filled kvm */
558: if (bufpages >
559: (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) / PAGE_SIZE / 4)
560: bufpages = (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) /
561: PAGE_SIZE / 4;
562: }
563:
564: /*
565: * Info for CTL_HW
566: */
567: char cpu_model[120];
568:
569: /*
570: * Note: these are just the ones that may not have a cpuid instruction.
571: * We deal with the rest in a different way.
572: */
573: const struct cpu_nocpuid_nameclass i386_nocpuid_cpus[] = {
574: { CPUVENDOR_INTEL, "Intel", "386SX", CPUCLASS_386,
575: NULL}, /* CPU_386SX */
576: { CPUVENDOR_INTEL, "Intel", "386DX", CPUCLASS_386,
577: NULL}, /* CPU_386 */
578: { CPUVENDOR_INTEL, "Intel", "486SX", CPUCLASS_486,
579: NULL}, /* CPU_486SX */
580: { CPUVENDOR_INTEL, "Intel", "486DX", CPUCLASS_486,
581: NULL}, /* CPU_486 */
582: { CPUVENDOR_CYRIX, "Cyrix", "486DLC", CPUCLASS_486,
583: NULL}, /* CPU_486DLC */
584: { CPUVENDOR_CYRIX, "Cyrix", "6x86", CPUCLASS_486,
585: cyrix6x86_cpu_setup}, /* CPU_6x86 */
586: { CPUVENDOR_NEXGEN,"NexGen","586", CPUCLASS_386,
587: NULL}, /* CPU_NX586 */
588: };
589:
590: const char *classnames[] = {
591: "386",
592: "486",
593: "586",
594: "686"
595: };
596:
597: const char *modifiers[] = {
598: "",
599: "OverDrive ",
600: "Dual ",
601: ""
602: };
603:
604: const struct cpu_cpuid_nameclass i386_cpuid_cpus[] = {
605: {
606: "GenuineIntel",
607: CPUVENDOR_INTEL,
608: "Intel",
609: /* Family 4 */
610: { {
611: CPUCLASS_486,
612: {
613: "486DX", "486DX", "486SX", "486DX2", "486SL",
614: "486SX2", 0, "486DX2 W/B",
615: "486DX4", 0, 0, 0, 0, 0, 0, 0,
616: "486" /* Default */
617: },
618: NULL
619: },
620: /* Family 5 */
621: {
622: CPUCLASS_586,
623: {
624: "Pentium (A-step)", "Pentium (P5)",
625: "Pentium (P54C)", "Pentium (P24T)",
626: "Pentium/MMX", "Pentium", 0,
627: "Pentium (P54C)", "Pentium/MMX",
628: 0, 0, 0, 0, 0, 0, 0,
629: "Pentium" /* Default */
630: },
631: intel586_cpu_setup
632: },
633: /* Family 6 */
634: {
635: CPUCLASS_686,
636: {
637: "Pentium Pro", "Pentium Pro", 0,
638: "Pentium II", "Pentium Pro",
639: "Pentium II/Celeron",
640: "Celeron",
641: "Pentium III",
642: "Pentium III",
643: "Pentium M",
644: "Pentium III Xeon",
645: "Pentium III", 0,
646: "Pentium M",
647: "Core Duo/Solo", 0,
648: "Pentium Pro, II or III" /* Default */
649: },
650: intel686_cpu_setup
651: },
652: /* Family 7 */
653: {
654: CPUCLASS_686,
655: } ,
656: /* Family 8 */
657: {
658: CPUCLASS_686,
659: } ,
660: /* Family 9 */
661: {
662: CPUCLASS_686,
663: } ,
664: /* Family A */
665: {
666: CPUCLASS_686,
667: } ,
668: /* Family B */
669: {
670: CPUCLASS_686,
671: } ,
672: /* Family C */
673: {
674: CPUCLASS_686,
675: } ,
676: /* Family D */
677: {
678: CPUCLASS_686,
679: } ,
680: /* Family E */
681: {
682: CPUCLASS_686,
683: } ,
684: /* Family F */
685: {
686: CPUCLASS_686,
687: {
688: "Pentium 4", 0, 0, 0,
689: 0, 0, 0, 0,
690: 0, 0, 0, 0,
691: 0, 0, 0, 0,
692: "Pentium 4" /* Default */
693: },
694: intel686_p4_cpu_setup
695: } }
696: },
697: {
698: "AuthenticAMD",
699: CPUVENDOR_AMD,
700: "AMD",
701: /* Family 4 */
702: { {
703: CPUCLASS_486,
704: {
705: 0, 0, 0, "Am486DX2 W/T",
706: 0, 0, 0, "Am486DX2 W/B",
707: "Am486DX4 W/T or Am5x86 W/T 150",
708: "Am486DX4 W/B or Am5x86 W/B 150", 0, 0,
709: 0, 0, "Am5x86 W/T 133/160",
710: "Am5x86 W/B 133/160",
711: "Am486 or Am5x86" /* Default */
712: },
713: NULL
714: },
715: /* Family 5 */
716: {
717: CPUCLASS_586,
718: {
719: "K5", "K5", "K5", "K5", 0, 0, "K6",
720: "K6", "K6-2", "K6-III", 0, 0, 0,
721: "K6-2+/III+", 0, 0,
722: "K5 or K6" /* Default */
723: },
724: amd_family5_setup
725: },
726: /* Family 6 */
727: {
728: CPUCLASS_686,
729: {
730: 0, "Athlon Model 1", "Athlon Model 2",
731: "Duron Model 3",
732: "Athlon Model 4",
733: 0, "Athlon XP Model 6",
734: "Duron Model 7",
735: "Athlon XP Model 8",
736: 0, "Athlon XP Model 10",
737: 0, 0, 0, 0, 0,
738: "K7" /* Default */
739: },
740: amd_family6_setup
741: },
742: /* Family 7 */
743: {
744: CPUCLASS_686,
745: } ,
746: /* Family 8 */
747: {
748: CPUCLASS_686,
749: } ,
750: /* Family 9 */
751: {
752: CPUCLASS_686,
753: } ,
754: /* Family A */
755: {
756: CPUCLASS_686,
757: } ,
758: /* Family B */
759: {
760: CPUCLASS_686,
761: } ,
762: /* Family C */
763: {
764: CPUCLASS_686,
765: } ,
766: /* Family D */
767: {
768: CPUCLASS_686,
769: } ,
770: /* Family E */
771: {
772: CPUCLASS_686,
773: } ,
774: /* Family F */
775: {
776: CPUCLASS_686,
777: {
778: 0, 0, 0, 0, "Athlon64",
779: "Opteron or Athlon64FX", 0, 0,
780: 0, 0, 0, 0, 0, 0, 0, 0,
781: "AMD64" /* DEFAULT */
782: },
783: amd_family6_setup
784: } }
785: },
786: {
787: "CyrixInstead",
788: CPUVENDOR_CYRIX,
789: "Cyrix",
790: /* Family 4 */
791: { {
792: CPUCLASS_486,
793: {
794: 0, 0, 0, "MediaGX", 0, 0, 0, 0, "5x86", 0, 0,
795: 0, 0, 0, 0,
796: "486 class" /* Default */
797: },
798: NULL
799: },
800: /* Family 5 */
801: {
802: CPUCLASS_586,
803: {
804: 0, 0, "6x86", 0, "GXm", 0, 0, 0, 0, 0,
805: 0, 0, 0, 0, 0, 0,
806: "586 class" /* Default */
807: },
808: cyrix6x86_cpu_setup
809: },
810: /* Family 6 */
811: {
812: CPUCLASS_686,
813: {
814: "6x86MX", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
815: 0, 0, 0, 0,
816: "686 class" /* Default */
817: },
818: NULL
819: } }
820: },
821: {
822: "CentaurHauls",
823: CPUVENDOR_IDT,
824: "IDT",
825: /* Family 4, not available from IDT */
826: { {
827: CPUCLASS_486,
828: {
829: 0, 0, 0, 0, 0, 0, 0, 0,
830: 0, 0, 0, 0, 0, 0, 0, 0,
831: "486 class" /* Default */
832: },
833: NULL
834: },
835: /* Family 5 */
836: {
837: CPUCLASS_586,
838: {
839: 0, 0, 0, 0, "WinChip C6", 0, 0, 0,
840: "WinChip 2", "WinChip 3", 0, 0, 0, 0, 0, 0,
841: "WinChip" /* Default */
842: },
843: winchip_cpu_setup
844: },
845: /* Family 6 */
846: {
847: CPUCLASS_686,
848: {
849: 0, 0, 0, 0, 0, 0,
850: "C3 Samuel",
851: "C3 Samuel 2/Ezra",
852: "C3 Ezra-T",
853: "C3 Nehemiah", "C3 Esther", 0, 0, 0, 0, 0,
854: "C3" /* Default */
855: },
856: cyrix3_cpu_setup
857: } }
858: },
859: {
860: "RiseRiseRise",
861: CPUVENDOR_RISE,
862: "Rise",
863: /* Family 4, not available from Rise */
864: { {
865: CPUCLASS_486,
866: {
867: 0, 0, 0, 0, 0, 0, 0, 0,
868: 0, 0, 0, 0, 0, 0, 0, 0,
869: "486 class" /* Default */
870: },
871: NULL
872: },
873: /* Family 5 */
874: {
875: CPUCLASS_586,
876: {
877: "mP6", 0, "mP6", 0, 0, 0, 0, 0,
878: 0, 0, 0, 0, 0, 0, 0, 0,
879: "mP6" /* Default */
880: },
881: NULL
882: },
883: /* Family 6, not yet available from Rise */
884: {
885: CPUCLASS_686,
886: {
887: 0, 0, 0, 0, 0, 0, 0, 0,
888: 0, 0, 0, 0, 0, 0, 0, 0,
889: "686 class" /* Default */
890: },
891: NULL
892: } }
893: },
894: {
895: "GenuineTMx86",
896: CPUVENDOR_TRANSMETA,
897: "Transmeta",
898: /* Family 4, not available from Transmeta */
899: { {
900: CPUCLASS_486,
901: {
902: 0, 0, 0, 0, 0, 0, 0, 0,
903: 0, 0, 0, 0, 0, 0, 0, 0,
904: "486 class" /* Default */
905: },
906: NULL
907: },
908: /* Family 5 */
909: {
910: CPUCLASS_586,
911: {
912: 0, 0, 0, 0, "TMS5x00", 0, 0,
913: 0, 0, 0, 0, 0, 0, 0, 0, 0,
914: "TMS5x00" /* Default */
915: },
916: tm86_cpu_setup
917: },
918: /* Family 6, not yet available from Transmeta */
919: {
920: CPUCLASS_686,
921: {
922: 0, 0, 0, 0, 0, 0, 0, 0,
923: 0, 0, 0, 0, 0, 0, 0, 0,
924: "686 class" /* Default */
925: },
926: NULL
927: } }
928: },
929: {
930: "Geode by NSC",
931: CPUVENDOR_NS,
932: "National Semiconductor",
933: /* Family 4, not available from National Semiconductor */
934: { {
935: CPUCLASS_486,
936: {
937: 0, 0, 0, 0, 0, 0, 0, 0,
938: 0, 0, 0, 0, 0, 0, 0, 0,
939: "486 class" /* Default */
940: },
941: NULL
942: },
943: /* Family 5 */
944: {
945: CPUCLASS_586,
946: {
947: 0, 0, 0, 0, "Geode GX1", 0, 0, 0, 0, 0,
948: 0, 0, 0, 0, 0, 0,
949: "586 class" /* Default */
950: },
951: natsem6x86_cpu_setup
952: } }
953: },
954: {
955: "SiS SiS SiS ",
956: CPUVENDOR_SIS,
957: "SiS",
958: /* Family 4, not available from SiS */
959: { {
960: CPUCLASS_486,
961: {
962: 0, 0, 0, 0, 0, 0, 0, 0,
963: 0, 0, 0, 0, 0, 0, 0, 0,
964: "486 class" /* Default */
965: },
966: NULL
967: },
968: /* Family 5 */
969: {
970: CPUCLASS_586,
971: {
972: "SiS55x", 0, 0, 0, 0, 0, 0, 0, 0, 0,
973: 0, 0, 0, 0, 0, 0,
974: "586 class" /* Default */
975: },
976: NULL
977: } }
978: }
979: };
980:
981: const struct cpu_cpuid_feature i386_cpuid_features[] = {
982: { CPUID_FPU, "FPU" },
983: { CPUID_VME, "V86" },
984: { CPUID_DE, "DE" },
985: { CPUID_PSE, "PSE" },
986: { CPUID_TSC, "TSC" },
987: { CPUID_MSR, "MSR" },
988: { CPUID_PAE, "PAE" },
989: { CPUID_MCE, "MCE" },
990: { CPUID_CX8, "CX8" },
991: { CPUID_APIC, "APIC" },
992: { CPUID_SYS1, "SYS" },
993: { CPUID_SEP, "SEP" },
994: { CPUID_MTRR, "MTRR" },
995: { CPUID_PGE, "PGE" },
996: { CPUID_MCA, "MCA" },
997: { CPUID_CMOV, "CMOV" },
998: { CPUID_PAT, "PAT" },
999: { CPUID_PSE36, "PSE36" },
1000: { CPUID_SER, "SER" },
1001: { CPUID_CFLUSH, "CFLUSH" },
1002: { CPUID_DS, "DS" },
1003: { CPUID_ACPI, "ACPI" },
1004: { CPUID_MMX, "MMX" },
1005: { CPUID_FXSR, "FXSR" },
1006: { CPUID_SSE, "SSE" },
1007: { CPUID_SSE2, "SSE2" },
1008: { CPUID_SS, "SS" },
1009: { CPUID_HTT, "HTT" },
1010: { CPUID_TM, "TM" },
1011: { CPUID_SBF, "SBF" },
1012: { CPUID_3DNOW, "3DNOW" },
1013: };
1014:
1015: const struct cpu_cpuid_feature i386_cpuid_ecxfeatures[] = {
1016: { CPUIDECX_SSE3, "SSE3" },
1017: { CPUIDECX_MWAIT, "MWAIT" },
1018: { CPUIDECX_DSCPL, "DS-CPL" },
1019: { CPUIDECX_VMX, "VMX" },
1020: { CPUIDECX_EST, "EST" },
1021: { CPUIDECX_TM2, "TM2" },
1022: { CPUIDECX_CNXTID, "CNXT-ID" },
1023: { CPUIDECX_CX16, "CX16" },
1024: { CPUIDECX_XTPR, "xTPR" },
1025: };
1026:
1027: void
1028: winchip_cpu_setup(struct cpu_info *ci)
1029: {
1030: #if defined(I586_CPU)
1031:
1032: switch ((ci->ci_signature >> 4) & 15) { /* model */
1033: case 4: /* WinChip C6 */
1034: ci->ci_feature_flags &= ~CPUID_TSC;
1035: /* Disable RDTSC instruction from user-level. */
1036: lcr4(rcr4() | CR4_TSD);
1037: printf("%s: TSC disabled\n", ci->ci_dev.dv_xname);
1038: break;
1039: }
1040: #endif
1041: }
1042:
1043: #if defined(I686_CPU) && !defined(SMALL_KERNEL)
1044: void
1045: cyrix3_setperf_setup(struct cpu_info *ci)
1046: {
1047: if (cpu_ecxfeature & CPUIDECX_EST) {
1048: if (rdmsr(MSR_MISC_ENABLE) & (1 << 16))
1049: est_init(ci->ci_dev.dv_xname, CPUVENDOR_VIA);
1050: else
1051: printf("%s: Enhanced SpeedStep disabled by BIOS\n",
1052: ci->ci_dev.dv_xname);
1053: }
1054: }
1055: #endif
1056:
1057: void
1058: cyrix3_cpu_setup(struct cpu_info *ci)
1059: {
1060: #if defined(I686_CPU)
1061: int model = (ci->ci_signature >> 4) & 15;
1062: int step = ci->ci_signature & 15;
1063:
1064: u_int64_t msreg;
1065: u_int32_t regs[4];
1066: unsigned int val;
1067: #if !defined(SMALL_KERNEL)
1068: extern void (*pagezero)(void *, size_t);
1069: extern void i686_pagezero(void *, size_t);
1070:
1071: pagezero = i686_pagezero;
1072:
1073: cyrix3_get_bus_clock(ci);
1074:
1075: setperf_setup = cyrix3_setperf_setup;
1076: #endif
1077:
1078: switch (model) {
1079: case 6: /* C3 Samuel 1 */
1080: case 7: /* C3 Samuel 2 or C3 Ezra */
1081: case 8: /* C3 Ezra-T */
1082: cpuid(0x80000001, regs);
1083: val = regs[3];
1084: if (val & (1U << 31)) {
1085: cpu_feature |= CPUID_3DNOW;
1086: } else {
1087: cpu_feature &= ~CPUID_3DNOW;
1088: }
1089: break;
1090:
1091: case 9:
1092: if (step < 3)
1093: break;
1094: /*
1095: * C3 Nehemiah: fall through.
1096: */
1097: case 10:
1098: /*
1099: * C3 Nehemiah/Esther:
1100: * First we check for extended feature flags, and then
1101: * (if present) retrieve the ones at 0xC0000001. In this
1102: * bit 2 tells us if the RNG is present. Bit 3 tells us
1103: * if the RNG has been enabled. In order to use the RNG
1104: * we need 3 things: We need an RNG, we need the FXSR bit
1105: * enabled in cr4 (SSE/SSE2 stuff), and we need to have
1106: * Bit 6 of MSR 0x110B set to 1 (the default), which will
1107: * show up as bit 3 set here.
1108: */
1109: cpuid(0xC0000000, regs); /* Check for RNG */
1110: val = regs[0];
1111: if (val >= 0xC0000001) {
1112: cpuid(0xC0000001, regs);
1113: val = regs[3];
1114: } else
1115: val = 0;
1116:
1117: if (val & (C3_CPUID_HAS_RNG | C3_CPUID_HAS_ACE))
1118: printf("%s:", ci->ci_dev.dv_xname);
1119:
1120: /* Enable RNG if present and disabled */
1121: if (val & C3_CPUID_HAS_RNG) {
1122: extern int viac3_rnd_present;
1123:
1124: if (!(val & C3_CPUID_DO_RNG)) {
1125: msreg = rdmsr(0x110B);
1126: msreg |= 0x40;
1127: wrmsr(0x110B, msreg);
1128: }
1129: viac3_rnd_present = 1;
1130: printf(" RNG");
1131: }
1132:
1133: /* Enable AES engine if present and disabled */
1134: if (val & C3_CPUID_HAS_ACE) {
1135: #ifdef CRYPTO
1136: if (!(val & C3_CPUID_DO_ACE)) {
1137: msreg = rdmsr(0x1107);
1138: msreg |= (0x01 << 28);
1139: wrmsr(0x1107, msreg);
1140: }
1141: i386_has_xcrypt |= C3_HAS_AES;
1142: #endif /* CRYPTO */
1143: printf(" AES");
1144: }
1145:
1146: /* Enable ACE2 engine if present and disabled */
1147: if (val & C3_CPUID_HAS_ACE2) {
1148: #ifdef CRYPTO
1149: if (!(val & C3_CPUID_DO_ACE2)) {
1150: msreg = rdmsr(0x1107);
1151: msreg |= (0x01 << 28);
1152: wrmsr(0x1107, msreg);
1153: }
1154: i386_has_xcrypt |= C3_HAS_AESCTR;
1155: #endif /* CRYPTO */
1156: printf(" AES-CTR");
1157: }
1158:
1159: /* Enable SHA engine if present and disabled */
1160: if (val & C3_CPUID_HAS_PHE) {
1161: #ifdef CRYPTO
1162: if (!(val & C3_CPUID_DO_PHE)) {
1163: msreg = rdmsr(0x1107);
1164: msreg |= (0x01 << 28/**/);
1165: wrmsr(0x1107, msreg);
1166: }
1167: i386_has_xcrypt |= C3_HAS_SHA;
1168: #endif /* CRYPTO */
1169: printf(" SHA1 SHA256");
1170: }
1171:
1172: /* Enable MM engine if present and disabled */
1173: if (val & C3_CPUID_HAS_PMM) {
1174: #ifdef CRYPTO
1175: if (!(val & C3_CPUID_DO_PMM)) {
1176: msreg = rdmsr(0x1107);
1177: msreg |= (0x01 << 28/**/);
1178: wrmsr(0x1107, msreg);
1179: }
1180: i386_has_xcrypt |= C3_HAS_MM;
1181: #endif /* CRYPTO */
1182: printf(" RSA");
1183: }
1184:
1185: printf("\n");
1186: break;
1187: }
1188: #endif
1189: }
1190:
/*
 * Cyrix 6x86 (M1) / MediaGX (GXm) per-CPU setup.
 *
 * M1 parts get a series of Cyrix configuration-register tweaks:
 * suspend-on-halt and the vendor-documented "coma bug" workaround.
 * GXm parts get their TSC capability bit cleared (see comment below).
 * The register pokes are order-dependent: CCR4/CCR5 access must be
 * enabled via reg 0xC3 before and disabled after the 0x31-0x3c writes.
 */
void
cyrix6x86_cpu_setup(struct cpu_info *ci)
{
	extern int clock_broken_latch;

	switch ((ci->ci_signature >> 4) & 15) { /* model */
	case -1: /* M1 w/o cpuid */
	case 2: /* M1 */
		/* set up various cyrix registers */
		/* Enable suspend on halt */
		cyrix_write_reg(0xc2, cyrix_read_reg(0xc2) | 0x08);
		/* enable access to ccr4/ccr5 */
		cyrix_write_reg(0xC3, cyrix_read_reg(0xC3) | 0x10);
		/* cyrix's workaround for the "coma bug" */
		cyrix_write_reg(0x31, cyrix_read_reg(0x31) | 0xf8);
		cyrix_write_reg(0x32, cyrix_read_reg(0x32) | 0x7f);
		cyrix_write_reg(0x33, cyrix_read_reg(0x33) & ~0xff);
		cyrix_write_reg(0x3c, cyrix_read_reg(0x3c) | 0x87);
		/* disable access to ccr4/ccr5 */
		cyrix_write_reg(0xC3, cyrix_read_reg(0xC3) & ~0x10);

		printf("%s: xchg bug workaround performed\n",
		    ci->ci_dev.dv_xname);
		break;
	case 4: /* GXm */
		/* Unset the TSC bit until calibrate_delay() gets fixed. */
		clock_broken_latch = 1;
		curcpu()->ci_feature_flags &= ~CPUID_TSC;
		printf("%s: TSC disabled\n", ci->ci_dev.dv_xname);
		break;
	}
}
1223:
/*
 * National Semiconductor Geode setup: the i8254 latch is unreliable on
 * these parts, and model 4 additionally gets its TSC capability bit
 * cleared.
 */
void
natsem6x86_cpu_setup(struct cpu_info *ci)
{
#if defined(I586_CPU) || defined(I686_CPU)
	extern int clock_broken_latch;
	int m = (ci->ci_signature >> 4) & 15;

	clock_broken_latch = 1;
	if (m == 4) {
		cpu_feature &= ~CPUID_TSC;
		printf("%s: TSC disabled\n", ci->ci_dev.dv_xname);
	}
#endif
}
1240:
/*
 * Pentium (P5) setup: install the F00F erratum workaround exactly once.
 */
void
intel586_cpu_setup(struct cpu_info *ci)
{
#if defined(I586_CPU)
	if (cpu_f00f_bug)
		return;		/* already installed */

	fix_f00f();
	printf("%s: F00F bug workaround installed\n",
	    ci->ci_dev.dv_xname);
#endif
}
1252:
#if !defined(SMALL_KERNEL) && defined(I586_CPU)
/* Hook AMD K6-family PowerNow! frequency scaling into setperf. */
void
amd_family5_setperf_setup(struct cpu_info *ci)
{
	k6_powernow_init();
}
#endif
1260:
/*
 * AMD family-5 (K5/K6) setup: repair the K5 model 0 CPUID feature bits
 * and, on models 12/13, arrange for PowerNow! setup.
 */
void
amd_family5_setup(struct cpu_info *ci)
{
	int model = (ci->ci_signature >> 4) & 15;

	switch (model) {
	case 0:	/* AMD-K5 Model 0 */
		/*
		 * According to the AMD Processor Recognition App Note,
		 * the AMD-K5 Model 0 uses the wrong bit to indicate
		 * support for global PTEs, instead using bit 9 (APIC)
		 * rather than bit 13 (i.e. "0x200" vs. "0x2000".  Oops!).
		 */
		if (cpu_feature & CPUID_APIC)
			cpu_feature = (cpu_feature & ~CPUID_APIC) | CPUID_PGE;
		/*
		 * XXX But pmap_pg_g is already initialized -- need to kick
		 * XXX the pmap somehow.  How does the MP branch do this?
		 */
		break;
	case 12:
	case 13:
#if !defined(SMALL_KERNEL) && defined(I586_CPU)
		setperf_setup = amd_family5_setperf_setup;
#endif
		break;
	}
}
1289:
#if !defined(SMALL_KERNEL) && defined(I686_CPU)
/*
 * Pick the PowerNow! flavour matching the CPU family:
 * K7 (family 6) or K8 (family 15).
 */
void
amd_family6_setperf_setup(struct cpu_info *ci)
{
	int fam = (ci->ci_signature >> 8) & 15;

	if (fam == 6)
		k7_powernow_init();
	else if (fam == 15)
		k8_powernow_init();
}
#endif /* !SMALL_KERNEL && I686_CPU */
1306:
/*
 * AMD family-6+ setup: select the fastest available page-zeroing
 * routine and hook up the family-specific setperf initialization.
 */
void
amd_family6_setup(struct cpu_info *ci)
{
#if !defined(SMALL_KERNEL) && defined(I686_CPU)
	extern void (*pagezero)(void *, size_t);
	extern void sse2_pagezero(void *, size_t);
	extern void i686_pagezero(void *, size_t);

	/* Prefer the SSE2 page-zeroing routine when the CPU has SSE2. */
	pagezero = (cpu_feature & CPUID_SSE2) ? sse2_pagezero : i686_pagezero;

	setperf_setup = amd_family6_setperf_setup;
#endif
}
1323:
1324: #if !defined(SMALL_KERNEL) && defined(I686_CPU)
1325: /*
1326: * Temperature read on the CPU is relative to the maximum
1327: * temperature supported by the CPU, Tj(Max).
1328: * Poorly documented, refer to:
1329: * http://softwarecommunity.intel.com/isn/Community/
1330: * en-US/forums/thread/30228638.aspx
1331: * Basically, depending on a bit in one msr, the max is either 85 or 100.
1332: * Then we subtract the temperature portion of thermal status from
1333: * max to get current temperature.
1334: */
1335: void
1336: intelcore_update_sensor(void *args)
1337: {
1338: struct cpu_info *ci = (struct cpu_info *) args;
1339: u_int64_t msr;
1340: int max = 100;
1341:
1342: if (rdmsr(MSR_TEMPERATURE_TARGET) & MSR_TEMPERATURE_TARGET_LOW_BIT)
1343: max = 85;
1344:
1345: msr = rdmsr(MSR_THERM_STATUS);
1346: if (msr & MSR_THERM_STATUS_VALID_BIT) {
1347: ci->ci_sensor.value = max - MSR_THERM_STATUS_TEMP(msr);
1348: /* micro degrees */
1349: ci->ci_sensor.value *= 1000000;
1350: /* kelvin */
1351: ci->ci_sensor.value += 273150000;
1352: ci->ci_sensor.flags &= ~SENSOR_FINVALID;
1353: } else {
1354: ci->ci_sensor.value = 0;
1355: ci->ci_sensor.flags |= SENSOR_FINVALID;
1356: }
1357: }
1358:
/*
 * Attach a per-CPU temperature sensor when CPUID reports an on-die
 * thermal sensor (CPUID.06H:EAX bit 0) and schedule
 * intelcore_update_sensor() to refresh it every 5 seconds.
 */
void
intel686_cpusensors_setup(struct cpu_info *ci)
{
	u_int regs[4];

	/* The thermal/power leaf only exists at cpuid level 6 and up. */
	if (cpuid_level < 0x06)
		return;

	/* CPUID.06H.EAX[0] = 1 tells us if we have on-die sensor */
	cpuid(0x06, regs);
	if ((regs[0] & 0x01) != 1)
		return;

	/* Setup the sensors structures */
	strlcpy(ci->ci_sensordev.xname, ci->ci_dev.dv_xname,
	    sizeof(ci->ci_sensordev.xname));
	ci->ci_sensor.type = SENSOR_TEMP;
	sensor_task_register(ci, intelcore_update_sensor, 5);
	sensor_attach(&ci->ci_sensordev, &ci->ci_sensor);
	sensordev_install(&ci->ci_sensordev);
}
1380: #endif
1381:
1382: #if !defined(SMALL_KERNEL) && defined(I686_CPU)
1383: void
1384: intel686_setperf_setup(struct cpu_info *ci)
1385: {
1386: int family = (ci->ci_signature >> 8) & 15;
1387: int step = ci->ci_signature & 15;
1388:
1389: if (cpu_ecxfeature & CPUIDECX_EST) {
1390: if (rdmsr(MSR_MISC_ENABLE) & (1 << 16))
1391: est_init(ci->ci_dev.dv_xname, CPUVENDOR_INTEL);
1392: else
1393: printf("%s: Enhanced SpeedStep disabled by BIOS\n",
1394: ci->ci_dev.dv_xname);
1395: } else if ((cpu_feature & (CPUID_ACPI | CPUID_TM)) ==
1396: (CPUID_ACPI | CPUID_TM))
1397: p4tcc_init(family, step);
1398: }
1399: #endif
1400:
1401: void
1402: intel686_common_cpu_setup(struct cpu_info *ci)
1403: {
1404:
1405: #if !defined(SMALL_KERNEL) && defined(I686_CPU)
1406: setperf_setup = intel686_setperf_setup;
1407: cpusensors_setup = intel686_cpusensors_setup;
1408: {
1409: extern void (*pagezero)(void *, size_t);
1410: extern void sse2_pagezero(void *, size_t);
1411: extern void i686_pagezero(void *, size_t);
1412:
1413: if (cpu_feature & CPUID_SSE2)
1414: pagezero = sse2_pagezero;
1415: else
1416: pagezero = i686_pagezero;
1417: }
1418: #endif
1419: /*
1420: * Make sure SYSENTER is disabled.
1421: */
1422: if (cpu_feature & CPUID_SEP)
1423: wrmsr(MSR_SYSENTER_CS, 0);
1424: }
1425:
/*
 * Per-CPU setup for Intel P6-family processors: masks the unusable
 * SYSENTER report on early PPro steppings and disables the Pentium III
 * processor serial number.
 */
void
intel686_cpu_setup(struct cpu_info *ci)
{
	int model = (ci->ci_signature >> 4) & 15;
	int step = ci->ci_signature & 15;
	u_quad_t msr119;

#if !defined(SMALL_KERNEL) && defined(I686_CPU)
	p3_get_bus_clock(ci);
#endif

	intel686_common_cpu_setup(ci);

	/*
	 * Original PPro returns SYSCALL in CPUID but is non-functional.
	 * From Intel Application Note #485.
	 */
	if ((model == 1) && (step < 3))
		ci->ci_feature_flags &= ~CPUID_SEP;

	/*
	 * Disable the Pentium3 serial number: set bit 21 of
	 * MSR_BBL_CR_CTL, hide the feature flag, and clamp ci_level to 2.
	 */
	if ((model == 7) && (ci->ci_feature_flags & CPUID_SER)) {
		msr119 = rdmsr(MSR_BBL_CR_CTL);
		msr119 |= 0x0000000000200000LL;
		wrmsr(MSR_BBL_CR_CTL, msr119);

		printf("%s: disabling processor serial number\n",
		    ci->ci_dev.dv_xname);
		ci->ci_feature_flags &= ~CPUID_SER;
		ci->ci_level = 2;
	}

#if !defined(SMALL_KERNEL) && defined(I686_CPU)
	/* Model 8 stepping 1 lacks the ratio extension bit, see
	 * p3_update_cpuspeed(). */
	p3_early = (model == 8 && step == 1) ? 1 : 0;
	update_cpuspeed = p3_update_cpuspeed;
#endif
}
1465:
/* Per-CPU setup for the Pentium 4 family. */
void
intel686_p4_cpu_setup(struct cpu_info *ci)
{
#if !defined(SMALL_KERNEL) && defined(I686_CPU)
	p4_get_bus_clock(ci);
#endif

	intel686_common_cpu_setup(ci);

#if !defined(SMALL_KERNEL) && defined(I686_CPU)
	/* Record the model for the P4 speed code elsewhere in the file. */
	p4_model = (ci->ci_signature >> 4) & 15;
	update_cpuspeed = p4_update_cpuspeed;
#endif
}
1480:
/* Transmeta TM86 setup: initialize LongRun power management. */
void
tm86_cpu_setup(struct cpu_info *ci)
{
#if !defined(SMALL_KERNEL) && defined(I586_CPU)
	longrun_init();
#endif
}
1488:
1489: char *
1490: intel686_cpu_name(int model)
1491: {
1492: char *ret = NULL;
1493:
1494: switch (model) {
1495: case 5:
1496: switch (cpu_cache_edx & 0xFF) {
1497: case 0x40:
1498: case 0x41:
1499: ret = "Celeron";
1500: break;
1501: /* 0x42 should not exist in this model. */
1502: case 0x43:
1503: ret = "Pentium II";
1504: break;
1505: case 0x44:
1506: case 0x45:
1507: ret = "Pentium II Xeon";
1508: break;
1509: }
1510: break;
1511: case 7:
1512: switch (cpu_cache_edx & 0xFF) {
1513: /* 0x40 - 0x42 should not exist in this model. */
1514: case 0x43:
1515: ret = "Pentium III";
1516: break;
1517: case 0x44:
1518: case 0x45:
1519: ret = "Pentium III Xeon";
1520: break;
1521: }
1522: break;
1523: }
1524:
1525: return (ret);
1526: }
1527:
/*
 * Map a VIA C3 model/stepping pair to its marketing name.
 * Returns NULL when the model is not recognized.
 */
char *
cyrix3_cpu_name(int model, int step)
{
	char *name = NULL;

	if (model == 7)
		name = (step < 8) ? "C3 Samuel 2" : "C3 Ezra";

	return name;
}
1543:
1544: /*
1545: * Print identification for the given CPU.
1546: * XXX XXX
1547: * This is not as clean as one might like, because it references
1548: * the "cpuid_level" and "cpu_vendor" globals.
1550: * cpuid_level isn't so bad, since both CPU's will hopefully
1551: * be of the same level.
1552: *
1553: * The Intel multiprocessor spec doesn't give us the cpu_vendor
1554: * information; however, the chance of multi-vendor SMP actually
1555: * ever *working* is sufficiently low that it's probably safe to assume
1556: * all processors are of the same vendor.
1557: */
1558:
void
identifycpu(struct cpu_info *ci)
{
	const char *name, *modifier, *vendorname, *token;
	int class = CPUCLASS_386, vendor, i, max;
	int family, model, step, modif, cachesize;
	const struct cpu_cpuid_nameclass *cpup = NULL;
	char *brandstr_from, *brandstr_to;
	char *cpu_device = ci->ci_dev.dv_xname;
	int skipspace;

	/* Pre-CPUID parts: identify from the table indexed by "cpu". */
	if (cpuid_level == -1) {
#ifdef DIAGNOSTIC
		if (cpu < 0 || cpu >=
		    (sizeof i386_nocpuid_cpus/sizeof(struct cpu_nocpuid_nameclass)))
			panic("unknown cpu type %d", cpu);
#endif
		name = i386_nocpuid_cpus[cpu].cpu_name;
		vendor = i386_nocpuid_cpus[cpu].cpu_vendor;
		vendorname = i386_nocpuid_cpus[cpu].cpu_vendorname;
		model = -1;
		step = -1;
		class = i386_nocpuid_cpus[cpu].cpu_class;
		ci->cpu_setup = i386_nocpuid_cpus[cpu].cpu_setup;
		modifier = "";
		token = "";
	} else {
		/* Decode the CPUID signature: type/family/model/stepping. */
		max = sizeof (i386_cpuid_cpus) / sizeof (i386_cpuid_cpus[0]);
		modif = (ci->ci_signature >> 12) & 3;
		family = (ci->ci_signature >> 8) & 15;
		model = (ci->ci_signature >> 4) & 15;
		step = ci->ci_signature & 15;
#ifdef CPUDEBUG
		printf("%s: family %x model %x step %x\n", cpu_device, family,
		    model, step);
		printf("%s: cpuid level %d cache eax %x ebx %x ecx %x edx %x\n",
		    cpu_device, cpuid_level, cpu_cache_eax, cpu_cache_ebx,
		    cpu_cache_ecx, cpu_cache_edx);
#endif
		if (family < CPU_MINFAMILY)
			panic("identifycpu: strange family value");

		/* Match the 12-byte CPUID vendor string against the table. */
		for (i = 0; i < max; i++) {
			if (!strncmp(cpu_vendor,
			    i386_cpuid_cpus[i].cpu_id, 12)) {
				cpup = &i386_cpuid_cpus[i];
				break;
			}
		}

		if (cpup == NULL) {
			/* Unknown vendor: synthesize a class from family. */
			vendor = CPUVENDOR_UNKNOWN;
			if (cpu_vendor[0] != '\0')
				vendorname = &cpu_vendor[0];
			else
				vendorname = "Unknown";
			if (family > CPU_MAXFAMILY)
				family = CPU_MAXFAMILY;
			class = family - 3;
			if (class > CPUCLASS_686)
				class = CPUCLASS_686;
			modifier = "";
			name = "";
			token = "";
			ci->cpu_setup = NULL;
		} else {
			token = cpup->cpu_id;
			vendor = cpup->cpu_vendor;
			vendorname = cpup->cpu_vendorname;
			/*
			 * Special hack for the VIA C3 series.
			 *
			 * VIA bought Centaur Technology from IDT in Aug 1999
			 * and marketed the processors as VIA Cyrix III/C3.
			 */
			if (vendor == CPUVENDOR_IDT && family >= 6) {
				vendor = CPUVENDOR_VIA;
				vendorname = "VIA";
			}
			modifier = modifiers[modif];
			/* Clamp family/model to the table dimensions. */
			if (family > CPU_MAXFAMILY) {
				family = CPU_MAXFAMILY;
				model = CPU_DEFMODEL;
			} else if (model > CPU_MAXMODEL)
				model = CPU_DEFMODEL;
			i = family - CPU_MINFAMILY;

			/* Special hack for the PentiumII/III series. */
			if (vendor == CPUVENDOR_INTEL && family == 6 &&
			    (model == 5 || model == 7)) {
				name = intel686_cpu_name(model);
			/* Special hack for the VIA C3 series. */
			} else if (vendor == CPUVENDOR_VIA && family == 6 &&
			    model == 7) {
				name = cyrix3_cpu_name(model, step);
			/* Special hack for the TMS5x00 series. */
			} else if (vendor == CPUVENDOR_TRANSMETA &&
			    family == 5 && model == 4) {
				name = tm86_cpu_name(model);
			} else
				name = cpup->cpu_family[i].cpu_models[model];
			if (name == NULL) {
				name = cpup->cpu_family[i].cpu_models[CPU_DEFMODEL];
				if (name == NULL)
					name = "";
			}
			class = cpup->cpu_family[i].cpu_class;
			ci->cpu_setup = cpup->cpu_family[i].cpu_setup;
		}
	}

	/* Find the amount of on-chip L2 cache. */
	cachesize = -1;
	if (vendor == CPUVENDOR_INTEL && cpuid_level >= 2 && family < 0xf) {
		/* Cache descriptors 0x40-0x45 map to these sizes in KB. */
		int intel_cachetable[] = { 0, 128, 256, 512, 1024, 2048 };

		if ((cpu_cache_edx & 0xFF) >= 0x40 &&
		    (cpu_cache_edx & 0xFF) <= 0x45)
			cachesize = intel_cachetable[(cpu_cache_edx & 0xFF) - 0x40];
	} else if (vendor == CPUVENDOR_AMD && class == CPUCLASS_686) {
		/* AMD: extended leaf 0x80000006, L2 size in ECX[31:16]. */
		u_int regs[4];
		cpuid(0x80000000, regs);
		if (regs[0] >= 0x80000006) {
			cpuid(0x80000006, regs);
			cachesize = (regs[2] >> 16);
		}
	}

	/* Remove leading and duplicated spaces from cpu_brandstr */
	brandstr_from = brandstr_to = cpu_brandstr;
	skipspace = 1;
	while (*brandstr_from != '\0') {
		if (!skipspace || *brandstr_from != ' ') {
			skipspace = 0;
			*(brandstr_to++) = *brandstr_from;
		}
		if (*brandstr_from == ' ')
			skipspace = 1;
		brandstr_from++;
	}
	*brandstr_to = '\0';

	/* No brand string from CPUID: synthesize one from the tables. */
	if (cpu_brandstr[0] == '\0') {
		snprintf(cpu_brandstr, 48 /* sizeof(cpu_brandstr) */,
		    "%s %s%s", vendorname, modifier, name);
	}

	if ((ci->ci_flags & CPUF_PRIMARY) == 0) {
		if (cachesize > -1) {
			snprintf(cpu_model, sizeof(cpu_model),
			    "%s (%s%s%s%s-class, %dKB L2 cache)",
			    cpu_brandstr,
			    ((*token) ? "\"" : ""), ((*token) ? token : ""),
			    ((*token) ? "\" " : ""), classnames[class], cachesize);
		} else {
			snprintf(cpu_model, sizeof(cpu_model),
			    "%s (%s%s%s%s-class)",
			    cpu_brandstr,
			    ((*token) ? "\"" : ""), ((*token) ? token : ""),
			    ((*token) ? "\" " : ""), classnames[class]);
		}

		printf("%s: %s", cpu_device, cpu_model);
	}

#if defined(I586_CPU) || defined(I686_CPU)
	if (ci->ci_feature_flags && (ci->ci_feature_flags & CPUID_TSC)) {
		/* Has TSC */
		calibrate_cyclecounter();
		/* 995 MHz and up is rounded and reported in GHz. */
		if (cpuspeed > 994) {
			int ghz, fr;

			ghz = (cpuspeed + 9) / 1000;
			fr = ((cpuspeed + 9) / 10 ) % 100;
			if ((ci->ci_flags & CPUF_PRIMARY) == 0) {
				if (fr)
					printf(" %d.%02d GHz", ghz, fr);
				else
					printf(" %d GHz", ghz);
			}
		} else {
			if ((ci->ci_flags & CPUF_PRIMARY) == 0) {
				printf(" %d MHz", cpuspeed);
			}
		}
	}
#endif
	if ((ci->ci_flags & CPUF_PRIMARY) == 0) {
		printf("\n");

		/* Print the comma-separated feature list (EDX then ECX). */
		if (ci->ci_feature_flags) {
			int numbits = 0;

			printf("%s: ", cpu_device);
			max = sizeof(i386_cpuid_features) /
			    sizeof(i386_cpuid_features[0]);
			for (i = 0; i < max; i++) {
				if (ci->ci_feature_flags &
				    i386_cpuid_features[i].feature_bit) {
					printf("%s%s", (numbits == 0 ? "" : ","),
					    i386_cpuid_features[i].feature_name);
					numbits++;
				}
			}
			max = sizeof(i386_cpuid_ecxfeatures)
			    / sizeof(i386_cpuid_ecxfeatures[0]);
			for (i = 0; i < max; i++) {
				if (cpu_ecxfeature &
				    i386_cpuid_ecxfeatures[i].feature_bit) {
					printf("%s%s", (numbits == 0 ? "" : ","),
					    i386_cpuid_ecxfeatures[i].feature_name);
					numbits++;
				}
			}
			printf("\n");
		}
	}

#ifndef MULTIPROCESSOR
	/* configure the CPU if needed */
	if (ci->cpu_setup != NULL)
		(ci->cpu_setup)(ci);
#endif

#ifndef SMALL_KERNEL
#if defined(I586_CPU) || defined(I686_CPU)
	if (cpuspeed != 0 && cpu_cpuspeed == NULL)
		cpu_cpuspeed = pentium_cpuspeed;
#endif
#endif

	cpu_class = class;

	/*
	 * Now that we have told the user what they have,
	 * let them know if that machine type isn't configured.
	 * Note: intentional fallthrough between the class cases below;
	 * a case only breaks once the class has been lowered to one the
	 * kernel is built for.
	 */
	switch (cpu_class) {
#if !defined(I486_CPU) && !defined(I586_CPU) && !defined(I686_CPU)
#error No CPU classes configured.
#endif
#ifndef I686_CPU
	case CPUCLASS_686:
		printf("NOTICE: this kernel does not support Pentium Pro CPU class\n");
#ifdef I586_CPU
		printf("NOTICE: lowering CPU class to i586\n");
		cpu_class = CPUCLASS_586;
		break;
#endif
#endif
#ifndef I586_CPU
	case CPUCLASS_586:
		printf("NOTICE: this kernel does not support Pentium CPU class\n");
#ifdef I486_CPU
		printf("NOTICE: lowering CPU class to i486\n");
		cpu_class = CPUCLASS_486;
		break;
#endif
#endif
#ifndef I486_CPU
	case CPUCLASS_486:
		printf("NOTICE: this kernel does not support i486 CPU class\n");
#endif
	case CPUCLASS_386:
		printf("NOTICE: this kernel does not support i386 CPU class\n");
		panic("no appropriate CPU class available");
	default:
		break;
	}

	ci->cpu_class = class;

	if (cpu == CPU_486DLC) {
#ifndef CYRIX_CACHE_WORKS
		printf("WARNING: CYRIX 486DLC CACHE UNCHANGED.\n");
#else
#ifndef CYRIX_CACHE_REALLY_WORKS
		printf("WARNING: CYRIX 486DLC CACHE ENABLED IN HOLD-FLUSH MODE.\n");
#else
		printf("WARNING: CYRIX 486DLC CACHE ENABLED.\n");
#endif
#endif
	}

	/*
	 * Enable ring 0 write protection (486 or above, but 386
	 * no longer supported).
	 */
	lcr0(rcr0() | CR0_WP);

#if defined(I686_CPU)
	/*
	 * If we have FXSAVE/FXRESTOR, use them.
	 */
	if (cpu_feature & CPUID_FXSR) {
		i386_use_fxsave = 1;
		lcr4(rcr4() | CR4_OSFXSR);

		/*
		 * If we have SSE/SSE2, enable XMM exceptions, and
		 * notify userland.
		 */
		if (cpu_feature & (CPUID_SSE|CPUID_SSE2)) {
			if (cpu_feature & CPUID_SSE)
				i386_has_sse = 1;
			if (cpu_feature & CPUID_SSE2)
				i386_has_sse2 = 1;
			lcr4(rcr4() | CR4_OSXMMEXCPT);
		}
	} else
		i386_use_fxsave = 0;

	if (vendor == CPUVENDOR_AMD)
		amd64_errata(ci);
#endif /* I686_CPU */
}
1875:
1876: char *
1877: tm86_cpu_name(int model)
1878: {
1879: u_int32_t regs[4];
1880: char *name = NULL;
1881:
1882: cpuid(0x80860001, regs);
1883:
1884: switch (model) {
1885: case 4:
1886: if (((regs[1] >> 16) & 0xff) >= 0x3)
1887: name = "TMS5800";
1888: else
1889: name = "TMS5600";
1890: }
1891:
1892: return name;
1893: }
1894:
1895: #ifndef SMALL_KERNEL
1896: #ifdef I686_CPU
1897: void
1898: cyrix3_get_bus_clock(struct cpu_info *ci)
1899: {
1900: u_int64_t msr;
1901: int bus;
1902:
1903: msr = rdmsr(MSR_EBL_CR_POWERON);
1904: bus = (msr >> 18) & 0x3;
1905: switch (bus) {
1906: case 0:
1907: bus_clock = BUS100;
1908: break;
1909: case 1:
1910: bus_clock = BUS133;
1911: break;
1912: case 2:
1913: bus_clock = BUS200;
1914: break;
1915: case 3:
1916: bus_clock = BUS166;
1917: break;
1918: }
1919: }
1920:
/*
 * Determine the Pentium 4 front-side bus clock from
 * MSR_EBC_FREQUENCY_ID.  The field position and encoding differ
 * between early (model < 2) and later parts.
 */
void
p4_get_bus_clock(struct cpu_info *ci)
{
	u_int64_t msr;
	int model, bus;

	model = (ci->ci_signature >> 4) & 15;
	msr = rdmsr(MSR_EBC_FREQUENCY_ID);
	if (model < 2) {
		/* Early P4: 3-bit FSB code in bits 23-21. */
		bus = (msr >> 21) & 0x7;
		switch (bus) {
		case 0:
			bus_clock = BUS100;
			break;
		case 1:
			bus_clock = BUS133;
			break;
		default:
			printf("%s: unknown Pentium 4 (model %d) "
			    "EBC_FREQUENCY_ID value %d\n",
			    ci->ci_dev.dv_xname, model, bus);
			break;
		}
	} else {
		/* Model >= 2: 3-bit FSB code in bits 18-16. */
		bus = (msr >> 16) & 0x7;
		switch (bus) {
		case 0:
			/* Code 0 means 100MHz on model 2, 266MHz after. */
			bus_clock = (model == 2) ? BUS100 : BUS266;
			break;
		case 1:
			bus_clock = BUS133;
			break;
		case 2:
			bus_clock = BUS200;
			break;
		case 3:
			bus_clock = BUS166;
			break;
		default:
			printf("%s: unknown Pentium 4 (model %d) "
			    "EBC_FREQUENCY_ID value %d\n",
			    ci->ci_dev.dv_xname, model, bus);
			break;
		}
	}
}
1967:
/*
 * Determine the front-side bus clock of a P6-family CPU.  The MSR and
 * encoding depend on the model: Pentium M and Core use MSR_FSB_FREQ,
 * older P6 parts use MSR_EBL_CR_POWERON.  Unknown encodings fall
 * through (via goto) to the default case, which dumps the raw MSR.
 */
void
p3_get_bus_clock(struct cpu_info *ci)
{
	u_int64_t msr;
	int model, bus;

	model = (ci->ci_signature >> 4) & 15;
	switch (model) {
	case 0x9: /* Pentium M (130 nm, Banias) */
		/* Banias always runs a 100MHz FSB. */
		bus_clock = BUS100;
		break;
	case 0xd: /* Pentium M (90 nm, Dothan) */
		msr = rdmsr(MSR_FSB_FREQ);
		bus = (msr >> 0) & 0x7;
		switch (bus) {
		case 0:
			bus_clock = BUS100;
			break;
		case 1:
			bus_clock = BUS133;
			break;
		default:
			printf("%s: unknown Pentium M FSB_FREQ value %d",
			    ci->ci_dev.dv_xname, bus);
			goto print_msr;
		}
		break;
	case 0xe: /* Core Duo/Solo */
	case 0xf: /* Core Xeon */
		msr = rdmsr(MSR_FSB_FREQ);
		bus = (msr >> 0) & 0x7;
		/* Note: the Core encoding is not in ascending order. */
		switch (bus) {
		case 5:
			bus_clock = BUS100;
			break;
		case 1:
			bus_clock = BUS133;
			break;
		case 3:
			bus_clock = BUS166;
			break;
		case 2:
			bus_clock = BUS200;
			break;
		case 0:
			bus_clock = BUS266;
			break;
		case 4:
			bus_clock = BUS333;
			break;
		default:
			printf("%s: unknown Core FSB_FREQ value %d",
			    ci->ci_dev.dv_xname, bus);
			goto print_msr;
		}
		break;
	case 0x1: /* Pentium Pro, model 1 */
	case 0x3: /* Pentium II, model 3 */
	case 0x5: /* Pentium II, II Xeon, Celeron, model 5 */
	case 0x6: /* Celeron, model 6 */
	case 0x7: /* Pentium III, III Xeon, model 7 */
	case 0x8: /* Pentium III, III Xeon, Celeron, model 8 */
	case 0xa: /* Pentium III Xeon, model A */
	case 0xb: /* Pentium III, model B */
		msr = rdmsr(MSR_EBL_CR_POWERON);
		bus = (msr >> 18) & 0x3;
		switch (bus) {
		case 0:
			bus_clock = BUS66;
			break;
		case 1:
			bus_clock = BUS133;
			break;
		case 2:
			bus_clock = BUS100;
			break;
		default:
			printf("%s: unknown i686 EBL_CR_POWERON value %d",
			    ci->ci_dev.dv_xname, bus);
			goto print_msr;
		}
		break;
	default:
		printf("%s: unknown i686 model %d, can't get bus clock",
		    ci->ci_dev.dv_xname, model);
print_msr:
		/*
		 * Show the EBL_CR_POWERON MSR, so we'll at least have
		 * some extra information, such as clock ratio, etc.
		 */
		printf(" (0x%llx)\n", rdmsr(MSR_EBL_CR_POWERON));
		break;
	}
}
2062:
2063: void
2064: p4_update_cpuspeed(void)
2065: {
2066: u_int64_t msr;
2067: int mult;
2068:
2069: if (bus_clock == 0) {
2070: printf("p4_update_cpuspeed: unknown bus clock\n");
2071: return;
2072: }
2073:
2074: msr = rdmsr(MSR_EBC_FREQUENCY_ID);
2075: mult = ((msr >> 24) & 0xff);
2076:
2077: cpuspeed = (bus_clock * mult) / 100;
2078: }
2079:
2080: void
2081: p3_update_cpuspeed(void)
2082: {
2083: u_int64_t msr;
2084: int mult;
2085: const u_int8_t mult_code[] = {
2086: 50, 30, 40, 0, 55, 35, 45, 0, 0, 70, 80, 60, 0, 75, 0, 65 };
2087:
2088: if (bus_clock == 0) {
2089: printf("p3_update_cpuspeed: unknown bus clock\n");
2090: return;
2091: }
2092:
2093: msr = rdmsr(MSR_EBL_CR_POWERON);
2094: mult = (msr >> 22) & 0xf;
2095: mult = mult_code[mult];
2096: if (!p3_early)
2097: mult += ((msr >> 27) & 0x1) * 40;
2098:
2099: cpuspeed = (bus_clock * mult) / 1000;
2100: }
2101: #endif /* I686_CPU */
2102:
2103: #if defined(I586_CPU) || defined(I686_CPU)
/* cpu_cpuspeed hook: report the calibrated CPU clock (MHz). Always 0. */
int
pentium_cpuspeed(int *freq)
{
	*freq = cpuspeed;
	return (0);
}
2110: #endif
2111: #endif /* !SMALL_KERNEL */
2112:
2113: #ifdef COMPAT_IBCS2
2114: void ibcs2_sendsig(sig_t, int, int, u_long, int, union sigval);
2115:
/*
 * iBCS2 signal delivery: translate the BSD signal number into its
 * iBCS2 equivalent, then deliver via the native sendsig().
 */
void
ibcs2_sendsig(sig_t catcher, int sig, int mask, u_long code, int type,
    union sigval val)
{
	extern int bsd_to_ibcs2_sig[];

	sendsig(catcher, bsd_to_ibcs2_sig[sig], mask, code, type, val);
}
2124: #endif
2125:
2126: /*
2127: * To send an AST to a process on another cpu we send an IPI to that cpu,
2128: * the IPI schedules a special soft interrupt (that does nothing) and then
2129: * returns through the normal interrupt return path which in turn handles
2130: * the AST.
2131: *
2132: * The IPI can't handle the AST because it usually requires grabbing the
2133: * biglock and we can't afford spinning in the IPI handler with interrupts
2134: * unlocked (so that we take further IPIs and grow our stack until it
2135: * overflows).
2136: */
void
aston(struct proc *p)
{
#ifdef MULTIPROCESSOR
	/*
	 * Atomically mark the AST pending; if it was not already
	 * pending and the process runs on another CPU, poke that CPU
	 * with an IPI (see the comment above on why the IPI itself
	 * does no work).
	 */
	if (i386_atomic_testset_i(&p->p_md.md_astpending, 1) == 0 &&
	    p->p_cpu != curcpu())
		i386_fast_ipi(p->p_cpu, LAPIC_IPI_AST);
#else
	p->p_md.md_astpending = 1;
#endif
}
2148:
2149: /*
2150: * Send an interrupt to process.
2151: *
2152: * Stack is set up to allow sigcode stored
2153: * in u. to call routine, followed by kcall
2154: * to sigreturn routine below. After sigreturn
2155: * resets the signal mask, the stack, and the
2156: * frame pointer, it returns to the user
2157: * specified pc, psl.
2158: */
void
sendsig(sig_t catcher, int sig, int mask, u_long code, int type,
    union sigval val)
{
#ifdef I686_CPU
	extern char sigcode, sigcode_xmm;
#endif
	struct proc *p = curproc;
	struct trapframe *tf = p->p_md.md_regs;
	struct sigframe *fp, frame;
	struct sigacts *psp = p->p_sigacts;
	register_t sp;
	int oonstack = psp->ps_sigstk.ss_flags & SS_ONSTACK;

	/*
	 * Build the argument list for the signal handler.
	 */
	frame.sf_signum = sig;

	/*
	 * Allocate space for the signal handler context.
	 * Use the alternate signal stack if one is configured for this
	 * signal and we are not already running on it.
	 */
	if ((psp->ps_flags & SAS_ALTSTACK) && !oonstack &&
	    (psp->ps_sigonstack & sigmask(sig))) {
		sp = (long)psp->ps_sigstk.ss_sp + psp->ps_sigstk.ss_size;
		psp->ps_sigstk.ss_flags |= SS_ONSTACK;
	} else
		sp = tf->tf_esp;

	/* Reserve 16-byte-aligned space for the FPU state if in use. */
	frame.sf_fpstate = NULL;
	if (p->p_md.md_flags & MDP_USEDFPU) {
		sp -= sizeof(union savefpu);
		sp &= ~0xf;	/* for XMM regs */
		frame.sf_fpstate = (void *)sp;
	}

	fp = (struct sigframe *)sp - 1;
	frame.sf_scp = &fp->sf_sc;
	frame.sf_sip = NULL;
	frame.sf_handler = catcher;

	/*
	 * Build the signal context to be used by sigreturn.
	 */
	frame.sf_sc.sc_err = tf->tf_err;
	frame.sf_sc.sc_trapno = tf->tf_trapno;
	frame.sf_sc.sc_onstack = oonstack;
	frame.sf_sc.sc_mask = mask;
#ifdef VM86
	if (tf->tf_eflags & PSL_VM) {
		/* vm86 mode keeps its segment registers elsewhere. */
		frame.sf_sc.sc_gs = tf->tf_vm86_gs;
		frame.sf_sc.sc_fs = tf->tf_vm86_fs;
		frame.sf_sc.sc_es = tf->tf_vm86_es;
		frame.sf_sc.sc_ds = tf->tf_vm86_ds;
		frame.sf_sc.sc_eflags = get_vflags(p);
	} else
#endif
	{
		frame.sf_sc.sc_fs = tf->tf_fs;
		frame.sf_sc.sc_gs = tf->tf_gs;
		frame.sf_sc.sc_es = tf->tf_es;
		frame.sf_sc.sc_ds = tf->tf_ds;
		frame.sf_sc.sc_eflags = tf->tf_eflags;
	}
	frame.sf_sc.sc_edi = tf->tf_edi;
	frame.sf_sc.sc_esi = tf->tf_esi;
	frame.sf_sc.sc_ebp = tf->tf_ebp;
	frame.sf_sc.sc_ebx = tf->tf_ebx;
	frame.sf_sc.sc_edx = tf->tf_edx;
	frame.sf_sc.sc_ecx = tf->tf_ecx;
	frame.sf_sc.sc_eax = tf->tf_eax;
	frame.sf_sc.sc_eip = tf->tf_eip;
	frame.sf_sc.sc_cs = tf->tf_cs;
	frame.sf_sc.sc_esp = tf->tf_esp;
	frame.sf_sc.sc_ss = tf->tf_ss;

	if (psp->ps_siginfo & sigmask(sig)) {
		frame.sf_sip = &fp->sf_si;
		initsiginfo(&frame.sf_si, sig, code, type, val);
#ifdef VM86
		if (sig == SIGURG)	/* VM86 userland trap */
			frame.sf_si.si_trapno = code;
#endif
	}

	/* XXX don't copyout siginfo if not needed? */
	if (copyout(&frame, fp, sizeof(frame)) != 0) {
		/*
		 * Process has trashed its stack; give it an illegal
		 * instruction to halt it in its tracks.
		 */
		sigexit(p, SIGILL);
		/* NOTREACHED */
	}

	/*
	 * Build context to run handler in.
	 */
	tf->tf_fs = GSEL(GUDATA_SEL, SEL_UPL);
	tf->tf_gs = GSEL(GUDATA_SEL, SEL_UPL);
	tf->tf_es = GSEL(GUDATA_SEL, SEL_UPL);
	tf->tf_ds = GSEL(GUDATA_SEL, SEL_UPL);
	tf->tf_eip = p->p_sigcode;
	tf->tf_cs = GSEL(GUCODE_SEL, SEL_UPL);
#ifdef I686_CPU
	/* Use the FXSAVE-aware signal trampoline when fxsave is in use. */
	if (i386_use_fxsave)
		tf->tf_eip += &sigcode_xmm - &sigcode;
#endif
	tf->tf_eflags &= ~(PSL_T|PSL_VM|PSL_AC);
	tf->tf_esp = (int)fp;
	tf->tf_ss = GSEL(GUDATA_SEL, SEL_UPL);
}
2271:
2272: /*
2273: * System call to cleanup state after a signal
2274: * has been taken. Reset signal mask and
2275: * stack state from context left by sendsig (above).
2276: * Return to previous pc and psl as specified by
2277: * context left by sendsig. Check carefully to
2278: * make sure that the user has not modified the
2279: * psl to gain improper privileges or to cause
2280: * a machine fault.
2281: */
int
sys_sigreturn(struct proc *p, void *v, register_t *retval)
{
	struct sys_sigreturn_args /* {
		syscallarg(struct sigcontext *) sigcntxp;
	} */ *uap = v;
	struct sigcontext *scp, context;
	struct trapframe *tf;

	tf = p->p_md.md_regs;

	/*
	 * The trampoline code hands us the context.
	 * It is unsafe to keep track of it ourselves, in the event that a
	 * program jumps out of a signal handler.
	 */
	scp = SCARG(uap, sigcntxp);
	if (copyin((caddr_t)scp, &context, sizeof(*scp)) != 0)
		return (EFAULT);

	/*
	 * Restore signal context.
	 */
#ifdef VM86
	if (context.sc_eflags & PSL_VM) {
		tf->tf_vm86_gs = context.sc_gs;
		tf->tf_vm86_fs = context.sc_fs;
		tf->tf_vm86_es = context.sc_es;
		tf->tf_vm86_ds = context.sc_ds;
		set_vflags(p, context.sc_eflags);
	} else
#endif
	{
		/*
		 * Check for security violations.  If we're returning to
		 * protected mode, the CPU will validate the segment registers
		 * automatically and generate a trap on violations.  We handle
		 * the trap, rather than doing all of the checking here.
		 */
		if (((context.sc_eflags ^ tf->tf_eflags) & PSL_USERSTATIC) != 0 ||
		    !USERMODE(context.sc_cs, context.sc_eflags))
			return (EINVAL);

		tf->tf_fs = context.sc_fs;
		tf->tf_gs = context.sc_gs;
		tf->tf_es = context.sc_es;
		tf->tf_ds = context.sc_ds;
		tf->tf_eflags = context.sc_eflags;
	}
	tf->tf_edi = context.sc_edi;
	tf->tf_esi = context.sc_esi;
	tf->tf_ebp = context.sc_ebp;
	tf->tf_ebx = context.sc_ebx;
	tf->tf_edx = context.sc_edx;
	tf->tf_ecx = context.sc_ecx;
	tf->tf_eax = context.sc_eax;
	tf->tf_eip = context.sc_eip;
	tf->tf_cs = context.sc_cs;
	tf->tf_esp = context.sc_esp;
	tf->tf_ss = context.sc_ss;

	/* Restore the alternate-stack flag and the blocked-signal mask. */
	if (context.sc_onstack & 01)
		p->p_sigacts->ps_sigstk.ss_flags |= SS_ONSTACK;
	else
		p->p_sigacts->ps_sigstk.ss_flags &= ~SS_ONSTACK;
	p->p_sigmask = context.sc_mask & ~sigcantmask;

	return (EJUSTRETURN);
}
2351:
2352: int waittime = -1;
2353: struct pcb dumppcb;
2354:
/*
 * Machine-dependent reboot/halt/powerdown.  Optionally syncs disks and
 * takes a crash dump first, as directed by the RB_* bits in howto.
 * Does not return.
 */
void
boot(int howto)
{
	if (cold) {
		/*
		 * If the system is cold, just halt, unless the user
		 * explicitly asked for reboot.
		 */
		if ((howto & RB_USERREQ) == 0)
			howto |= RB_HALT;
		goto haltsys;
	}

	boothowto = howto;
	if ((howto & RB_NOSYNC) == 0 && waittime < 0) {
		extern struct proc proc0;

		/* protect against curproc->p_stats.foo refs in sync() XXX */
		if (curproc == NULL)
			curproc = &proc0;

		/* waittime != -1 also keeps a second pass from syncing again */
		waittime = 0;
		vfs_shutdown();
		/*
		 * If we've been adjusting the clock, the todr
		 * will be out of synch; adjust it now.
		 */
		if ((howto & RB_TIMEBAD) == 0) {
			resettodr();
		} else {
			printf("WARNING: not updating battery clock\n");
		}
	}

	/* Disable interrupts. */
	splhigh();

	/* Do a dump if requested. */
	if (howto & RB_DUMP)
		dumpsys();

haltsys:
	doshutdownhooks();

	if (howto & RB_HALT) {
#if NACPI > 0 && !defined(SMALL_KERNEL)
		extern int acpi_s5, acpi_enabled;

		if (acpi_enabled) {
			delay(500000);
			if ((howto & RB_POWERDOWN) || acpi_s5)
				acpi_powerdown();
		}
#endif

#if NAPM > 0
		if (howto & RB_POWERDOWN) {
			int rv;

			printf("\nAttempting to power down...\n");
			/*
			 * Turn off, if we can.  But try to turn disk off and
			 * wait a bit first--some disk drives are slow to
			 * clean up and users have reported disk corruption.
			 *
			 * If apm_set_powstate() fails the first time, don't
			 * try to turn the system off.
			 */
			delay(500000);
			/*
			 * It's been reported that the following bit of code
			 * is required on most systems <mickey@openbsd.org>
			 * but cause powerdown problem on other systems
			 * <smcho@tsp.korea.ac.kr>.  Use sysctl to set
			 * apmhalt to a non-zero value to skip the offending
			 * code.
			 */
			if (!cpu_apmhalt) {
				apm_set_powstate(APM_DEV_DISK(0xff),
				    APM_SYS_OFF);
				delay(500000);
			}
			rv = apm_set_powstate(APM_DEV_DISK(0xff), APM_SYS_OFF);
			if (rv == 0 || rv == ENXIO) {
				delay(500000);
				(void) apm_set_powstate(APM_DEV_ALLDEVS,
				    APM_SYS_OFF);
			}
		}
#endif
		printf("\n");
		printf("The operating system has halted.\n");
		printf("Please press any key to reboot.\n\n");
		cngetc();
	}

	printf("rebooting...\n");
	cpu_reset();
	for(;;) ;
	/*NOTREACHED*/
}
2456:
2457: /*
2458: * This is called by configure to set dumplo and dumpsize.
2459: * Dumps always skip the first block of disk space
2460: * in case there might be a disk label stored there.
2461: * If there is extra space, put dump at the end to
2462: * reduce the chance that swapping trashes it.
2463: */
void
dumpconf(void)
{
	int nblks;	/* size of dump area */
	int i;

	/* Nothing to do without a dump device whose size we can query. */
	if (dumpdev == NODEV ||
	    (nblks = (bdevsw[major(dumpdev)].d_psize)(dumpdev)) == 0)
		return;
	if (nblks <= ctod(1))
		return;

	/* Always skip the first block, in case there is a label there. */
	if (dumplo < ctod(1))
		dumplo = ctod(1);

	/* dumpsize covers up to the highest page of any RAM segment. */
	for (i = 0; i < ndumpmem; i++)
		dumpsize = max(dumpsize, dumpmem[i].end);

	/* Put dump at end of partition, and make it fit. */
	if (dumpsize > dtoc(nblks - dumplo - 1))
		dumpsize = dtoc(nblks - dumplo - 1);
	if (dumplo < nblks - ctod(dumpsize) - 1)
		dumplo = nblks - ctod(dumpsize) - 1;
}
2489:
2490: /*
2491: * cpu_dump: dump machine-dependent kernel core dump headers.
2492: */
int
cpu_dump()
{
	int (*dump)(dev_t, daddr64_t, caddr_t, size_t);
	long buf[dbtob(1) / sizeof (long)];	/* one disk block's worth */
	kcore_seg_t *segp;

	dump = bdevsw[major(dumpdev)].d_dump;

	/* Build the header in-place at the start of buf. */
	segp = (kcore_seg_t *)buf;

	/*
	 * Generate a segment header.
	 */
	CORE_SETMAGIC(*segp, KCORE_MAGIC, MID_MACHINE, CORE_CPU);
	segp->c_size = dbtob(1) - ALIGN(sizeof(*segp));

	/* Write the header block at dumplo; returns the driver's errno. */
	return (dump(dumpdev, dumplo, (caddr_t)buf, dbtob(1)));
}
2512:
2513: /*
2514: * Doadump comes here after turning off memory management and
2515: * getting on the dump stack, either when called above, or by
2516: * the auto-restart code.
2517: */
2518: static vaddr_t dumpspace;
2519:
/*
 * Record 'p' as the one-page scratch VA that dumpsys() maps each
 * physical page at; return the first VA past the reserved window.
 */
vaddr_t
reserve_dumppages(vaddr_t p)
{

	dumpspace = p;
	return (p + PAGE_SIZE);
}
2527:
/*
 * Write the physical memory segments recorded in dumpmem[] to the dump
 * device, one page at a time through the reserved dumpspace mapping.
 * Runs with the machine mostly shut down; reports progress and a final
 * status string on the console.
 */
void
dumpsys()
{
	u_int i, j, npg;
	int maddr;
	daddr64_t blkno;
	int (*dump)(dev_t, daddr64_t, caddr_t, size_t);
	int error;
	char *str;
	extern int msgbufmapped;

	/* Save registers. */
	savectx(&dumppcb);

	msgbufmapped = 0;	/* don't record dump msgs in msgbuf */
	if (dumpdev == NODEV)
		return;

	/*
	 * For dumps during autoconfiguration,
	 * if dump device has already configured...
	 */
	if (dumpsize == 0)
		dumpconf();
	if (dumplo < 0)
		return;
	printf("\ndumping to dev %x, offset %ld\n", dumpdev, dumplo);

	error = (*bdevsw[major(dumpdev)].d_psize)(dumpdev);
	printf("dump ");
	if (error == -1) {
		printf("area unavailable\n");
		return;
	}

#if 0	/* XXX this doesn't work.  grr. */
	/* toss any characters present prior to dump */
	while (sget() != NULL); /*syscons and pccons differ */
#endif

	/* scan through the dumpmem list */
	dump = bdevsw[major(dumpdev)].d_dump;
	error = cpu_dump();	/* kcore header goes out first */
	for (i = 0; !error && i < ndumpmem; i++) {

		npg = dumpmem[i].end - dumpmem[i].start;
		maddr = ctob(dumpmem[i].start);
		blkno = dumplo + btodb(maddr) + 1;	/* +1: header block */
#if 0
		printf("(%d %lld %d) ", maddr, blkno, npg);
#endif
		for (j = npg; j--; maddr += NBPG, blkno += btodb(NBPG)) {

			/* Print out how many MBs we have more to go. */
			if (dbtob(blkno - dumplo) % (1024 * 1024) < NBPG)
				printf("%d ",
				    (ctob(dumpsize) - maddr) / (1024 * 1024));
#if 0
			printf("(%x %lld) ", maddr, blkno);
#endif
			/* Map the page read-only at dumpspace, then write it. */
			pmap_enter(pmap_kernel(), dumpspace, maddr,
			    VM_PROT_READ, PMAP_WIRED);
			if ((error = (*dump)(dumpdev, blkno,
			    (caddr_t)dumpspace, NBPG)))
				break;

#if 0	/* XXX this doesn't work.  grr. */
			/* operator aborting dump? */
			if (sget() != NULL) {
				error = EINTR;
				break;
			}
#endif
		}
	}

	/* Map the driver's return value to a console message. */
	switch (error) {

	case 0: str = "succeeded\n\n"; break;
	case ENXIO: str = "device bad\n\n"; break;
	case EFAULT: str = "device not ready\n\n"; break;
	case EINVAL: str = "area improper\n\n"; break;
	case EIO: str = "i/o error\n\n"; break;
	case EINTR: str = "aborted from console\n\n"; break;
	default: str = "error %d\n\n"; break;
	}
	printf(str, error);

	delay(5000000);		/* 5 seconds */
}
2618:
2619: /*
2620: * Clear registers on exec
2621: */
/*
 * Set up the initial register state (trapframe, FPU defaults, code
 * segment limit) for a freshly exec'd process.  'stack' is the initial
 * user stack pointer; pack->ep_entry is the program entry point.
 */
void
setregs(struct proc *p, struct exec_package *pack, u_long stack,
    register_t *retval)
{
	struct pcb *pcb = &p->p_addr->u_pcb;
	struct pmap *pmap = vm_map_pmap(&p->p_vmspace->vm_map);
	struct trapframe *tf = p->p_md.md_regs;

#if NNPX > 0
	/* If we were using the FPU, forget about it. */
	if (pcb->pcb_fpcpu != NULL)
		npxsave_proc(p, 0);
#endif

#ifdef USER_LDT
	/* Discard any private LDT installed by the previous image. */
	pmap_ldt_cleanup(p);
#endif

	/*
	 * Reset the code segment limit to I386_MAX_EXE_ADDR in the pmap;
	 * this gets copied into the GDT and LDT for {G,L}UCODE_SEL by
	 * pmap_activate().
	 */
	setsegment(&pmap->pm_codeseg, 0, atop(I386_MAX_EXE_ADDR) - 1,
	    SDT_MEMERA, SEL_UPL, 1, 1);

	/*
	 * And update the GDT and LDT since we return to the user process
	 * by leaving the syscall (we don't do another pmap_activate()).
	 */
	curcpu()->ci_gdt[GUCODE_SEL].sd = pcb->pcb_ldt[LUCODE_SEL].sd =
	    pmap->pm_codeseg;

	/*
	 * And reset the hiexec marker in the pmap.
	 */
	pmap->pm_hiexec = 0;

	/* Give the new image a clean default FPU control state. */
	p->p_md.md_flags &= ~MDP_USEDFPU;
	if (i386_use_fxsave) {
		pcb->pcb_savefpu.sv_xmm.sv_env.en_cw = __OpenBSD_NPXCW__;
		pcb->pcb_savefpu.sv_xmm.sv_env.en_mxcsr = __INITIAL_MXCSR__;
	} else
		pcb->pcb_savefpu.sv_87.sv_env.en_cw = __OpenBSD_NPXCW__;

	/* Build the initial user register frame. */
	tf->tf_fs = LSEL(LUDATA_SEL, SEL_UPL);
	tf->tf_gs = LSEL(LUDATA_SEL, SEL_UPL);
	tf->tf_es = LSEL(LUDATA_SEL, SEL_UPL);
	tf->tf_ds = LSEL(LUDATA_SEL, SEL_UPL);
	tf->tf_ebp = 0;
	tf->tf_ebx = (int)PS_STRINGS;
	tf->tf_eip = pack->ep_entry;
	tf->tf_cs = LSEL(LUCODE_SEL, SEL_UPL);
	tf->tf_eflags = PSL_USERSET;
	tf->tf_esp = stack;
	tf->tf_ss = LSEL(LUDATA_SEL, SEL_UPL);

	retval[1] = 0;
}
2681:
2682: /*
2683: * Initialize segments and descriptor tables
2684: */
2685:
2686: union descriptor ldt[NLDT];
2687: struct gate_descriptor idt_region[NIDT];
2688: struct gate_descriptor *idt = idt_region;
2689:
2690: extern struct user *proc0paddr;
2691:
2692: void
2693: setgate(struct gate_descriptor *gd, void *func, int args, int type, int dpl,
2694: int seg)
2695: {
2696:
2697: gd->gd_looffset = (int)func;
2698: gd->gd_selector = GSEL(seg, SEL_KPL);
2699: gd->gd_stkcpy = args;
2700: gd->gd_xx = 0;
2701: gd->gd_type = type;
2702: gd->gd_dpl = dpl;
2703: gd->gd_p = 1;
2704: gd->gd_hioffset = (int)func >> 16;
2705: }
2706:
2707: void
2708: unsetgate(struct gate_descriptor *gd)
2709: {
2710: gd->gd_p = 0;
2711: gd->gd_hioffset = 0;
2712: gd->gd_looffset = 0;
2713: gd->gd_selector = 0;
2714: gd->gd_xx = 0;
2715: gd->gd_stkcpy = 0;
2716: gd->gd_type = 0;
2717: gd->gd_dpl = 0;
2718: }
2719:
2720: void
2721: setregion(struct region_descriptor *rd, void *base, size_t limit)
2722: {
2723:
2724: rd->rd_limit = (int)limit;
2725: rd->rd_base = (int)base;
2726: }
2727:
2728: void
2729: setsegment(struct segment_descriptor *sd, void *base, size_t limit, int type,
2730: int dpl, int def32, int gran)
2731: {
2732:
2733: sd->sd_lolimit = (int)limit;
2734: sd->sd_lobase = (int)base;
2735: sd->sd_type = type;
2736: sd->sd_dpl = dpl;
2737: sd->sd_p = 1;
2738: sd->sd_hilimit = (int)limit >> 16;
2739: sd->sd_xx = 0;
2740: sd->sd_def32 = def32;
2741: sd->sd_gran = gran;
2742: sd->sd_hibase = (int)base >> 24;
2743: }
2744:
2745: #define IDTVEC(name) __CONCAT(X, name)
2746: extern int IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl),
2747: IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(dble), IDTVEC(fpusegm),
2748: IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot), IDTVEC(page),
2749: IDTVEC(rsvd), IDTVEC(fpu), IDTVEC(align), IDTVEC(syscall), IDTVEC(mchk),
2750: IDTVEC(osyscall), IDTVEC(simd);
2751:
2752: #if defined(I586_CPU)
2753: extern int IDTVEC(f00f_redirect);
2754:
2755: int cpu_f00f_bug = 0;
2756:
2757: void
2758: fix_f00f(void)
2759: {
2760: struct region_descriptor region;
2761: vaddr_t va;
2762: void *p;
2763: pt_entry_t *pte;
2764:
2765: /* Allocate two new pages */
2766: va = uvm_km_zalloc(kernel_map, NBPG*2);
2767: p = (void *)(va + NBPG - 7*sizeof(*idt));
2768:
2769: /* Copy over old IDT */
2770: bcopy(idt, p, sizeof(idt_region));
2771: idt = p;
2772:
2773: /* Fix up paging redirect */
2774: setgate(&idt[ 14], &IDTVEC(f00f_redirect), 0, SDT_SYS386TGT, SEL_KPL,
2775: GCODE_SEL);
2776:
2777: /* Map first page RO */
2778: pte = PTE_BASE + atop(va);
2779: *pte &= ~PG_RW;
2780:
2781: /* Reload idtr */
2782: setregion(®ion, idt, sizeof(idt_region) - 1);
2783: lidt(®ion);
2784:
2785: /* Tell the rest of the world */
2786: cpu_f00f_bug = 1;
2787: }
2788: #endif
2789:
2790: #ifdef MULTIPROCESSOR
2791: void
2792: cpu_init_idt()
2793: {
2794: struct region_descriptor region;
2795: setregion(®ion, idt, NIDT * sizeof(idt[0]) - 1);
2796: lidt(®ion);
2797: }
2798:
2799: void
2800: cpu_default_ldt(struct cpu_info *ci)
2801: {
2802: ci->ci_ldt = ldt;
2803: ci->ci_ldt_len = sizeof(ldt);
2804: }
2805:
2806: void
2807: cpu_alloc_ldt(struct cpu_info *ci)
2808: {
2809: union descriptor *cpu_ldt;
2810: size_t len = sizeof(ldt);
2811:
2812: cpu_ldt = (union descriptor *)uvm_km_alloc(kernel_map, len);
2813: bcopy(ldt, cpu_ldt, len);
2814: ci->ci_ldt = cpu_ldt;
2815: ci->ci_ldt_len = len;
2816: }
2817:
2818: void
2819: cpu_init_ldt(struct cpu_info *ci)
2820: {
2821: setsegment(&ci->ci_gdt[GLDT_SEL].sd, ci->ci_ldt, ci->ci_ldt_len - 1,
2822: SDT_SYSLDT, SEL_KPL, 0, 0);
2823: }
2824: #endif /* MULTIPROCESSOR */
2825:
2826: void
2827: init386(paddr_t first_avail)
2828: {
2829: int i, kb;
2830: struct region_descriptor region;
2831: bios_memmap_t *im;
2832:
2833: proc0.p_addr = proc0paddr;
2834: cpu_info_primary.ci_self = &cpu_info_primary;
2835: cpu_info_primary.ci_curpcb = &proc0.p_addr->u_pcb;
2836:
2837: /*
2838: * Initialize the I/O port and I/O mem extent maps.
2839: * Note: we don't have to check the return value since
2840: * creation of a fixed extent map will never fail (since
2841: * descriptor storage has already been allocated).
2842: *
2843: * N.B. The iomem extent manages _all_ physical addresses
2844: * on the machine. When the amount of RAM is found, the two
2845: * extents of RAM are allocated from the map (0 -> ISA hole
2846: * and end of ISA hole -> end of RAM).
2847: */
2848: ioport_ex = extent_create("ioport", 0x0, 0xffff, M_DEVBUF,
2849: (caddr_t)ioport_ex_storage, sizeof(ioport_ex_storage),
2850: EX_NOCOALESCE|EX_NOWAIT);
2851: iomem_ex = extent_create("iomem", 0x0, 0xffffffff, M_DEVBUF,
2852: (caddr_t)iomem_ex_storage, sizeof(iomem_ex_storage),
2853: EX_NOCOALESCE|EX_NOWAIT);
2854:
2855: /* make bootstrap gdt gates and memory segments */
2856: setsegment(&gdt[GCODE_SEL].sd, 0, 0xfffff, SDT_MEMERA, SEL_KPL, 1, 1);
2857: setsegment(&gdt[GICODE_SEL].sd, 0, 0xfffff, SDT_MEMERA, SEL_KPL, 1, 1);
2858: setsegment(&gdt[GDATA_SEL].sd, 0, 0xfffff, SDT_MEMRWA, SEL_KPL, 1, 1);
2859: setsegment(&gdt[GLDT_SEL].sd, ldt, sizeof(ldt) - 1, SDT_SYSLDT,
2860: SEL_KPL, 0, 0);
2861: setsegment(&gdt[GUCODE_SEL].sd, 0, atop(I386_MAX_EXE_ADDR) - 1,
2862: SDT_MEMERA, SEL_UPL, 1, 1);
2863: setsegment(&gdt[GUDATA_SEL].sd, 0, atop(VM_MAXUSER_ADDRESS) - 1,
2864: SDT_MEMRWA, SEL_UPL, 1, 1);
2865: setsegment(&gdt[GCPU_SEL].sd, &cpu_info_primary,
2866: sizeof(struct cpu_info)-1, SDT_MEMRWA, SEL_KPL, 0, 0);
2867:
2868: /* make ldt gates and memory segments */
2869: setgate(&ldt[LSYS5CALLS_SEL].gd, &IDTVEC(osyscall), 1, SDT_SYS386CGT,
2870: SEL_UPL, GCODE_SEL);
2871: ldt[LUCODE_SEL] = gdt[GUCODE_SEL];
2872: ldt[LUDATA_SEL] = gdt[GUDATA_SEL];
2873: ldt[LBSDICALLS_SEL] = ldt[LSYS5CALLS_SEL];
2874:
2875: /* exceptions */
2876: setgate(&idt[ 0], &IDTVEC(div), 0, SDT_SYS386TGT, SEL_KPL, GCODE_SEL);
2877: setgate(&idt[ 1], &IDTVEC(dbg), 0, SDT_SYS386TGT, SEL_KPL, GCODE_SEL);
2878: setgate(&idt[ 2], &IDTVEC(nmi), 0, SDT_SYS386TGT, SEL_KPL, GCODE_SEL);
2879: setgate(&idt[ 3], &IDTVEC(bpt), 0, SDT_SYS386TGT, SEL_UPL, GCODE_SEL);
2880: setgate(&idt[ 4], &IDTVEC(ofl), 0, SDT_SYS386TGT, SEL_UPL, GCODE_SEL);
2881: setgate(&idt[ 5], &IDTVEC(bnd), 0, SDT_SYS386TGT, SEL_KPL, GCODE_SEL);
2882: setgate(&idt[ 6], &IDTVEC(ill), 0, SDT_SYS386TGT, SEL_KPL, GCODE_SEL);
2883: setgate(&idt[ 7], &IDTVEC(dna), 0, SDT_SYS386TGT, SEL_KPL, GCODE_SEL);
2884: setgate(&idt[ 8], &IDTVEC(dble), 0, SDT_SYS386TGT, SEL_KPL, GCODE_SEL);
2885: setgate(&idt[ 9], &IDTVEC(fpusegm), 0, SDT_SYS386TGT, SEL_KPL, GCODE_SEL);
2886: setgate(&idt[ 10], &IDTVEC(tss), 0, SDT_SYS386TGT, SEL_KPL, GCODE_SEL);
2887: setgate(&idt[ 11], &IDTVEC(missing), 0, SDT_SYS386TGT, SEL_KPL, GCODE_SEL);
2888: setgate(&idt[ 12], &IDTVEC(stk), 0, SDT_SYS386TGT, SEL_KPL, GCODE_SEL);
2889: setgate(&idt[ 13], &IDTVEC(prot), 0, SDT_SYS386TGT, SEL_KPL, GCODE_SEL);
2890: setgate(&idt[ 14], &IDTVEC(page), 0, SDT_SYS386TGT, SEL_KPL, GCODE_SEL);
2891: setgate(&idt[ 15], &IDTVEC(rsvd), 0, SDT_SYS386TGT, SEL_KPL, GCODE_SEL);
2892: setgate(&idt[ 16], &IDTVEC(fpu), 0, SDT_SYS386TGT, SEL_KPL, GCODE_SEL);
2893: setgate(&idt[ 17], &IDTVEC(align), 0, SDT_SYS386TGT, SEL_KPL, GCODE_SEL);
2894: setgate(&idt[ 18], &IDTVEC(mchk), 0, SDT_SYS386TGT, SEL_KPL, GCODE_SEL);
2895: setgate(&idt[ 19], &IDTVEC(simd), 0, SDT_SYS386TGT, SEL_KPL, GCODE_SEL);
2896: for (i = 20; i < NRSVIDT; i++)
2897: setgate(&idt[i], &IDTVEC(rsvd), 0, SDT_SYS386TGT, SEL_KPL, GCODE_SEL);
2898: for (i = NRSVIDT; i < NIDT; i++)
2899: unsetgate(&idt[i]);
2900: setgate(&idt[128], &IDTVEC(syscall), 0, SDT_SYS386TGT, SEL_UPL, GCODE_SEL);
2901:
2902: setregion(®ion, gdt, NGDT * sizeof(union descriptor) - 1);
2903: lgdt(®ion);
2904: setregion(®ion, idt, sizeof(idt_region) - 1);
2905: lidt(®ion);
2906:
2907: #if NISA > 0
2908: isa_defaultirq();
2909: #endif
2910:
2911: consinit(); /* XXX SHOULD NOT BE DONE HERE */
2912: /* XXX here, until we can use bios for printfs */
2913:
2914: /*
2915: * Saving SSE registers won't work if the save area isn't
2916: * 16-byte aligned.
2917: */
2918: if (offsetof(struct user, u_pcb.pcb_savefpu) & 0xf)
2919: panic("init386: pcb_savefpu not 16-byte aligned");
2920:
2921: /* call pmap initialization to make new kernel address space */
2922: pmap_bootstrap((vaddr_t)atdevbase + IOM_SIZE);
2923:
2924: /*
2925: * Boot arguments are in a single page specified by /boot.
2926: *
2927: * We require the "new" vector form, as well as memory ranges
2928: * to be given in bytes rather than KB.
2929: */
2930: if ((bootapiver & (BAPIV_VECTOR | BAPIV_BMEMMAP)) ==
2931: (BAPIV_VECTOR | BAPIV_BMEMMAP)) {
2932: if (bootargc > NBPG)
2933: panic("too many boot args");
2934:
2935: if (extent_alloc_region(iomem_ex, (paddr_t)bootargv, bootargc,
2936: EX_NOWAIT))
2937: panic("cannot reserve /boot args memory");
2938:
2939: pmap_enter(pmap_kernel(), (vaddr_t)bootargp, (paddr_t)bootargv,
2940: VM_PROT_READ|VM_PROT_WRITE,
2941: VM_PROT_READ|VM_PROT_WRITE|PMAP_WIRED);
2942:
2943: bios_getopt();
2944:
2945: } else
2946: panic("/boot too old: upgrade!");
2947:
2948: #ifdef DIAGNOSTIC
2949: if (bios_memmap == NULL)
2950: panic("no BIOS memory map supplied");
2951: #endif
2952:
2953: #if defined(MULTIPROCESSOR)
2954: /* install the page after boot args as PT page for first 4M */
2955: pmap_enter(pmap_kernel(), (u_long)vtopte(0),
2956: round_page((vaddr_t)(bootargv + bootargc)),
2957: VM_PROT_READ|VM_PROT_WRITE,
2958: VM_PROT_READ|VM_PROT_WRITE|PMAP_WIRED);
2959: memset(vtopte(0), 0, NBPG); /* make sure it is clean before using */
2960: #endif
2961:
2962: /*
2963: * account all the memory passed in the map from /boot
2964: * calculate avail_end and count the physmem.
2965: */
2966: avail_end = 0;
2967: physmem = 0;
2968: #ifdef DEBUG
2969: printf("memmap:");
2970: #endif
2971: for(i = 0, im = bios_memmap; im->type != BIOS_MAP_END; im++)
2972: if (im->type == BIOS_MAP_FREE) {
2973: paddr_t a, e;
2974: #ifdef DEBUG
2975: printf(" %llx-%llx", im->addr, im->addr + im->size);
2976: #endif
2977:
2978: if (im->addr >= 0x100000000ULL) {
2979: #ifdef DEBUG
2980: printf("-H");
2981: #endif
2982: continue;
2983: }
2984:
2985: a = round_page(im->addr);
2986: if (im->addr + im->size <= 0xfffff000ULL)
2987: e = trunc_page(im->addr + im->size);
2988: else {
2989: #ifdef DEBUG
2990: printf("-T");
2991: #endif
2992: e = 0xfffff000;
2993: }
2994:
2995: /* skip first eight pages */
2996: if (a < 8 * NBPG)
2997: a = 8 * NBPG;
2998:
2999: /* skip shorter than page regions */
3000: if (a >= e || (e - a) < NBPG) {
3001: #ifdef DEBUG
3002: printf("-S");
3003: #endif
3004: continue;
3005: }
3006: if ((a > IOM_BEGIN && a < IOM_END) ||
3007: (e > IOM_BEGIN && e < IOM_END)) {
3008: #ifdef DEBUG
3009: printf("-I");
3010: #endif
3011: continue;
3012: }
3013:
3014: if (extent_alloc_region(iomem_ex, a, e - a, EX_NOWAIT))
3015: /* XXX What should we do? */
3016: printf("\nWARNING: CAN'T ALLOCATE RAM (%x-%x)"
3017: " FROM IOMEM EXTENT MAP!\n", a, e);
3018:
3019: physmem += atop(e - a);
3020: dumpmem[i].start = atop(a);
3021: dumpmem[i].end = atop(e);
3022: i++;
3023: avail_end = max(avail_end, e);
3024: }
3025:
3026: ndumpmem = i;
3027: avail_end -= round_page(MSGBUFSIZE);
3028:
3029: #ifdef DEBUG
3030: printf(": %lx\n", avail_end);
3031: #endif
3032: if (physmem < atop(4 * 1024 * 1024)) {
3033: printf("\awarning: too little memory available;"
3034: "running in degraded mode\npress a key to confirm\n\n");
3035: cngetc();
3036: }
3037:
3038: #ifdef DEBUG
3039: printf("physload: ");
3040: #endif
3041: kb = atop(KERNTEXTOFF - KERNBASE);
3042: if (kb > atop(0x100000)) {
3043: paddr_t lim = atop(0x100000);
3044: #ifdef DEBUG
3045: printf(" %x-%x (<16M)", lim, kb);
3046: #endif
3047: uvm_page_physload(lim, kb, lim, kb, VM_FREELIST_FIRST16);
3048: }
3049:
3050: for (i = 0; i < ndumpmem; i++) {
3051: paddr_t a, e;
3052: paddr_t lim;
3053:
3054: a = dumpmem[i].start;
3055: e = dumpmem[i].end;
3056: if (a < atop(first_avail) && e > atop(first_avail))
3057: a = atop(first_avail);
3058: if (e > atop(avail_end))
3059: e = atop(avail_end);
3060:
3061: if (a < e) {
3062: if (a < atop(16 * 1024 * 1024)) {
3063: lim = MIN(atop(16 * 1024 * 1024), e);
3064: #ifdef DEBUG
3065: - printf(" %x-%x (<16M)", a, lim);
3066: #endif
3067: uvm_page_physload(a, lim, a, lim,
3068: VM_FREELIST_FIRST16);
3069: if (e > lim) {
3070: #ifdef DEBUG
3071: - printf(" %x-%x", lim, e);
3072: #endif
3073: uvm_page_physload(lim, e, lim, e,
3074: VM_FREELIST_DEFAULT);
3075: }
3076: } else {
3077: #ifdef DEBUG
3078: - printf(" %x-%x", a, e);
3079: #endif
3080: uvm_page_physload(a, e, a, e,
3081: VM_FREELIST_DEFAULT);
3082: }
3083: }
3084: }
3085: #ifdef DEBUG
3086: printf("\n");
3087: #endif
3088: tlbflush();
3089: #if 0
3090: #if NISADMA > 0
3091: /*
3092: * Some motherboards/BIOSes remap the 384K of RAM that would
3093: * normally be covered by the ISA hole to the end of memory
3094: * so that it can be used. However, on a 16M system, this
3095: * would cause bounce buffers to be allocated and used.
3096: * This is not desirable behaviour, as more than 384K of
3097: * bounce buffers might be allocated. As a work-around,
3098: * we round memory down to the nearest 1M boundary if
3099: * we're using any isadma devices and the remapped memory
3100: * is what puts us over 16M.
3101: */
3102: if (extmem > (15*1024) && extmem < (16*1024)) {
3103: printf("Warning: ignoring %dk of remapped memory\n",
3104: extmem - (15*1024));
3105: extmem = (15*1024);
3106: }
3107: #endif
3108: #endif
3109:
3110: #ifdef DDB
3111: db_machine_init();
3112: ddb_init();
3113: if (boothowto & RB_KDB)
3114: Debugger();
3115: #endif
3116: #ifdef KGDB
3117: kgdb_port_init();
3118: if (boothowto & RB_KDB) {
3119: kgdb_debug_init = 1;
3120: kgdb_connect(1);
3121: }
3122: #endif /* KGDB */
3123: }
3124:
3125: /*
3126: * cpu_exec_aout_makecmds():
3127: * cpu-dependent a.out format hook for execve().
3128: *
3129: * Determine of the given exec package refers to something which we
3130: * understand and, if so, set up the vmcmds for it.
3131: */
3132: int
3133: cpu_exec_aout_makecmds(struct proc *p, struct exec_package *epp)
3134: {
3135: return ENOEXEC;
3136: }
3137:
3138: /*
3139: * consinit:
3140: * initialize the system console.
3141: * XXX - shouldn't deal with this initted thing, but then,
3142: * it shouldn't be called from init386 either.
3143: */
void
consinit()
{
	static int initted;

	/* Attach the console exactly once; later calls are no-ops. */
	if (!initted) {
		initted = 1;
		cninit();
	}
}
3154:
3155: #ifdef KGDB
void
kgdb_port_init()
{
#if (NCOM > 0 || NPCCOM > 0)
	/* Only the com/pccom serial drivers can host the kernel debugger. */
	if (strcmp(kgdb_devname, "com") != 0 &&
	    strcmp(kgdb_devname, "pccom") != 0)
		return;

	com_kgdb_attach(I386_BUS_SPACE_IO, comkgdbaddr, comkgdbrate,
	    COM_FREQ, comkgdbmode);
#endif
}
3168: #endif /* KGDB */
3169:
3170: void
3171: cpu_reset()
3172: {
3173: struct region_descriptor region;
3174:
3175: disable_intr();
3176:
3177: if (cpuresetfn)
3178: (*cpuresetfn)();
3179:
3180: /*
3181: * The keyboard controller has 4 random output pins, one of which is
3182: * connected to the RESET pin on the CPU in many PCs. We tell the
3183: * keyboard controller to pulse this line a couple of times.
3184: */
3185: outb(IO_KBD + KBCMDP, KBC_PULSE0);
3186: delay(100000);
3187: outb(IO_KBD + KBCMDP, KBC_PULSE0);
3188: delay(100000);
3189:
3190: /*
3191: * Try to cause a triple fault and watchdog reset by setting the
3192: * IDT to point to nothing.
3193: */
3194: bzero((caddr_t)idt, sizeof(idt_region));
3195: setregion(®ion, idt, sizeof(idt_region) - 1);
3196: lidt(®ion);
3197: __asm __volatile("divl %0,%1" : : "q" (0), "a" (0));
3198:
3199: #if 1
3200: /*
3201: * Try to cause a triple fault and watchdog reset by unmapping the
3202: * entire address space.
3203: */
3204: bzero((caddr_t)PTD, NBPG);
3205: tlbflush();
3206: #endif
3207:
3208: for (;;);
3209: }
3210:
3211: void
3212: cpu_initclocks(void)
3213: {
3214: (*initclock_func)();
3215:
3216: if (initclock_func == i8254_initclocks)
3217: i8254_inittimecounter();
3218: else
3219: i8254_inittimecounter_simple();
3220: }
3221:
3222: void
3223: need_resched(struct cpu_info *ci)
3224: {
3225: struct proc *p;
3226:
3227: ci->ci_want_resched = 1;
3228:
3229: /*
3230: * Need to catch the curproc in case it's cleared just
3231: * between the check and the aston().
3232: */
3233: if ((p = ci->ci_curproc) != NULL)
3234: aston(p);
3235: }
3236:
3237: #ifdef MULTIPROCESSOR
3238: /* Allocate an IDT vector slot within the given range.
3239: * XXX needs locking to avoid MP allocation races.
3240: */
3241:
3242: int
3243: idt_vec_alloc(int low, int high)
3244: {
3245: int vec;
3246:
3247: for (vec = low; vec <= high; vec++)
3248: if (idt[vec].gd_p == 0)
3249: return (vec);
3250: return (0);
3251: }
3252:
3253: void
3254: idt_vec_set(int vec, void (*function)(void))
3255: {
3256: setgate(&idt[vec], function, 0, SDT_SYS386IGT, SEL_KPL, GCODE_SEL);
3257: }
3258:
3259: void
3260: idt_vec_free(int vec)
3261: {
3262: unsetgate(&idt[vec]);
3263: }
3264: #endif /* MULTIPROCESSOR */
3265:
3266: /*
3267: * machine dependent system variables.
3268: */
int
cpu_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen, struct proc *p)
{
	dev_t dev;

	switch (name[0]) {
	case CPU_CONSDEV:
		if (namelen != 1)
			return (ENOTDIR);		/* overloaded */

		/* Report the current console device, or NODEV if none. */
		if (cn_tab != NULL)
			dev = cn_tab->cn_dev;
		else
			dev = NODEV;
		return sysctl_rdstruct(oldp, oldlenp, newp, &dev, sizeof(dev));
#if NBIOS > 0
	case CPU_BIOS:
		/* Hand the rest of the MIB name off to the BIOS subtree. */
		return bios_sysctl(name + 1, namelen - 1, oldp, oldlenp,
		    newp, newlen, p);
#endif
	case CPU_BLK2CHR:
		if (namelen != 2)
			return (ENOTDIR);		/* overloaded */
		dev = blktochr((dev_t)name[1]);
		return sysctl_rdstruct(oldp, oldlenp, newp, &dev, sizeof(dev));
	case CPU_CHR2BLK:
		if (namelen != 2)
			return (ENOTDIR);		/* overloaded */
		dev = chrtoblk((dev_t)name[1]);
		return sysctl_rdstruct(oldp, oldlenp, newp, &dev, sizeof(dev));
	case CPU_ALLOWAPERTURE:
#ifdef APERTURE
		/* Once securelevel is raised, the value may only go down. */
		if (securelevel > 0)
			return (sysctl_int_lower(oldp, oldlenp, newp, newlen,
			    &allowaperture));
		else
			return (sysctl_int(oldp, oldlenp, newp, newlen,
			    &allowaperture));
#else
		return (sysctl_rdint(oldp, oldlenp, newp, 0));
#endif
	case CPU_CPUVENDOR:
		return (sysctl_rdstring(oldp, oldlenp, newp, cpu_vendor));
	case CPU_CPUID:
		return (sysctl_rdint(oldp, oldlenp, newp, cpu_id));
	case CPU_CPUFEATURE:
		return (sysctl_rdint(oldp, oldlenp, newp, curcpu()->ci_feature_flags));
#if NAPM > 0
	case CPU_APMWARN:
		return (sysctl_int(oldp, oldlenp, newp, newlen, &cpu_apmwarn));
	case CPU_APMHALT:
		return (sysctl_int(oldp, oldlenp, newp, newlen, &cpu_apmhalt));
#endif
	case CPU_KBDRESET:
		/* Read-only once securelevel has been raised. */
		if (securelevel > 0)
			return (sysctl_rdint(oldp, oldlenp, newp,
			    kbd_reset));
		else
			return (sysctl_int(oldp, oldlenp, newp, newlen,
			    &kbd_reset));
#ifdef USER_LDT
	case CPU_USERLDT:
		return (sysctl_int(oldp, oldlenp, newp, newlen,
		    &user_ldt_enable));
#endif
	case CPU_OSFXSR:
		return (sysctl_rdint(oldp, oldlenp, newp, i386_use_fxsave));
	case CPU_SSE:
		return (sysctl_rdint(oldp, oldlenp, newp, i386_has_sse));
	case CPU_SSE2:
		return (sysctl_rdint(oldp, oldlenp, newp, i386_has_sse2));
	case CPU_XCRYPT:
		return (sysctl_rdint(oldp, oldlenp, newp, i386_has_xcrypt));
	default:
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}
3348:
/*
 * Map a region of bus space, reserving it in the appropriate extent
 * map first.  I/O space handles are the port number itself; memory
 * space inside the ISA hole uses the static hole mapping, everything
 * else gets a fresh kernel virtual mapping.
 */
int
bus_space_map(bus_space_tag_t t, bus_addr_t bpa, bus_size_t size, int cacheable,
    bus_space_handle_t *bshp)
{
	int error;
	struct extent *ex;

	/*
	 * Pick the appropriate extent map.
	 */
	switch (t) {
	case I386_BUS_SPACE_IO:
		ex = ioport_ex;
		break;

	case I386_BUS_SPACE_MEM:
		ex = iomem_ex;
		break;

	default:
		panic("bus_space_map: bad bus space tag");
	}

	/*
	 * Before we go any further, let's make sure that this
	 * region is available.
	 */
	error = extent_alloc_region(ex, bpa, size,
	    EX_NOWAIT | (ioport_malloc_safe ? EX_MALLOCOK : 0));
	if (error)
		return (error);

	/*
	 * For I/O space, that's all she wrote.
	 */
	if (t == I386_BUS_SPACE_IO) {
		*bshp = bpa;
		return (0);
	}

	/* The ISA hole is permanently mapped; no new mapping needed. */
	if (IOM_BEGIN <= bpa && bpa <= IOM_END) {
		*bshp = (bus_space_handle_t)ISA_HOLE_VADDR(bpa);
		return (0);
	}

	/*
	 * For memory space, map the bus physical address to
	 * a kernel virtual address.
	 */
	error = bus_mem_add_mapping(bpa, size, cacheable, bshp);
	if (error) {
		/* Roll back the extent reservation on mapping failure. */
		if (extent_free(ex, bpa, size, EX_NOWAIT |
		    (ioport_malloc_safe ? EX_MALLOCOK : 0))) {
			printf("bus_space_map: pa 0x%lx, size 0x%lx\n",
			    bpa, size);
			printf("bus_space_map: can't free region\n");
		}
	}

	return (error);
}
3410:
3411: int
3412: _bus_space_map(bus_space_tag_t t, bus_addr_t bpa, bus_size_t size,
3413: int cacheable, bus_space_handle_t *bshp)
3414: {
3415: /*
3416: * For I/O space, that's all she wrote.
3417: */
3418: if (t == I386_BUS_SPACE_IO) {
3419: *bshp = bpa;
3420: return (0);
3421: }
3422:
3423: /*
3424: * For memory space, map the bus physical address to
3425: * a kernel virtual address.
3426: */
3427: return (bus_mem_add_mapping(bpa, size, cacheable, bshp));
3428: }
3429:
3430: int
3431: bus_space_alloc(bus_space_tag_t t, bus_addr_t rstart, bus_addr_t rend,
3432: bus_size_t size, bus_size_t alignment, bus_size_t boundary,
3433: int cacheable, bus_addr_t *bpap, bus_space_handle_t *bshp)
3434: {
3435: struct extent *ex;
3436: u_long bpa;
3437: int error;
3438:
3439: /*
3440: * Pick the appropriate extent map.
3441: */
3442: switch (t) {
3443: case I386_BUS_SPACE_IO:
3444: ex = ioport_ex;
3445: break;
3446:
3447: case I386_BUS_SPACE_MEM:
3448: ex = iomem_ex;
3449: break;
3450:
3451: default:
3452: panic("bus_space_alloc: bad bus space tag");
3453: }
3454:
3455: /*
3456: * Sanity check the allocation against the extent's boundaries.
3457: */
3458: if (rstart < ex->ex_start || rend > ex->ex_end)
3459: panic("bus_space_alloc: bad region start/end");
3460:
3461: /*
3462: * Do the requested allocation.
3463: */
3464: error = extent_alloc_subregion(ex, rstart, rend, size, alignment, 0,
3465: boundary, EX_NOWAIT | (ioport_malloc_safe ? EX_MALLOCOK : 0),
3466: &bpa);
3467:
3468: if (error)
3469: return (error);
3470:
3471: /*
3472: * For I/O space, that's all she wrote.
3473: */
3474: if (t == I386_BUS_SPACE_IO) {
3475: *bshp = *bpap = bpa;
3476: return (0);
3477: }
3478:
3479: /*
3480: * For memory space, map the bus physical address to
3481: * a kernel virtual address.
3482: */
3483: error = bus_mem_add_mapping(bpa, size, cacheable, bshp);
3484: if (error) {
3485: if (extent_free(iomem_ex, bpa, size, EX_NOWAIT |
3486: (ioport_malloc_safe ? EX_MALLOCOK : 0))) {
3487: printf("bus_space_alloc: pa 0x%lx, size 0x%lx\n",
3488: bpa, size);
3489: printf("bus_space_alloc: can't free region\n");
3490: }
3491: }
3492:
3493: *bpap = bpa;
3494:
3495: return (error);
3496: }
3497:
/*
 * Map the bus physical range [bpa, bpa + size) into kernel virtual
 * space with the requested cacheability and return the handle through
 * bshp.  The mapping is page granular; the handle preserves bpa's
 * sub-page offset.  Returns 0 on success or ENOMEM if no KVA is free.
 */
int
bus_mem_add_mapping(bus_addr_t bpa, bus_size_t size, int cacheable,
    bus_space_handle_t *bshp)
{
	u_long pa, endpa;
	vaddr_t va;
	pt_entry_t *pte;
	bus_size_t map_size;

	pa = trunc_page(bpa);
	endpa = round_page(bpa + size);

#ifdef DIAGNOSTIC
	/* endpa == 0 is tolerated: round_page() may wrap at 4GB. */
	if (endpa <= pa && endpa != 0)
		panic("bus_mem_add_mapping: overflow");
#endif

	map_size = endpa - pa;

	/* Reserve kernel virtual space for the page-rounded range. */
	va = uvm_km_valloc(kernel_map, map_size);
	if (va == 0)
		return (ENOMEM);

	/* The handle is the VA plus bpa's offset within its page. */
	*bshp = (bus_space_handle_t)(va + (bpa & PGOFSET));

	for (; map_size > 0;
	    pa += PAGE_SIZE, va += PAGE_SIZE, map_size -= PAGE_SIZE) {
		pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE);

		/*
		 * Set or clear PG_N (cache disable) directly in the
		 * PTE, since pmap_kenter_pa() takes no cache argument.
		 */
		pte = kvtopte(va);
		if (cacheable)
			*pte &= ~PG_N;
		else
			*pte |= PG_N;
		/* Invalidate any stale TLB entry for this page. */
		pmap_tlb_shootpage(pmap_kernel(), va);
	}

	/* Wait for remote CPUs to complete their shootdowns. */
	pmap_tlb_shootwait();
	pmap_update(pmap_kernel());

	return 0;
}
3540:
/*
 * Undo a bus_space_map(): return the region to the owning extent map
 * and, for dynamically mapped memory space, free the kernel virtual
 * mapping as well.
 */
void
bus_space_unmap(bus_space_tag_t t, bus_space_handle_t bsh, bus_size_t size)
{
	struct extent *ex;
	u_long va, endva;
	bus_addr_t bpa;

	/*
	 * Find the correct extent and bus physical address.
	 */
	if (t == I386_BUS_SPACE_IO) {
		ex = ioport_ex;
		bpa = bsh;
	} else if (t == I386_BUS_SPACE_MEM) {
		ex = iomem_ex;
		bpa = (bus_addr_t)ISA_PHYSADDR(bsh);
		/*
		 * Addresses inside the ISA I/O memory window have no
		 * dynamic KVA mapping to free; skip straight to the
		 * extent free.
		 */
		if (IOM_BEGIN <= bpa && bpa <= IOM_END)
			goto ok;

		va = trunc_page(bsh);
		endva = round_page(bsh + size);

#ifdef DIAGNOSTIC
		if (endva <= va)
			panic("bus_space_unmap: overflow");
#endif

		/* Recover the physical address from the page tables. */
		(void) pmap_extract(pmap_kernel(), va, &bpa);
		bpa += (bsh & PGOFSET);

		/*
		 * Free the kernel virtual mapping.
		 */
		uvm_km_free(kernel_map, va, endva - va);
	} else
		panic("bus_space_unmap: bad bus space tag");

ok:
	if (extent_free(ex, bpa, size,
	    EX_NOWAIT | (ioport_malloc_safe ? EX_MALLOCOK : 0))) {
		printf("bus_space_unmap: %s 0x%lx, size 0x%lx\n",
		    (t == I386_BUS_SPACE_IO) ? "port" : "pa", bpa, size);
		printf("bus_space_unmap: can't free region\n");
	}
}
3586:
3587: void
3588: _bus_space_unmap(bus_space_tag_t t, bus_space_handle_t bsh, bus_size_t size,
3589: bus_addr_t *adrp)
3590: {
3591: u_long va, endva;
3592: bus_addr_t bpa;
3593:
3594: /*
3595: * Find the correct bus physical address.
3596: */
3597: if (t == I386_BUS_SPACE_IO) {
3598: bpa = bsh;
3599: } else if (t == I386_BUS_SPACE_MEM) {
3600: bpa = (bus_addr_t)ISA_PHYSADDR(bsh);
3601: if (IOM_BEGIN <= bpa && bpa <= IOM_END)
3602: goto ok;
3603:
3604: va = trunc_page(bsh);
3605: endva = round_page(bsh + size);
3606:
3607: #ifdef DIAGNOSTIC
3608: if (endva <= va)
3609: panic("_bus_space_unmap: overflow");
3610: #endif
3611:
3612: (void) pmap_extract(pmap_kernel(), va, &bpa);
3613: bpa += (bsh & PGOFSET);
3614:
3615: /*
3616: * Free the kernel virtual mapping.
3617: */
3618: uvm_km_free(kernel_map, va, endva - va);
3619: } else
3620: panic("bus_space_unmap: bad bus space tag");
3621:
3622: ok:
3623: if (adrp != NULL)
3624: *adrp = bpa;
3625: }
3626:
3627: void
3628: bus_space_free(bus_space_tag_t t, bus_space_handle_t bsh, bus_size_t size)
3629: {
3630:
3631: /* bus_space_unmap() does all that we need to do. */
3632: bus_space_unmap(t, bsh, size);
3633: }
3634:
3635: int
3636: bus_space_subregion(bus_space_tag_t t, bus_space_handle_t bsh,
3637: bus_size_t offset, bus_size_t size, bus_space_handle_t *nbshp)
3638: {
3639: *nbshp = bsh + offset;
3640: return (0);
3641: }
3642:
3643: /*
3644: * Common function for DMA map creation. May be called by bus-specific
3645: * DMA map creation functions.
3646: */
3647: int
3648: _bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
3649: bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
3650: {
3651: struct i386_bus_dmamap *map;
3652: void *mapstore;
3653: size_t mapsize;
3654:
3655: /*
3656: * Allocate and initialize the DMA map. The end of the map
3657: * is a variable-sized array of segments, so we allocate enough
3658: * room for them in one shot.
3659: *
3660: * Note we don't preserve the WAITOK or NOWAIT flags. Preservation
3661: * of ALLOCNOW notifies others that we've reserved these resources,
3662: * and they are not to be freed.
3663: *
3664: * The bus_dmamap_t includes one bus_dma_segment_t, hence
3665: * the (nsegments - 1).
3666: */
3667: mapsize = sizeof(struct i386_bus_dmamap) +
3668: (sizeof(bus_dma_segment_t) * (nsegments - 1));
3669: if ((mapstore = malloc(mapsize, M_DEVBUF,
3670: (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL)
3671: return (ENOMEM);
3672:
3673: bzero(mapstore, mapsize);
3674: map = (struct i386_bus_dmamap *)mapstore;
3675: map->_dm_size = size;
3676: map->_dm_segcnt = nsegments;
3677: map->_dm_maxsegsz = maxsegsz;
3678: map->_dm_boundary = boundary;
3679: map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
3680: map->dm_mapsize = 0; /* no valid mappings */
3681: map->dm_nsegs = 0;
3682:
3683: *dmamp = map;
3684: return (0);
3685: }
3686:
3687: /*
3688: * Common function for DMA map destruction. May be called by bus-specific
3689: * DMA map destruction functions.
3690: */
3691: void
3692: _bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
3693: {
3694:
3695: free(map, M_DEVBUF);
3696: }
3697:
3698: /*
3699: * Common function for loading a DMA map with a linear buffer. May
3700: * be called by bus-specific DMA map load functions.
3701: */
3702: int
3703: _bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
3704: bus_size_t buflen, struct proc *p, int flags)
3705: {
3706: bus_addr_t lastaddr;
3707: int seg, error;
3708:
3709: /*
3710: * Make sure that on error condition we return "no valid mappings".
3711: */
3712: map->dm_mapsize = 0;
3713: map->dm_nsegs = 0;
3714:
3715: if (buflen > map->_dm_size)
3716: return (EINVAL);
3717:
3718: seg = 0;
3719: error = _bus_dmamap_load_buffer(t, map, buf, buflen, p, flags,
3720: &lastaddr, &seg, 1);
3721: if (error == 0) {
3722: map->dm_mapsize = buflen;
3723: map->dm_nsegs = seg + 1;
3724: }
3725: return (error);
3726: }
3727:
3728: /*
3729: * Like _bus_dmamap_load(), but for mbufs.
3730: */
/*
 * Like _bus_dmamap_load(), but walks an mbuf chain, loading each
 * non-empty mbuf in turn.  Always a kernel-space load (proc == NULL).
 */
int
_bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m0,
    int flags)
{
	paddr_t lastaddr;
	int seg, error, first;
	struct mbuf *m;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

#ifdef DIAGNOSTIC
	/* The chain head must carry the packet header (total length). */
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_bus_dmamap_load_mbuf: no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	first = 1;
	seg = 0;
	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next) {
		/* Empty mbufs contribute no segments; skip them. */
		if (m->m_len == 0)
			continue;
		/*
		 * "first" is only set for the first non-empty mbuf,
		 * letting later chunks coalesce with prior segments
		 * inside _bus_dmamap_load_buffer().
		 */
		error = _bus_dmamap_load_buffer(t, map, m->m_data, m->m_len,
		    NULL, flags, &lastaddr, &seg, first);
		first = 0;
	}
	if (error == 0) {
		map->dm_mapsize = m0->m_pkthdr.len;
		map->dm_nsegs = seg + 1;
	}
	return (error);
}
3769:
3770: /*
3771: * Like _bus_dmamap_load(), but for uios.
3772: */
/*
 * Like _bus_dmamap_load(), but walks a uio's iovec array.  User-space
 * addresses are resolved through the owning process's pmap.
 */
int
_bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
    int flags)
{
	paddr_t lastaddr;
	int seg, i, error, first;
	bus_size_t minlen, resid;
	struct proc *p = NULL;
	struct iovec *iov;
	caddr_t addr;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	if (resid > map->_dm_size)
		return (EINVAL);

	if (uio->uio_segflg == UIO_USERSPACE) {
		/* User addresses need the owning proc for translation. */
		p = uio->uio_procp;
#ifdef DIAGNOSTIC
		if (p == NULL)
			panic("_bus_dmamap_load_uio: USERSPACE but no proc");
#endif
	}

	first = 1;
	seg = 0;
	error = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
		addr = (caddr_t)iov[i].iov_base;

		error = _bus_dmamap_load_buffer(t, map, addr, minlen,
		    p, flags, &lastaddr, &seg, first);
		first = 0;

		resid -= minlen;
	}
	if (error == 0) {
		/* resid was not consumed from the uio; uio_resid is intact. */
		map->dm_mapsize = uio->uio_resid;
		map->dm_nsegs = seg + 1;
	}
	return (error);
}
3827:
3828: /*
3829: * Like _bus_dmamap_load(), but for raw memory allocated with
3830: * bus_dmamem_alloc().
3831: */
3832: int
3833: _bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map, bus_dma_segment_t *segs,
3834: int nsegs, bus_size_t size, int flags)
3835: {
3836: if (nsegs > map->_dm_segcnt || size > map->_dm_size)
3837: return (EINVAL);
3838:
3839: /*
3840: * Make sure we don't cross any boundaries.
3841: */
3842: if (map->_dm_boundary) {
3843: bus_addr_t bmask = ~(map->_dm_boundary - 1);
3844: int i;
3845:
3846: for (i = 0; i < nsegs; i++) {
3847: if (segs[i].ds_len > map->_dm_maxsegsz)
3848: return (EINVAL);
3849: if ((segs[i].ds_addr & bmask) !=
3850: ((segs[i].ds_addr + segs[i].ds_len - 1) & bmask))
3851: return (EINVAL);
3852: }
3853: }
3854:
3855: bcopy(segs, map->dm_segs, nsegs * sizeof(*segs));
3856: map->dm_nsegs = nsegs;
3857: return (0);
3858: }
3859:
3860: /*
3861: * Common function for unloading a DMA map. May be called by
3862: * bus-specific DMA map unload functions.
3863: */
3864: void
3865: _bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
3866: {
3867:
3868: /*
3869: * No resources to free; just mark the mappings as
3870: * invalid.
3871: */
3872: map->dm_mapsize = 0;
3873: map->dm_nsegs = 0;
3874: }
3875:
3876: /*
3877: * Common function for DMA-safe memory allocation. May be called
3878: * by bus-specific DMA memory allocation functions.
3879: */
int
_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{

	/* Allocate anywhere from physical 0 up to the end of managed RAM. */
	return (_bus_dmamem_alloc_range(t, size, alignment, boundary,
	    segs, nsegs, rsegs, flags, 0, trunc_page(avail_end)));
}
3889:
3890: /*
3891: * Common function for freeing DMA-safe memory. May be called by
3892: * bus-specific DMA memory free functions.
3893: */
void
_bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{
	struct vm_page *m;
	bus_addr_t addr;
	struct pglist mlist;
	int curseg;

	/*
	 * Build a list of pages to free back to the VM system.
	 */
	TAILQ_INIT(&mlist);
	for (curseg = 0; curseg < nsegs; curseg++) {
		/* Walk each segment a page at a time. */
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE) {
			m = PHYS_TO_VM_PAGE(addr);
			TAILQ_INSERT_TAIL(&mlist, m, pageq);
		}
	}

	/* Return all collected pages to UVM in one call. */
	uvm_pglistfree(&mlist);
}
3917:
3918: /*
3919: * Common function for mapping DMA-safe memory. May be called by
3920: * bus-specific DMA memory map functions.
3921: */
int
_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, caddr_t *kvap, int flags)
{
	vaddr_t va;
	bus_addr_t addr;
	int curseg;

	/* Reserve a page-rounded chunk of kernel virtual space. */
	size = round_page(size);
	va = uvm_km_valloc(kernel_map, size);
	if (va == 0)
		return (ENOMEM);

	*kvap = (caddr_t)va;

	for (curseg = 0; curseg < nsegs; curseg++) {
		/* Enter each physical page of the segment at the next VA. */
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
			/* The segments must not exceed the reserved size. */
			if (size == 0)
				panic("_bus_dmamem_map: size botch");
			pmap_enter(pmap_kernel(), va, addr,
			    VM_PROT_READ | VM_PROT_WRITE,
			    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
		}
	}
	pmap_update(pmap_kernel());

	return (0);
}
3952:
3953: /*
3954: * Common function for unmapping DMA-safe memory. May be called by
3955: * bus-specific DMA memory unmapping functions.
3956: */
3957: void
3958: _bus_dmamem_unmap(bus_dma_tag_t t, caddr_t kva, size_t size)
3959: {
3960:
3961: #ifdef DIAGNOSTIC
3962: if ((u_long)kva & PGOFSET)
3963: panic("_bus_dmamem_unmap");
3964: #endif
3965:
3966: size = round_page(size);
3967: uvm_km_free(kernel_map, (vaddr_t)kva, size);
3968: }
3969:
3970: /*
3971: * Common functin for mmap(2)'ing DMA-safe memory. May be called by
3972: * bus-specific DMA mmap(2)'ing functions.
3973: */
3974: paddr_t
3975: _bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs, off_t off,
3976: int prot, int flags)
3977: {
3978: int i;
3979:
3980: for (i = 0; i < nsegs; i++) {
3981: #ifdef DIAGNOSTIC
3982: if (off & PGOFSET)
3983: panic("_bus_dmamem_mmap: offset unaligned");
3984: if (segs[i].ds_addr & PGOFSET)
3985: panic("_bus_dmamem_mmap: segment unaligned");
3986: if (segs[i].ds_len & PGOFSET)
3987: panic("_bus_dmamem_mmap: segment size not multiple"
3988: " of page size");
3989: #endif
3990: if (off >= segs[i].ds_len) {
3991: off -= segs[i].ds_len;
3992: continue;
3993: }
3994:
3995: return (atop(segs[i].ds_addr + off));
3996: }
3997:
3998: /* Page not found. */
3999: return (-1);
4000: }
4001:
4002: /**********************************************************************
4003: * DMA utility functions
4004: **********************************************************************/
4005: /*
4006: * Utility function to load a linear buffer. lastaddrp holds state
4007: * between invocations (for multiple-buffer loads). segp contains
4008: * the starting segment on entrance, and the ending segment on exit.
4009: * first indicates if this is the first invocation of this function.
4010: */
int
_bus_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags, paddr_t *lastaddrp, int *segp,
    int first)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vaddr_t vaddr = (vaddr_t)buf;
	int seg;
	pmap_t pmap;

	/* Translate through the caller's pmap for user buffers. */
	if (p != NULL)
		pmap = p->p_vmspace->vm_map.pmap;
	else
		pmap = pmap_kernel();

	lastaddr = *lastaddrp;
	/* NB: if _dm_boundary is 0, bmask is all-ones and is unused below. */
	bmask = ~(map->_dm_boundary - 1);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 * NOTE(review): the pmap_extract() return value is not
		 * checked; an unmapped vaddr would leave curaddr stale.
		 */
		pmap_extract(pmap, vaddr, (paddr_t *)&curaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (map->_dm_boundary > 0) {
			baddr = (curaddr + map->_dm_boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * previous segment if possible.
		 */
		if (first) {
			map->dm_segs[seg].ds_addr = curaddr;
			map->dm_segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			/*
			 * Coalesce only if physically contiguous, the
			 * merged length fits _dm_maxsegsz, and both ends
			 * stay within the same boundary window.
			 */
			if (curaddr == lastaddr &&
			    (map->dm_segs[seg].ds_len + sgsize) <=
			     map->_dm_maxsegsz &&
			    (map->_dm_boundary == 0 ||
			     (map->dm_segs[seg].ds_addr & bmask) ==
			     (curaddr & bmask)))
				map->dm_segs[seg].ds_len += sgsize;
			else {
				/* Out of segments: report EFBIG below. */
				if (++seg >= map->_dm_segcnt)
					break;
				map->dm_segs[seg].ds_addr = curaddr;
				map->dm_segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	/* Export state for a possible follow-up call (multi-buffer load). */
	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	if (buflen != 0)
		return (EFBIG);		/* XXX better return value here? */
	return (0);
}
4091:
4092: /*
4093: * Allocate physical memory from the given physical address range.
4094: * Called by DMA-safe memory allocation methods.
4095: */
int
_bus_dmamem_alloc_range(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags, paddr_t low, paddr_t high)
{
	paddr_t curaddr, lastaddr;
	struct vm_page *m;
	struct pglist mlist;
	int curseg, error;

	/* Always round the size. */
	size = round_page(size);

	TAILQ_INIT(&mlist);
	/*
	 * Allocate pages from the VM system.
	 * For non-ISA mappings first try higher memory segments, so
	 * low memory below the ISA DMA bounce threshold is preserved;
	 * fall back to the full [low, high] range if that fails.
	 */
	if (high <= ISA_DMA_BOUNCE_THRESHOLD || (error = uvm_pglistalloc(size,
	    round_page(ISA_DMA_BOUNCE_THRESHOLD), high, alignment, boundary,
	    &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0)))
		error = uvm_pglistalloc(size, low, high, alignment, boundary,
		    &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
	if (error)
		return (error);

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.  Physically contiguous pages are
	 * merged into a single segment.
	 */
	m = TAILQ_FIRST(&mlist);
	curseg = 0;
	lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
	segs[curseg].ds_len = PAGE_SIZE;

	for (m = TAILQ_NEXT(m, pageq); m != NULL; m = TAILQ_NEXT(m, pageq)) {
		curaddr = VM_PAGE_TO_PHYS(m);
#ifdef DIAGNOSTIC
		if (curseg == nsegs) {
			printf("uvm_pglistalloc returned too many\n");
			panic("_bus_dmamem_alloc_range");
		}
		if (curaddr < low || curaddr >= high) {
			printf("uvm_pglistalloc returned non-sensical"
			    " address 0x%lx\n", curaddr);
			panic("_bus_dmamem_alloc_range");
		}
#endif
		/* Contiguous with the previous page: extend the segment. */
		if (curaddr == (lastaddr + PAGE_SIZE))
			segs[curseg].ds_len += PAGE_SIZE;
		else {
			/* Discontiguity: start a new segment. */
			curseg++;
			segs[curseg].ds_addr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
		}
		lastaddr = curaddr;
	}
	*rsegs = curseg + 1;

	return (0);
}
4157:
4158: #ifdef DIAGNOSTIC
/*
 * Verify a splassert(): the current IPL must be at least wantipl, and
 * code asserting IPL_NONE must not be running in interrupt context.
 */
void
splassert_check(int wantipl, const char *func)
{
	if (lapic_tpr < wantipl)
		splassert_fail(wantipl, lapic_tpr, func);
	/* ci_idepth != 0 means we are inside an interrupt handler. */
	if (wantipl == IPL_NONE && curcpu()->ci_idepth != 0)
		splassert_fail(-1, curcpu()->ci_idepth, func);
}
4167: #endif
4168:
4169: #ifdef MULTIPROCESSOR
/*
 * Called on interrupt entry: take the big kernel lock for interrupts
 * below IPL_SCHED and bump this CPU's interrupt nesting depth.
 */
void
i386_intlock(int ipl)
{
	if (ipl < IPL_SCHED)
		__mp_lock(&kernel_lock);

	curcpu()->ci_idepth++;
}
4178:
/*
 * Called on interrupt exit: drop the nesting depth and release the
 * big kernel lock if i386_intlock() took it for this ipl.
 */
void
i386_intunlock(int ipl)
{
	curcpu()->ci_idepth--;

	if (ipl < IPL_SCHED)
		__mp_unlock(&kernel_lock);
}
4187:
/*
 * Soft interrupt entry: always takes the big kernel lock and bumps
 * the interrupt nesting depth.
 */
void
i386_softintlock(void)
{
	__mp_lock(&kernel_lock);
	curcpu()->ci_idepth++;
}
4194:
/*
 * Soft interrupt exit: undoes i386_softintlock().
 */
void
i386_softintunlock(void)
{
	curcpu()->ci_idepth--;
	__mp_unlock(&kernel_lock);
}
4201: #endif
4202:
4203: /*
4204: * Software interrupt registration
4205: *
4206: * We hand-code this to ensure that it's atomic.
4207: */
void
softintr(int sir, int vec)
{
	/* Atomically OR the pending bit(s) into ipending. */
	__asm __volatile("orl %1, %0" : "=m" (ipending) : "ir" (sir));
#ifdef MULTIPROCESSOR
	/* Send ourselves an IPI so the soft interrupt is serviced. */
	i82489_writereg(LAPIC_ICRLO,
	    vec | LAPIC_DLMODE_FIXED | LAPIC_LVL_ASSERT | LAPIC_DEST_SELF);
#endif
}
4217:
4218: /*
4219: * Raise current interrupt priority level, and return the old one.
4220: */
int
splraise(int ncpl)
{
	int ocpl;

	/* _SPLRAISE stores the old level in ocpl and raises to ncpl. */
	_SPLRAISE(ocpl, ncpl);
	return (ocpl);
}
4229:
4230: /*
4231: * Restore an old interrupt priority level. If any thereby unmasked
4232: * interrupts are pending, call Xspllower() to process them.
4233: */
4234: void
4235: splx(int ncpl)
4236: {
4237: _SPLX(ncpl);
4238: }
4239:
4240: /*
4241: * Same as splx(), but we return the old value of spl, for the
4242: * benefit of some splsoftclock() callers.
4243: */
4244: int
4245: spllower(int ncpl)
4246: {
4247: int ocpl = lapic_tpr;
4248:
4249: splx(ncpl);
4250: return (ocpl);
4251: }
/* CVSweb */