/* CVSweb annotation listing of sys/arch/i386/i386/kvm86.c, revision 1.1.1.1 */
1.1 nbrk 1: /* $OpenBSD: kvm86.c,v 1.3 2007/02/20 21:15:01 tom Exp $ */
2: /* $NetBSD: kvm86.c,v 1.10 2005/12/26 19:23:59 perry Exp $ */
3: /*
4: * Copyright (c) 2002
5: * Matthias Drochner. All rights reserved.
6: *
7: * Redistribution and use in source and binary forms, with or without
8: * modification, are permitted provided that the following conditions
9: * are met:
10: * 1. Redistributions of source code must retain the above copyright
11: * notice, this list of conditions, and the following disclaimer.
12: * 2. Redistributions in binary form must reproduce the above copyright
13: * notice, this list of conditions and the following disclaimer in the
14: * documentation and/or other materials provided with the distribution.
15: *
16: * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17: * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18: * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19: * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20: * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21: * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22: * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23: * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24: * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25: * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26: * SUCH DAMAGE.
27: */
28: #include <sys/cdefs.h>
29:
30: #include <sys/param.h>
31: #include <sys/systm.h>
32: #include <sys/proc.h>
33: #include <sys/user.h>
34: #include <sys/malloc.h>
35: #include <sys/mutex.h>
36: #include <sys/simplelock.h>
37: #include <uvm/uvm_extern.h>
38: #include <uvm/uvm.h>
39: #include <machine/pcb.h>
40: #include <machine/pte.h>
41: #include <machine/pmap.h>
42: #include <machine/kvm86.h>
43: #include <machine/cpu.h>
44:
45: /* assembler functions in kvm86call.s */
46: extern int kvm86_call(struct trapframe *);
47: extern void kvm86_ret(struct trapframe *, int);
48:
49: #define PGTABLE_SIZE ((1024 + 64) * 1024 / PAGE_SIZE)
50:
/*
 * Per-VM state for a kernel-internal vm86 task.  Laid out so that the
 * page directly below it (allocated in kvm86_init()) serves as the
 * kernel stack for the vm86 call.
 */
struct kvm86_data {
	pt_entry_t pgtbl[PGTABLE_SIZE];	/* page table covering the vm86 address space */

	struct segment_descriptor sd;	/* TSS descriptor installed for this VM */

	struct pcb pcb;			/* PCB (incl. TSS) used while running in vm86 mode */
	u_long iomap[0x10000/32];	/* full-size I/O permission bitmap: one bit per port */
};
59:
60: void kvm86_map(struct kvm86_data *, paddr_t, uint32_t);
61: void kvm86_mapbios(struct kvm86_data *);
62: void kvm86_prepare(struct kvm86_data *vmd);
63: /*
64: * global VM for BIOS calls
65: */
66: struct kvm86_data *bioscallvmd;
67: /* page for trampoline and stack */
68: void *bioscallscratchpage;
69: /* where this page is mapped in the vm86 */
70: #define BIOSCALLSCRATCHPAGE_VMVA 0x1000
71: /* a virtual page to map in vm86 memory temporarily */
72: vaddr_t bioscalltmpva;
73:
74: struct mutex kvm86_mp_mutex;
75:
76: #define KVM86_IOPL3 /* not strictly necessary, saves a lot of traps */
77:
78: void
79: kvm86_init()
80: {
81: size_t vmdsize;
82: char *buf;
83: struct kvm86_data *vmd;
84: struct pcb *pcb;
85: paddr_t pa;
86: int i;
87:
88: vmdsize = round_page(sizeof(struct kvm86_data)) + PAGE_SIZE;
89:
90: if ((buf = (char *)uvm_km_zalloc(kernel_map, vmdsize)) == NULL)
91: return;
92:
93: /* first page is stack */
94: vmd = (struct kvm86_data *)(buf + PAGE_SIZE);
95: pcb = &vmd->pcb;
96:
97: /*
98: * derive pcb and TSS from proc0
99: * we want to access all IO ports, so we need a full-size
100: * permission bitmap
101: * XXX do we really need the pcb or just the TSS?
102: */
103: memcpy(pcb, &proc0.p_addr->u_pcb, sizeof(struct pcb));
104: pcb->pcb_tss.tss_esp0 = (int)vmd;
105: pcb->pcb_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
106: for (i = 0; i < sizeof(vmd->iomap) / 4; i++)
107: vmd->iomap[i] = 0;
108: pcb->pcb_tss.tss_ioopt =
109: ((caddr_t)vmd->iomap - (caddr_t)&pcb->pcb_tss) << 16;
110:
111: /* setup TSS descriptor (including our iomap) */
112: setsegment(&vmd->sd, &pcb->pcb_tss,
113: sizeof(struct pcb) + sizeof(vmd->iomap) - 1,
114: SDT_SYS386TSS, SEL_KPL, 0, 0);
115:
116: /* prepare VM for BIOS calls */
117: kvm86_mapbios(vmd);
118: if ((bioscallscratchpage = (void *)uvm_km_alloc(kernel_map, PAGE_SIZE))
119: == 0)
120: return;
121:
122: pmap_extract(pmap_kernel(), (vaddr_t)bioscallscratchpage, &pa);
123: kvm86_map(vmd, pa, BIOSCALLSCRATCHPAGE_VMVA);
124: bioscallvmd = vmd;
125: bioscalltmpva = uvm_km_alloc(kernel_map, PAGE_SIZE);
126: mtx_init(&kvm86_mp_mutex, IPL_IPI);
127: }
128:
129: /*
130: * XXX pass some stuff to the assembler code
131: * XXX this should be done cleanly (in call argument to kvm86_call())
132: */
133:
volatile struct pcb *vm86pcb;		/* PCB/TSS to switch to for the call */
volatile int vm86tssd0, vm86tssd1;	/* raw 8-byte TSS descriptor, as two words */
volatile paddr_t vm86newptd;		/* PDE value mapping the VM's page table */
volatile struct trapframe *vm86frame;	/* where to build the vm86 trapframe */
volatile pt_entry_t *vm86pgtableva;	/* kernel VA of the VM's page table */
139:
/*
 * Publish the per-VM state in the globals consumed by the assembler
 * stub (kvm86call.s) immediately before kvm86_call() enters vm86 mode.
 */
void
kvm86_prepare(struct kvm86_data *vmd)
{
	/* PDE for the VM's page table: valid, writable, user-accessible */
	vm86newptd = vtophys((vaddr_t)vmd) | PG_V | PG_RW | PG_U | PG_u;
	vm86pgtableva = vmd->pgtbl;
	/* trapframe is built directly below the kvm86_data (top of stack) */
	vm86frame = (struct trapframe *)vmd - 1;
	vm86pcb = &vmd->pcb;
	/* split the 8-byte TSS descriptor into two 32-bit words */
	vm86tssd0 = *(int*)&vmd->sd;
	vm86tssd1 = *((int*)&vmd->sd + 1);
}
150:
/*
 * Map physical page 'pa' at vm86 virtual address 'vmva' by installing
 * a valid, writable, user-accessible PTE in the VM's page table.
 * 'vmva' must be page-aligned and below the 1M+64K window.
 */
void
kvm86_map(struct kvm86_data *vmd, paddr_t pa, uint32_t vmva)
{

	vmd->pgtbl[vmva >> 12] = pa | PG_V | PG_RW | PG_U | PG_u;
}
157:
158: void
159: kvm86_mapbios(struct kvm86_data *vmd)
160: {
161: paddr_t pa;
162:
163: /* map first physical page (vector table, BIOS data) */
164: kvm86_map(vmd, 0, 0);
165:
166: /* map ISA hole */
167: for (pa = 0xa0000; pa < 0x100000; pa += PAGE_SIZE)
168: kvm86_map(vmd, pa, pa);
169: }
170:
/*
 * Allocate a kernel page and map it into the BIOS-call VM at 'vmva'.
 * Returns the kernel VA of the page, or NULL if 'vmva' is already
 * mapped or the allocation fails.  Undo with kvm86_bios_delpage().
 */
void *
kvm86_bios_addpage(uint32_t vmva)
{
	void *mem;
	paddr_t pa;

	if (bioscallvmd->pgtbl[vmva >> 12]) /* allocated? */
		return (NULL);

	if ((mem = (void *)uvm_km_alloc(kernel_map, PAGE_SIZE)) == NULL)
		return (NULL);

	/* resolve the kernel page's physical address, then map it in the VM */
	pmap_extract(pmap_kernel(), (vaddr_t)mem, &pa);
	kvm86_map(bioscallvmd, pa, vmva);

	return (mem);
}
188:
/*
 * Release a page added with kvm86_bios_addpage(): unmap it from the
 * BIOS-call VM first, then free the backing kernel page at 'kva'.
 */
void
kvm86_bios_delpage(uint32_t vmva, void *kva)
{

	bioscallvmd->pgtbl[vmva >> 12] = 0;
	uvm_km_free(kernel_map, (vaddr_t)kva, PAGE_SIZE);
}
196:
/*
 * Copy up to 'len' bytes out of the BIOS-call VM starting at vm86
 * address 'vmva' into 'buf', one page at a time, by temporarily
 * mapping each backing page at bioscalltmpva.  Stops early at the
 * first unmapped VM page; returns the number of bytes copied.
 *
 * NOTE(review): bioscalltmpva is a single shared window and the last
 * mapping is left in place (no pmap_kremove) — presumably callers are
 * serialized; confirm against the callers.
 */
size_t
kvm86_bios_read(u_int32_t vmva, char *buf, size_t len)
{
	size_t todo, now;
	paddr_t vmpa;

	todo = len;
	while (todo > 0) {
		/* bytes remaining in the current VM page */
		now = min(todo, PAGE_SIZE - (vmva & (PAGE_SIZE - 1)));

		if (!bioscallvmd->pgtbl[vmva >> 12])
			break;
		/* strip PTE flag bits to recover the physical page address */
		vmpa = bioscallvmd->pgtbl[vmva >> 12] & ~(PAGE_SIZE - 1);
		pmap_kenter_pa(bioscalltmpva, vmpa, VM_PROT_READ);
		pmap_update(pmap_kernel());

		memcpy(buf, (void *)(bioscalltmpva + (vmva & (PAGE_SIZE - 1))),
		    now);
		buf += now;
		todo -= now;
		vmva += now;
	}
	return (len - todo);
}
221:
/*
 * Execute BIOS software interrupt 'intno' in the vm86 task.  A tiny
 * CLI/INTxx/STI/HLT trampoline is patched with the interrupt number
 * and copied to the scratch page; the trapframe is pointed at it and
 * the call is dispatched via kvm86_call().  The HLT faults back into
 * kvm86_gpfault(), which terminates the call.  Returns kvm86_call()'s
 * result.  Caller is expected to hold kvm86_mp_mutex (see
 * kvm86_simplecall()).
 */
int
kvm86_bioscall(int intno, struct trapframe *tf)
{
	static const unsigned char call[] = {
		0xfa, /* CLI */
		0xcd, /* INTxx */
		0,
		0xfb, /* STI */
		0xf4  /* HLT */
	};

	memcpy(bioscallscratchpage, call, sizeof(call));
	/* patch the INT operand byte with the requested vector */
	*((unsigned char *)bioscallscratchpage + 2) = intno;

	/* real-mode cs:ip -> trampoline; ss:sp -> top of the scratch page */
	tf->tf_eip = BIOSCALLSCRATCHPAGE_VMVA;
	tf->tf_cs = 0;
	tf->tf_esp = BIOSCALLSCRATCHPAGE_VMVA + PAGE_SIZE - 2;
	tf->tf_ss = 0;
	tf->tf_eflags = PSL_USERSET | PSL_VM;
#ifdef KVM86_IOPL3
	tf->tf_eflags |= PSL_IOPL;
#endif
	tf->tf_ds = tf->tf_es = tf->tf_fs = tf->tf_gs = 0;

	kvm86_prepare(bioscallvmd); /* XXX */
	return (kvm86_call(tf));
}
249:
250: int
251: kvm86_simplecall(int no, struct kvm86regs *regs)
252: {
253: struct trapframe tf;
254: int res;
255:
256: memset(&tf, 0, sizeof(struct trapframe));
257: tf.tf_eax = regs->eax;
258: tf.tf_ebx = regs->ebx;
259: tf.tf_ecx = regs->ecx;
260: tf.tf_edx = regs->edx;
261: tf.tf_esi = regs->esi;
262: tf.tf_edi = regs->edi;
263: tf.tf_vm86_es = regs->es;
264:
265: mtx_enter(&kvm86_mp_mutex);
266: res = kvm86_bioscall(no, &tf);
267: mtx_leave(&kvm86_mp_mutex);
268:
269: regs->eax = tf.tf_eax;
270: regs->ebx = tf.tf_ebx;
271: regs->ecx = tf.tf_ecx;
272: regs->edx = tf.tf_edx;
273: regs->esi = tf.tf_esi;
274: regs->edi = tf.tf_edi;
275: regs->es = tf.tf_vm86_es;
276: regs->eflags = tf.tf_eflags;
277:
278: return (res);
279: }
280:
/*
 * General-protection-fault handler for the vm86 task: decode the
 * faulting instruction at real-mode cs:eip and emulate the privileged
 * instructions a BIOS may execute.  HLT ends the call successfully
 * (it is the last byte of the trampoline — see kvm86_bioscall());
 * INTxx and IRET are emulated by adjusting the 16-bit stack and
 * control registers; anything unrecognized aborts the call with -1
 * via kvm86_ret().
 */
void
kvm86_gpfault(struct trapframe *tf)
{
	unsigned char *kva, insn, trapno;
	uint16_t *sp;

	/* linear address of the faulting instruction (cs<<4 + ip) */
	kva = (unsigned char *)((tf->tf_cs << 4) + tf->tf_eip);
	insn = *kva;
#ifdef KVM86DEBUG
	printf("kvm86_gpfault: cs=%x, eip=%x, insn=%x, eflags=%x\n",
	    tf->tf_cs, tf->tf_eip, insn, tf->tf_eflags);
#endif

	/* we must only get here from vm86 mode */
	KASSERT(tf->tf_eflags & PSL_VM);

	switch (insn) {
	case 0xf4: /* HLT - normal exit */
		kvm86_ret(tf, 0);
		break;
	case 0xcd: /* INTxx */
		/* fake a return stack frame and call real mode handler */
		trapno = *(kva + 1);
		/* real-mode stack: push eflags, cs, return ip (3 words) */
		sp = (uint16_t *)((tf->tf_ss << 4) + tf->tf_esp);
		*(--sp) = tf->tf_eflags;
		*(--sp) = tf->tf_cs;
		*(--sp) = tf->tf_eip + 2;	/* resume after the 2-byte INT */
		tf->tf_esp -= 6;
		/* vector to the real-mode handler from the IVT at 0 */
		tf->tf_cs = *(uint16_t *)(trapno * 4 + 2);
		tf->tf_eip = *(uint16_t *)(trapno * 4);
		break;
	case 0xcf: /* IRET */
		/* pop ip, cs and flags pushed by the INT emulation above */
		sp = (uint16_t *)((tf->tf_ss << 4) + tf->tf_esp);
		tf->tf_eip = *(sp++);
		tf->tf_cs = *(sp++);
		tf->tf_eflags = *(sp++);
		tf->tf_esp += 6;
		tf->tf_eflags |= PSL_VM; /* outside of 16bit flag reg */
		break;
#ifndef KVM86_IOPL3 /* XXX check VME? */
	case 0xfa: /* CLI */
	case 0xfb: /* STI */
		/* XXX ignore for now */
		tf->tf_eip++;
		break;
	case 0x9c: /* PUSHF */
		sp = (uint16_t *)((tf->tf_ss << 4) + tf->tf_esp);
		*(--sp) = tf->tf_eflags;
		tf->tf_esp -= 2;
		tf->tf_eip++;
		break;
	case 0x9d: /* POPF */
		sp = (uint16_t *)((tf->tf_ss << 4) + tf->tf_esp);
		tf->tf_eflags = *(sp++);
		tf->tf_esp += 2;
		tf->tf_eip++;
		tf->tf_eflags |= PSL_VM; /* outside of 16bit flag reg */
		break;
#endif
	default:
#ifdef KVM86DEBUG
		printf("kvm86_gpfault: unhandled\n");
#else
		printf("kvm86_gpfault: cs=%x, eip=%x, insn=%x, eflags=%x\n",
		    tf->tf_cs, tf->tf_eip, insn, tf->tf_eflags);
#endif
		/*
		 * signal error to caller
		 */
		kvm86_ret(tf, -1);
		break;
	}
}
/* End of CVSweb annotation listing */