Annotation of prex-old/sys/arch/i386/i386/mmu.c, Revision 1.1.1.1.2.1
1.1 nbrk 1: /*-
2: * Copyright (c) 2005, Kohsuke Ohtani
3: * All rights reserved.
4: *
5: * Redistribution and use in source and binary forms, with or without
6: * modification, are permitted provided that the following conditions
7: * are met:
8: * 1. Redistributions of source code must retain the above copyright
9: * notice, this list of conditions and the following disclaimer.
10: * 2. Redistributions in binary form must reproduce the above copyright
11: * notice, this list of conditions and the following disclaimer in the
12: * documentation and/or other materials provided with the distribution.
13: * 3. Neither the name of the author nor the names of any co-contributors
14: * may be used to endorse or promote products derived from this software
15: * without specific prior written permission.
16: *
17: * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18: * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19: * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20: * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21: * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22: * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23: * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24: * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25: * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26: * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27: * SUCH DAMAGE.
28: */
29:
30: /*
31: * mmu.c - memory management unit support routines
32: */
33:
34: /*
35: * This module provides virtual/physical address translation for
36: * intel x86 MMU. This kernel will do only page level translation
37: * and protection and it does not use x86 segment mechanism.
38: */
39:
40: #include <kernel.h>
41: #include <page.h>
42: #include <cpu.h>
1.1.1.1.2.1! nbrk 43: #include <locore.h>
1.1 nbrk 44:
45: /*
46: * Map physical memory range into virtual address
47: *
48: * Returns 0 on success, or -1 on failure.
49: *
50: * Map type can be one of the following type.
51: * PG_UNMAP - Remove mapping
52: * PG_READ - Read only mapping
53: * PG_WRITE - Read/write allowed
54: *
55: * Setup the appropriate page tables for mapping. If there is no
1.1.1.1.2.1! nbrk 56: * page table for the specified address, new page table is
! 57: * allocated.
1.1 nbrk 58: *
59: * This routine does not return any error even if the specified
60: * address has been already mapped to other physical address.
61: * In this case, it will just override the existing mapping.
62: *
1.1.1.1.2.1! nbrk 63: * In order to unmap the page, pg_type is specified as 0. But,
! 64: * the page tables are not released even if there is no valid
1.1 nbrk 65: * page entry in it. All page tables are released when mmu_delmap()
66: * is called when task is terminated.
67: *
1.1.1.1.2.1! nbrk 68: * TODO: TLB should be flushed for specific page by invalpg in
! 69: * case of i486.
1.1 nbrk 70: */
71: int
72: mmu_map(pgd_t pgd, void *phys, void *virt, size_t size, int type)
73: {
1.1.1.1.2.1! nbrk 74: uint32_t pg_type;
1.1 nbrk 75: page_table_t pte;
76: void *pg; /* page */
1.1.1.1.2.1! nbrk 77: vaddr_t va;
! 78: paddr_t pa;
1.1 nbrk 79:
1.1.1.1.2.1! nbrk 80: pa = (paddr_t)PAGE_ALIGN(phys);
! 81: va = (vaddr_t)PAGE_ALIGN(virt);
1.1 nbrk 82: size = PAGE_TRUNC(size);
83:
84: /* Build page type */
85: pg_type = 0;
86: switch (type) {
87: case PG_UNMAP:
88: break;
89: case PG_READ:
1.1.1.1.2.1! nbrk 90: pg_type = (uint32_t)(PTE_USER | PTE_PRESENT);
1.1 nbrk 91: break;
92: case PG_WRITE:
1.1.1.1.2.1! nbrk 93: pg_type = (uint32_t)(PTE_USER | PTE_WRITE | PTE_PRESENT);
1.1 nbrk 94: break;
95: }
96: /* Map all pages */
97: while (size > 0) {
98: if (pte_present(pgd, va)) {
99: /* Page table already exists for the address */
100: pte = pgd_to_pte(pgd, va);
101: } else {
102: ASSERT(pg_type != 0);
103: if ((pg = page_alloc(PAGE_SIZE)) == NULL) {
1.1.1.1.2.1! nbrk 104: DPRINTF(("Error: MMU mapping failed\n"));
1.1 nbrk 105: return -1;
106: }
107: pgd[PAGE_DIR(va)] =
1.1.1.1.2.1! nbrk 108: (uint32_t)pg | PDE_PRESENT | PDE_WRITE | PDE_USER;
1.1 nbrk 109: pte = phys_to_virt(pg);
110: memset(pte, 0, PAGE_SIZE);
111: }
112: /* Set new entry into page table */
1.1.1.1.2.1! nbrk 113: pte[PAGE_TABLE(va)] = (uint32_t)pa | pg_type;
1.1 nbrk 114:
115: /* Process next page */
116: pa += PAGE_SIZE;
117: va += PAGE_SIZE;
118: size -= PAGE_SIZE;
119: }
120: flush_tlb();
121: return 0;
122: }
123:
124: /*
125: * Create new page map.
1.1.1.1.2.1! nbrk 126: *
! 127: * Returns a page directory on success, or NULL on failure. This
! 128: * routine is called when new task is created. All page map must
! 129: * have the same kernel page table in it. So, the kernel page
! 130: * tables are copied to newly created map.
1.1 nbrk 131: */
132: pgd_t
133: mmu_newmap(void)
134: {
135: void *pg;
136: pgd_t pgd, kern_pgd;
1.1.1.1.2.1! nbrk 137: int i;
1.1 nbrk 138:
139: /* Allocate page directory */
140: if ((pg = page_alloc(PAGE_SIZE)) == NULL)
141: return NULL;
142: pgd = phys_to_virt(pg);
143: memset(pgd, 0, PAGE_SIZE);
144:
145: /* Copy kernel page tables */
146: kern_pgd = phys_to_virt(KERNEL_PGD);
147: i = PAGE_DIR(PAGE_OFFSET);
1.1.1.1.2.1! nbrk 148: memcpy(&pgd[i], &kern_pgd[i], (size_t)(1024 - i));
1.1 nbrk 149: return pgd;
150: }
151:
152: /*
153: * Delete all page map.
154: */
155: void
156: mmu_delmap(pgd_t pgd)
157: {
1.1.1.1.2.1! nbrk 158: int i;
1.1 nbrk 159: page_table_t pte;
160:
161: flush_tlb();
162:
163: /* Release all user page table */
164: for (i = 0; i < PAGE_DIR(PAGE_OFFSET); i++) {
165: pte = (page_table_t)pgd[i];
166: if (pte != 0)
1.1.1.1.2.1! nbrk 167: page_free((void *)((paddr_t)pte & PTE_ADDRESS),
1.1 nbrk 168: PAGE_SIZE);
169: }
170: /* Release page directory */
171: page_free(virt_to_phys(pgd), PAGE_SIZE);
172: }
173:
174: /*
175: * Switch to new page directory
176: *
177: * This is called when context is switched.
178: * Whole TLB are flushed automatically by loading
179: * CR3 register.
180: */
181: void
182: mmu_switch(pgd_t pgd)
183: {
1.1.1.1.2.1! nbrk 184: uint32_t phys = (uint32_t)virt_to_phys(pgd);
1.1 nbrk 185:
186: if (phys != get_cr3())
187: set_cr3(phys);
188: }
189:
190: /*
191: * Returns the physical address for the specified virtual address.
192: * This routine checks if the virtual area actually exist.
193: * It returns NULL if at least one page is not mapped.
194: */
195: void *
196: mmu_extract(pgd_t pgd, void *virt, size_t size)
197: {
198: page_table_t pte;
1.1.1.1.2.1! nbrk 199: vaddr_t start, end, pg;
1.1 nbrk 200:
201: start = PAGE_TRUNC(virt);
1.1.1.1.2.1! nbrk 202: end = PAGE_TRUNC((vaddr_t)virt + size - 1);
1.1 nbrk 203:
204: /* Check all pages exist */
205: for (pg = start; pg <= end; pg += PAGE_SIZE) {
206: if (!pte_present(pgd, pg))
207: return NULL;
208: pte = pgd_to_pte(pgd, pg);
209: if (!page_present(pte, pg))
210: return NULL;
211: }
212:
213: /* Get physical address */
214: pte = pgd_to_pte(pgd, start);
215: pg = pte_to_page(pte, start);
1.1.1.1.2.1! nbrk 216: return (void *)(pg + ((vaddr_t)virt - start));
1.1 nbrk 217: }
218:
/*
 * Initialize mmu
 *
 * Paging is already enabled in locore.S. And, physical address
 * 0-4M has been already mapped into kernel space in locore.S.
 * Now, all physical memory is mapped into kernel virtual address
 * as straight 1:1 mapping. User mode access is not allowed for
 * these kernel pages.
 * page_init() must be called before calling this routine.
 *
 * Note: This routine requires 4K bytes to map 4M bytes memory. So,
 * if the system has a lot of RAM, the "used memory" by kernel will
 * become large, too. For example, page table requires 512K bytes
 * for 512M bytes system RAM.
 */
void
mmu_init(void)
{
	pgd_t kern_pgd;
	int npages, nptes;
	uint32_t pte_entry;
	page_table_t pte;
	int pgd_index;
	int i, j;
	void *pg;

	kern_pgd = phys_to_virt(KERNEL_PGD);
	/* Total physical pages, and how many 1024-entry page
	   tables (one per 4M) are needed to cover them. */
	npages = (int)(boot_info->main_mem.size / PAGE_SIZE);
	nptes = (npages + 1023) / 1024;
	/* Kernel mappings start at the directory slot for PAGE_OFFSET */
	pgd_index = PAGE_DIR(PAGE_OFFSET);
	/* First PTE maps physical address 0, kernel read/write,
	   no PTE_USER bit: user mode cannot touch these pages. */
	pte_entry = 0 | PTE_PRESENT | PTE_WRITE;

	/*
	 * Build kernel page tables for whole physical pages.
	 */
	for (i = 0; i < nptes; i++) {
		/* Allocate new page table */
		if ((pg = page_alloc(PAGE_SIZE)) == NULL)
			panic("mmu_init: out of memory");
		pte = (vaddr_t *)phys_to_virt(pg);
		memset(pte, 0, PAGE_SIZE);

		/* Fill all entries in this page table; each entry
		   advances the 1:1 mapping by one page. The last
		   table may be only partially filled. */
		for (j = 0; j < 1024; j++) {
			pte[j] = pte_entry;
			pte_entry += PAGE_SIZE;
			if (--npages <= 0)
				break;
		}
		/* Set the page table address into page directory. */
		kern_pgd[pgd_index] = (uint32_t)pg | PDE_PRESENT | PDE_WRITE;
		pgd_index++;
	}
	/* Unmap address 0 for NULL pointer detection in kernel mode */
	pte = phys_to_virt(kern_pgd[PAGE_DIR(PAGE_OFFSET)] & PDE_ADDRESS);
	pte[0] = 0;

	/* Flush translation look-aside buffer */
	flush_tlb();
}
CVSweb