/*-
 * Copyright (c) 2005-2007, Kohsuke Ohtani
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * vm.c - virtual memory allocator
 */

/*
 * A task owns its private virtual address space. All threads in
 * a task share the same memory space.
 * When a new task is created, the address mapping of the parent
 * task is copied to the child task's map. At that point, read-only
 * regions are shared between the old and new maps.
 *
 * Since this kernel does not page memory out to backing storage,
 * allocated memory is guaranteed to remain contiguous and resident.
 * This keeps the kernel and drivers very simple.
 */
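
/*
 * Note (illustrative, not part of the original source): each address
 * space is managed as a circular, doubly-linked list of "struct
 * region" nodes, one per allocated or free area, with the list head
 * embedded in the vm_map (see region_init() below). Adjacent free
 * regions are merged by region_free().
 */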

#include <kernel.h>
#include <kmem.h>
#include <thread.h>
#include <page.h>
#include <task.h>
#include <sched.h>
#include <vm.h>

/* forward declarations */
static struct region *region_create(struct region *, void *, size_t);
static void region_delete(struct region *, struct region *);
static struct region *region_find(struct region *, void *, size_t);
static struct region *region_alloc(struct region *, size_t);
static void region_free(struct region *, struct region *);
static struct region *region_split(struct region *, struct region *,
				   void *, size_t);
static void region_init(struct region *);
static int do_allocate(vm_map_t, void **, size_t, int);
static int do_free(vm_map_t, void *);
static int do_attribute(vm_map_t, void *, int);
static int do_map(vm_map_t, void *, size_t, void **);
static vm_map_t do_fork(vm_map_t);


/* vm mapping for kernel task */
static struct vm_map kern_map;

/**
 * vm_allocate - allocate zero-filled memory at the specified address
 *
 * If the "anywhere" argument is true, the "addr" argument is
 * ignored and a suitable free area is found automatically.
 *
 * The allocated area is writable and user-accessible by
 * default. The "addr" and "size" arguments are rounded to
 * page boundaries.
 */
int
vm_allocate(task_t task, void **addr, size_t size, int anywhere)
{
	int err;
	void *uaddr;

	sched_lock();

	if (!task_valid(task)) {
		err = ESRCH;
		goto out;
	}
	if (task != cur_task() && !task_capable(CAP_MEMORY)) {
		err = EPERM;
		goto out;
	}
	if (umem_copyin(addr, &uaddr, sizeof(void *))) {
		err = EFAULT;
		goto out;
	}
	if (anywhere == 0 && !user_area(uaddr)) {
		err = EACCES;
		goto out;
	}

	err = do_allocate(task->map, &uaddr, size, anywhere);
	if (err == 0) {
		if (umem_copyout(&uaddr, addr, sizeof(void *)))
			err = EFAULT;
	}
 out:
	sched_unlock();
	return err;
}
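
/*
 * Illustrative usage from a user task (a sketch, not part of the
 * original source; task_self() and the 4 KiB size are assumptions):
 *
 *	void *buf = NULL;
 *
 *	if (vm_allocate(task_self(), &buf, 4096, 1) == 0)
 *		...	(buf now points to a zero-filled, writable area)
 */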

static int
do_allocate(vm_map_t map, void **addr, size_t size, int anywhere)
{
	struct region *reg;
	char *start, *end, *phys;

	if (size == 0)
		return EINVAL;

	/*
	 * Allocate region
	 */
	if (anywhere) {
		size = (size_t)PAGE_ALIGN(size);
		if ((reg = region_alloc(&map->head, size)) == NULL)
			return ENOMEM;
	} else {
		start = (char *)PAGE_TRUNC(*addr);
		end = (char *)PAGE_ALIGN(start + size);
		size = (size_t)(end - start);

		reg = region_find(&map->head, start, size);
		if (reg == NULL || !(reg->flags & REG_FREE))
			return EINVAL;

		reg = region_split(&map->head, reg, start, size);
		if (reg == NULL)
			return ENOMEM;
	}
	reg->flags = REG_READ | REG_WRITE;

	/*
	 * Allocate physical pages, and map them into virtual address
	 */
	if ((phys = page_alloc(size)) == 0)
		goto err1;

	if (mmu_map(map->pgd, phys, reg->addr, size, PG_WRITE))
		goto err2;

	reg->phys = phys;

	/* Zero fill */
	memset(phys_to_virt(phys), 0, reg->size);
	*addr = reg->addr;
	return 0;

 err2:
	page_free(phys, size);
 err1:
	region_free(&map->head, reg);
	return ENOMEM;
}

/*
 * Deallocate the memory region at the specified address.
 *
 * The "addr" argument points to a memory region previously
 * allocated through a call to vm_allocate() or vm_map(). The
 * number of bytes freed is the size of that allocated region.
 * If the previous or next region is free, the freed region is
 * merged with it to form one larger free region.
 */
int
vm_free(task_t task, void *addr)
{
	int err;

	sched_lock();
	if (!task_valid(task)) {
		err = ESRCH;
		goto out;
	}
	if (task != cur_task() && !task_capable(CAP_MEMORY)) {
		err = EPERM;
		goto out;
	}
	if (!user_area(addr)) {
		err = EFAULT;
		goto out;
	}

	err = do_free(task->map, addr);
 out:
	sched_unlock();
	return err;
}
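
/*
 * Illustrative pairing with vm_allocate() (a sketch, not part of the
 * original source): freeing releases the entire region that contains
 * the address, so only the base address needs to be kept:
 *
 *	void *buf = NULL;
 *
 *	if (vm_allocate(task, &buf, size, 1) == 0) {
 *		...
 *		vm_free(task, buf);
 *	}
 */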

static int
do_free(vm_map_t map, void *addr)
{
	struct region *reg;

	addr = (void *)PAGE_TRUNC(addr);

	/*
	 * Find the target region.
	 */
	reg = region_find(&map->head, addr, 1);
	if (reg == NULL || reg->addr != addr || (reg->flags & REG_FREE))
		return EINVAL;

	/*
	 * Unmap pages of the region.
	 */
	mmu_map(map->pgd, reg->phys, reg->addr, reg->size, PG_UNMAP);

	/*
	 * Relinquish the pages if the region is neither shared nor mapped.
	 */
	if (!(reg->flags & REG_SHARED) && !(reg->flags & REG_MAPPED))
		page_free(reg->phys, reg->size);

	region_free(&map->head, reg);
	return 0;
}

/*
 * Change the attribute of the specified virtual address region.
 *
 * The "addr" argument points to a memory region previously
 * allocated through a call to vm_allocate(). The attribute
 * type can be any combination of VMA_READ and VMA_WRITE.
 * Note: VMA_EXEC is not supported, yet.
 */
int
vm_attribute(task_t task, void *addr, int attr)
{
	int err;

	sched_lock();
	if (attr == 0 || attr & ~(VMA_READ | VMA_WRITE)) {
		err = EINVAL;
		goto out;
	}
	if (!task_valid(task)) {
		err = ESRCH;
		goto out;
	}
	if (task != cur_task() && !task_capable(CAP_MEMORY)) {
		err = EPERM;
		goto out;
	}
	if (!user_area(addr)) {
		err = EFAULT;
		goto out;
	}

	err = do_attribute(task->map, addr, attr);
 out:
	sched_unlock();
	return err;
}
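
/*
 * Illustrative usage (a sketch, not part of the original source):
 * write-protecting a region after filling it, much as vm_load()
 * below does for a text segment:
 *
 *	if (vm_allocate(task, &text, textsz, 0) == 0) {
 *		... copy the text image in ...
 *		err = vm_attribute(task, text, VMA_READ);
 *	}
 */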

static int
do_attribute(vm_map_t map, void *addr, int attr)
{
	struct region *reg;
	int new_flags = 0;
	void *old_addr, *new_addr = NULL;
	int map_type;

	addr = (void *)PAGE_TRUNC(addr);

	/*
	 * Find the target region.
	 */
	reg = region_find(&map->head, addr, 1);
	if (reg == NULL || reg->addr != addr || (reg->flags & REG_FREE)) {
		return EINVAL;	/* not allocated */
	}
	/*
	 * The attribute of a mapped region cannot be changed.
	 */
	if (reg->flags & REG_MAPPED)
		return EINVAL;

	/*
	 * Compare new and old flags.
	 */
	if (reg->flags & REG_WRITE) {
		if (!(attr & VMA_WRITE))
			new_flags = REG_READ;
	} else {
		if (attr & VMA_WRITE)
			new_flags = REG_READ | REG_WRITE;
	}
	if (new_flags == 0)
		return 0;	/* same attribute */

	map_type = (new_flags & REG_WRITE) ? PG_WRITE : PG_READ;

	/*
	 * If it is a shared region, duplicate it.
	 */
	if (reg->flags & REG_SHARED) {

		old_addr = reg->phys;

		/* Allocate new physical pages. */
		if ((new_addr = page_alloc(reg->size)) == 0)
			return ENOMEM;

		/* Copy source pages */
		memcpy(phys_to_virt(new_addr), phys_to_virt(old_addr),
		       reg->size);

		/* Map new region */
		if (mmu_map(map->pgd, new_addr, reg->addr, reg->size,
			    map_type)) {
			page_free(new_addr, reg->size);
			return ENOMEM;
		}
		reg->phys = new_addr;

		/* Unlink from shared list */
		reg->sh_prev->sh_next = reg->sh_next;
		reg->sh_next->sh_prev = reg->sh_prev;
		if (reg->sh_prev == reg->sh_next)
			reg->sh_prev->flags &= ~REG_SHARED;
		reg->sh_next = reg->sh_prev = reg;
	} else {
		if (mmu_map(map->pgd, reg->phys, reg->addr, reg->size,
			    map_type))
			return ENOMEM;
	}
	reg->flags = new_flags;
	return 0;
}

/**
 * vm_map - map another task's memory into the current task.
 *
 * Note: This routine does not support mapping at a specific address.
 */
int
vm_map(task_t target, void *addr, size_t size, void **alloc)
{
	int err;

	sched_lock();
	if (!task_valid(target)) {
		err = ESRCH;
		goto out;
	}
	if (target == cur_task()) {
		err = EINVAL;
		goto out;
	}
	if (!task_capable(CAP_MEMORY)) {
		err = EPERM;
		goto out;
	}
	if (!user_area(addr)) {
		err = EFAULT;
		goto out;
	}
	err = do_map(target->map, addr, size, alloc);
 out:
	sched_unlock();
	return err;
}
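
/*
 * Illustrative usage (a sketch, not part of the original source;
 * assumes the caller holds CAP_MEMORY and "taddr"/"len" describe
 * memory already allocated in "target"):
 *
 *	void *local = NULL;
 *
 *	if (vm_map(target, taddr, len, &local) == 0)
 *		...	(target's memory is now visible at "local")
 */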

static int
do_map(vm_map_t map, void *addr, size_t size, void **alloc)
{
	vm_map_t curmap;
	char *start, *end, *phys;
	size_t offset;
	struct region *reg, *cur, *tgt;
	task_t self;
	int map_type;
	void *tmp;

	if (size == 0)
		return EINVAL;

	/* Check that the result pointer is writable. */
	tmp = NULL;
	if (umem_copyout(&tmp, alloc, sizeof(void *)))
		return EFAULT;

	start = (char *)PAGE_TRUNC(addr);
	end = (char *)PAGE_ALIGN((char *)addr + size);
	size = (size_t)(end - start);
	offset = (size_t)((char *)addr - start);

	/*
	 * Find the region that includes the target address.
	 */
	reg = region_find(&map->head, start, size);
	if (reg == NULL || (reg->flags & REG_FREE))
		return EINVAL;	/* not allocated */
	tgt = reg;

	/*
	 * Find a free region in the current task.
	 */
	self = cur_task();
	curmap = self->map;
	if ((reg = region_alloc(&curmap->head, size)) == NULL)
		return ENOMEM;
	cur = reg;

	/*
	 * Try to map into current memory.
	 */
	if (tgt->flags & REG_WRITE)
		map_type = PG_WRITE;
	else
		map_type = PG_READ;

	phys = (char *)tgt->phys + (start - (char *)tgt->addr);
	if (mmu_map(curmap->pgd, phys, cur->addr, size, map_type)) {
		region_free(&curmap->head, reg);
		return ENOMEM;
	}

	cur->flags = tgt->flags | REG_MAPPED;
	cur->phys = phys;

	tmp = (char *)cur->addr + offset;
	umem_copyout(&tmp, alloc, sizeof(void *));
	return 0;
}

/*
 * Create a new virtual memory space.
 * No memory is inherited.
 *
 * Must be called with the scheduler locked.
 */
vm_map_t
vm_create(void)
{
	vm_map_t map;

	/* Allocate new map structure */
	if ((map = kmem_alloc(sizeof(struct vm_map))) == NULL)
		return NULL;

	map->refcnt = 1;

	/* Allocate new page directory */
	if ((map->pgd = mmu_newmap()) == NULL) {
		kmem_free(map);
		return NULL;
	}
	region_init(&map->head);
	return map;
}

/*
 * Terminate the specified virtual memory space.
 * This is called when a task is terminated.
 */
void
vm_terminate(vm_map_t map)
{
	struct region *reg, *tmp;

	if (--map->refcnt >= 1)
		return;

	sched_lock();
	reg = &map->head;
	do {
		if (reg->flags != REG_FREE) {
			/* Unmap region */
			mmu_map(map->pgd, reg->phys, reg->addr,
				reg->size, PG_UNMAP);

			/* Free pages if they are neither shared nor mapped */
			if (!(reg->flags & REG_SHARED) &&
			    !(reg->flags & REG_MAPPED)) {
				page_free(reg->phys, reg->size);
			}
		}
		tmp = reg;
		reg = reg->next;
		region_delete(&map->head, tmp);
	} while (reg != &map->head);

	mmu_delmap(map->pgd);
	kmem_free(map);
	sched_unlock();
}

/*
 * Duplicate the specified virtual memory space.
 * This is called when a new task is created.
 *
 * Returns the new map, or NULL on failure.
 *
 * All regions of the original memory map are copied to the new
 * memory map. Read-only, executable, and shared regions do not
 * need to be copied; they are physically shared with the
 * original map.
 */
vm_map_t
vm_fork(vm_map_t org_map)
{
	vm_map_t new_map;

	sched_lock();
	new_map = do_fork(org_map);
	sched_unlock();
	return new_map;
}
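
/*
 * How do_fork() treats each parent region (an illustrative summary,
 * not part of the original source):
 *
 *	parent region			child action
 *	------------------------	------------------------------
 *	free				region struct copied, no pages
 *	writable or mapped		new pages allocated and copied
 *	read-only and not mapped	pages shared, REG_SHARED set
 */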

static vm_map_t
do_fork(vm_map_t org_map)
{
	vm_map_t new_map;
	struct region *tmp, *src, *dest;
	int map_type;

	if ((new_map = vm_create()) == NULL)
		return NULL;
	/*
	 * Copy all regions
	 */
	tmp = &new_map->head;
	src = &org_map->head;

	/*
	 * Copy top region
	 */
	*tmp = *src;
	tmp->next = tmp->prev = tmp;

	if (src == src->next)	/* Blank memory? */
		return new_map;

	do {
		ASSERT(src != NULL);
		ASSERT(src->next != NULL);

		if (src == &org_map->head) {
			dest = tmp;
		} else {
			/* Create new region struct */
			dest = kmem_alloc(sizeof(struct region));
			if (dest == NULL)
				return NULL;

			*dest = *src;	/* memcpy */

			dest->prev = tmp;
			dest->next = tmp->next;
			tmp->next->prev = dest;
			tmp->next = dest;
			tmp = dest;
		}
		if (src->flags == REG_FREE) {
			/*
			 * Skip free region
			 */
		} else {
			/* Check if the region can be shared */
			if (!(src->flags & REG_WRITE) &&
			    !(src->flags & REG_MAPPED)) {
				dest->flags |= REG_SHARED;
			}

			if (!(dest->flags & REG_SHARED)) {
				/* Allocate new physical pages. */
				dest->phys = page_alloc(src->size);
				if (dest->phys == 0)
					return NULL;

				/* Copy source pages */
				memcpy(phys_to_virt(dest->phys),
				       phys_to_virt(src->phys), src->size);
			}
			/* Map the region to virtual address */
			if (dest->flags & REG_WRITE)
				map_type = PG_WRITE;
			else
				map_type = PG_READ;

			if (mmu_map(new_map->pgd, dest->phys, dest->addr,
				    dest->size, map_type))
				return NULL;
		}
		src = src->next;
	} while (src != &org_map->head);

	/*
	 * No error. Now, link all shared regions
	 */
	dest = &new_map->head;
	src = &org_map->head;
	do {
		if (dest->flags & REG_SHARED) {
			src->flags |= REG_SHARED;
			dest->sh_prev = src;
			dest->sh_next = src->sh_next;
			src->sh_next->sh_prev = dest;
			src->sh_next = dest;
		}
		dest = dest->next;
		src = src->next;
	} while (src != &org_map->head);
	return new_map;
}

/*
 * Switch VM mapping.
 *
 * Since the kernel task has no user-mode memory image, we do
 * not have to set up its page directory. Thus, the idle thread
 * and interrupt threads can be switched to quickly.
 */
void
vm_switch(vm_map_t map)
{

	if (map != &kern_map)
		mmu_switch(map->pgd);
}

/*
 * Increment reference count of VM mapping.
 */
int
vm_reference(vm_map_t map)
{

	map->refcnt++;
	return 0;
}

/*
 * Load a task image for a boot task.
 * Returns 0 on success, -1 on failure.
 */
int
vm_load(vm_map_t map, struct module *mod, void **stack)
{
	char *src;
	void *text, *data;

	DPRINTF(("Loading task: %s\n", mod->name));

	/*
	 * We have to switch the VM mapping to touch the virtual
	 * memory space of the target task without a page fault.
	 */
	vm_switch(map);

	src = phys_to_virt(mod->phys);
	text = (void *)mod->text;
	data = (void *)mod->data;

	/*
	 * Create text segment
	 */
	if (do_allocate(map, &text, mod->textsz, 0))
		return -1;
	memcpy(text, src, mod->textsz);
	if (do_attribute(map, text, VMA_READ))
		return -1;

	/*
	 * Create data & BSS segment
	 */
	if (mod->datasz + mod->bsssz != 0) {
		if (do_allocate(map, &data, mod->datasz + mod->bsssz, 0))
			return -1;
		src = src + (mod->data - mod->text);
		memcpy(data, src, mod->datasz);
	}
	/*
	 * Create stack
	 */
	*stack = (void *)(USER_MAX - USTACK_SIZE);
	if (do_allocate(map, stack, USTACK_SIZE, 0))
		return -1;

	/* Free original pages */
	page_free((void *)mod->phys, mod->size);
	return 0;
}
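
/*
 * Resulting user address space (an illustrative summary, not part
 * of the original source):
 *
 *	mod->text ...			read-only text
 *	mod->data ...			read/write data + zero-filled BSS
 *	USER_MAX - USTACK_SIZE ...	read/write user stack
 */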

/*
 * Translate a virtual address of the current task to a physical
 * address. Returns the physical address on success, or NULL if
 * the memory is not mapped.
 */
void *
vm_translate(void *addr, size_t size)
{
	task_t self = cur_task();

	return mmu_extract(self->map->pgd, addr, size);
}

/*
 * Initialize region
 */
static void
region_init(struct region *reg)
{

	reg->next = reg->prev = reg;
	reg->sh_next = reg->sh_prev = reg;
	reg->addr = (void *)PAGE_SIZE;
	reg->phys = 0;
	reg->size = USER_MAX - PAGE_SIZE;
	reg->flags = REG_FREE;
}

/*
 * Create new free region after the specified region.
 * Returns region on success, or NULL on failure.
 */
static struct region *
region_create(struct region *prev, void *addr, size_t size)
{
	struct region *reg;

	if ((reg = kmem_alloc(sizeof(*reg))) == NULL)
		return NULL;

	reg->addr = addr;
	reg->size = size;
	reg->phys = 0;
	reg->flags = REG_FREE;
	reg->sh_next = reg->sh_prev = reg;

	reg->next = prev->next;
	reg->prev = prev;
	prev->next->prev = reg;
	prev->next = reg;
	return reg;
}

/*
 * Delete specified region
 */
static void
region_delete(struct region *head, struct region *reg)
{

	/* If it is a shared region, unlink from shared list */
	if (reg->flags & REG_SHARED) {
		reg->sh_prev->sh_next = reg->sh_next;
		reg->sh_next->sh_prev = reg->sh_prev;
		if (reg->sh_prev == reg->sh_next)
			reg->sh_prev->flags &= ~REG_SHARED;
	}
	if (head != reg)
		kmem_free(reg);
}

/*
 * Find the region that contains the specified area.
 */
static struct region *
region_find(struct region *head, void *addr, size_t size)
{
	struct region *reg;

	reg = head;
	do {
		if (reg->addr <= addr &&
		    (char *)reg->addr + reg->size >= (char *)addr + size) {
			return reg;
		}
		reg = reg->next;
	} while (reg != head);
	return NULL;
}

/*
 * Allocate a free region of the specified size.
 */
static struct region *
region_alloc(struct region *head, size_t size)
{
	struct region *reg;

	reg = head;
	do {
		if ((reg->flags & REG_FREE) && reg->size >= size) {
			if (reg->size != size) {
				/* Split this region and return its head */
				if (region_create(reg,
				    (char *)reg->addr + size,
				    reg->size - size) == NULL)
					return NULL;
			}
			reg->size = size;
			return reg;
		}
		reg = reg->next;
	} while (reg != head);
	return NULL;
}
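
/*
 * First-fit example (illustrative, not part of the original source):
 * allocating two pages from an eight-page free region splits it in
 * place, reusing the original node for the allocation:
 *
 *	before:	[free, 8 pages]
 *	after:	[allocated, 2 pages][free, 6 pages]
 */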

/*
 * Free the specified region, merging it with any adjacent
 * free regions.
 */
static void
region_free(struct region *head, struct region *reg)
{
	struct region *prev, *next;

	ASSERT(reg->flags != REG_FREE);

	/*
	 * If it is a shared region, unlink from the shared list
	 * before the flags are cleared.
	 */
	if (reg->flags & REG_SHARED) {
		reg->sh_prev->sh_next = reg->sh_next;
		reg->sh_next->sh_prev = reg->sh_prev;
		if (reg->sh_prev == reg->sh_next)
			reg->sh_prev->flags &= ~REG_SHARED;
	}

	reg->flags = REG_FREE;

	/* If next region is free, merge with it. */
	next = reg->next;
	if (next != head && (next->flags & REG_FREE)) {
		reg->next = next->next;
		next->next->prev = reg;
		reg->size += next->size;
		kmem_free(next);
	}

	/* If previous region is free, merge with it. */
	prev = reg->prev;
	if (reg != head && (prev->flags & REG_FREE)) {
		prev->next = reg->next;
		reg->next->prev = prev;
		prev->size += reg->size;
		kmem_free(reg);
	}
}
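
/*
 * Coalescing example (illustrative, not part of the original source):
 * freeing the middle region merges all three nodes into one:
 *
 *	before:	[free][allocated][free]
 *	after:	[free spanning all three]
 */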

/*
 * Split a region at the specified address/size.
 */
static struct region *
region_split(struct region *head, struct region *reg, void *addr,
	     size_t size)
{
	struct region *prev, *next;
	size_t diff;

	/*
	 * Split off the preceding part if addr is not at the
	 * start of the region.
	 */
	prev = NULL;
	if (reg->addr != addr) {
		prev = reg;
		diff = (size_t)((char *)addr - (char *)reg->addr);
		reg = region_create(prev, addr, prev->size - diff);
		if (reg == NULL)
			return NULL;
		prev->size = diff;
	}

	/*
	 * Split off the following part if the requested size is
	 * smaller than the region.
	 */
	if (reg->size != size) {
		next = region_create(reg, (char *)reg->addr + size,
				     reg->size - size);
		if (next == NULL) {
			if (prev) {
				/* Undo previous region_create() */
				region_free(head, reg);
			}
			return NULL;
		}
		reg->size = size;
	}
	reg->flags = 0;
	return reg;
}
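
/*
 * Example (illustrative, not part of the original source): splitting
 * a free region [0x1000..0x8000) at addr=0x3000, size=0x2000 yields
 *
 *	[free 0x1000..0x3000)[allocated 0x3000..0x5000)[free 0x5000..0x8000)
 *
 * where the middle node is returned with its flags cleared.
 */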

#ifdef DEBUG
static void
vm_dump_one(task_t task)
{
	vm_map_t map;
	struct region *reg;
	char flags[6];
	size_t total = 0;

	printf("task=%x map=%x name=%s\n", task, task->map,
	       task->name != NULL ? task->name : "no name");
	printf(" region   virtual  physical size     flags\n");
	printf(" -------- -------- -------- -------- -----\n");

	map = task->map;
	reg = &map->head;
	do {
		if (reg->flags != REG_FREE) {
			strlcpy(flags, "-----", 6);
			if (reg->flags & REG_READ)
				flags[0] = 'R';
			if (reg->flags & REG_WRITE)
				flags[1] = 'W';
			if (reg->flags & REG_EXEC)
				flags[2] = 'E';
			if (reg->flags & REG_SHARED)
				flags[3] = 'S';
			if (reg->flags & REG_MAPPED)
				flags[4] = 'M';

			printf(" %08x %08x %08x %8x %s\n", reg,
			       reg->addr, reg->phys, reg->size, flags);
			total += reg->size;
		}
		reg = reg->next;
	} while (reg != &map->head);	/* Process all regions */
	printf(" *total=%dK bytes\n\n", total / 1024);
}

void
vm_dump(void)
{
	list_t n;
	task_t task;

	printf("\nVM dump:\n");
	n = list_first(&kern_task.link);
	while (n != &kern_task.link) {
		task = list_entry(n, struct task, link);
		vm_dump_one(task);
		n = list_next(n);
	}
}
#endif

void
vm_init(void)
{
	pgd_t pgd;

	/*
	 * Setup vm mapping for kernel task.
	 */
	pgd = mmu_newmap();
	ASSERT(pgd != NULL);
	kern_map.pgd = pgd;
	mmu_switch(pgd);
	region_init(&kern_map.head);
	kern_task.map = &kern_map;
}