/*-
 * Copyright (c) 2005-2007, Kohsuke Ohtani
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * vm.c - virtual memory manager
 */

/*
 * A task owns its private virtual address space, and all threads
 * in a task share that same memory space.
 * When a new task is created, the address mapping of the parent
 * task is copied to the child task's map. At that point, the
 * read-only space is shared with the old map.
 *
 * Since this kernel does not page out to physical storage, the
 * allocated memory is guaranteed to be always contiguous and
 * resident. This keeps the kernel and its drivers very simple.
 */
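
/*
 * Illustrative sketch (not part of the original source, and glossing
 * over user/kernel pointer handling): because allocated memory is
 * always contiguous and resident, a task can translate a buffer
 * address once and reuse the result, e.g. to program a DMA engine.
 * "BUF_SIZE" and "dma_start()" are hypothetical names used only for
 * this example.
 *
 *	void *buf, *pa;
 *
 *	if (vm_allocate(cur_task(), &buf, BUF_SIZE, 1) == 0) {
 *		pa = vm_translate(buf, BUF_SIZE);
 *		if (pa != NULL)
 *			dma_start(pa, BUF_SIZE);
 *	}
 */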

#include <kernel.h>
#include <kmem.h>
#include <thread.h>
#include <page.h>
#include <task.h>
#include <sched.h>
#include <vm.h>

#ifdef CONFIG_VMTRACE
static void vm_error(const char *, int);
#define LOG(x)		printk x
#define CHK(fn,x)	do { if (x) vm_error(fn, x); } while (0)
#else
#define LOG(x)
#define CHK(fn,x)
#endif

/* forward declarations */
static struct region *region_create(struct region *, u_long, size_t);
static void region_delete(struct region *, struct region *);
static struct region *region_find(struct region *, u_long, size_t);
static struct region *region_alloc(struct region *, size_t);
static void region_free(struct region *, struct region *);
static struct region *region_split(struct region *, struct region *,
				   u_long, size_t);
static void region_init(struct region *);
static int do_allocate(vm_map_t, void **, size_t, int);
static int do_free(vm_map_t, void *);
static int do_attribute(vm_map_t, void *, int);
static int do_map(vm_map_t, void *, size_t, void **);
static vm_map_t do_fork(vm_map_t);

/* vm mapping for kernel task */
static struct vm_map kern_map;

/**
 * vm_allocate - allocate zero-filled memory at the specified address
 * @task: task id to allocate memory for
 * @addr: requested address; the allocated address is returned here.
 * @size: allocation size
 * @anywhere: if true, the "addr" argument is ignored and the
 *            address of a free space is found automatically.
 *
 * The allocated area has the writable, user-access attribute by
 * default. The "addr" and "size" arguments are adjusted to page
 * boundaries.
 */
int
vm_allocate(task_t task, void **addr, size_t size, int anywhere)
{
	int err;
	void *uaddr;

	LOG(("vm_allocate: task=%s addr=%x size=%x anywhere=%d\n",
	     task->name ? task->name : "no name", *addr, size, anywhere));

	sched_lock();

	if (!task_valid(task)) {
		err = ESRCH;
	} else if (task != cur_task() && !task_capable(CAP_MEMORY)) {
		err = EPERM;
	} else if (umem_copyin(addr, &uaddr, sizeof(void *))) {
		err = EFAULT;
	} else if (anywhere == 0 && !user_area(*addr)) {
		err = EACCES;
	} else {
		err = do_allocate(task->map, &uaddr, size, anywhere);
		if (err == 0) {
			if (umem_copyout(&uaddr, addr, sizeof(void *)))
				err = EFAULT;
		}
	}
	sched_unlock();
	CHK("vm_allocate", err);
	return err;
}
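
/*
 * Example (a minimal sketch, not part of the original source):
 * allocating one zero-filled page anywhere in a task's address
 * space.  On success "buf" points to a zero-filled, page-aligned
 * area that is readable and writable by default.
 *
 *	void *buf = NULL;
 *	int err;
 *
 *	err = vm_allocate(task, &buf, PAGE_SIZE, 1);
 */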

static int
do_allocate(vm_map_t map, void **addr, size_t size, int anywhere)
{
	struct region *reg;
	u_long start, end, phys;

	if (size == 0)
		return EINVAL;

	/*
	 * Allocate region
	 */
	if (anywhere) {
		size = (size_t)PAGE_ALIGN(size);
		if ((reg = region_alloc(&map->head, size)) == NULL)
			return ENOMEM;
	} else {
		start = PAGE_TRUNC(*addr);
		end = PAGE_ALIGN(start + size);
		size = (size_t)(end - start);

		reg = region_find(&map->head, start, size);
		if (reg == NULL || !(reg->flags & REG_FREE))
			return EINVAL;

		reg = region_split(&map->head, reg, start, size);
		if (reg == NULL)
			return ENOMEM;
	}

	reg->flags = REG_READ | REG_WRITE;

	/*
	 * Allocate physical pages, and map them into virtual address
	 */
	if ((phys = (u_long)page_alloc(size)) == 0)
		goto err1;

	if (mmu_map(map->pgd, (void *)phys, (void *)reg->addr,
		    size, PG_WRITE))
		goto err2;

	reg->phys = phys;

	/* Zero fill */
	memset(phys_to_virt(phys), 0, reg->size);
	*addr = (void *)reg->addr;
	return 0;

 err2:
	page_free((void *)phys, size);
 err1:
	region_free(&map->head, reg);
	return ENOMEM;
}
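
/*
 * Worked example of the alignment in do_allocate(), assuming a
 * 4096-byte (0x1000) page size: a fixed-address request with
 * *addr = 0x80001234 and size = 0x100 is expanded to
 *
 *	start = PAGE_TRUNC(0x80001234)         = 0x80001000
 *	end   = PAGE_ALIGN(0x80001000 + 0x100) = 0x80002000
 *	size  = end - start                    = 0x1000
 *
 * so the entire page containing the requested bytes is allocated.
 */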

/*
 * Deallocate the memory region at the specified address.
 *
 * The "addr" argument points to a memory region previously
 * allocated through a call to vm_allocate() or vm_map(). The number
 * of bytes freed is the size of that allocated region.
 * If the previous or the next region is free, the deallocated
 * region is merged with it to form a larger free region.
 */
int
vm_free(task_t task, void *addr)
{
	int err;

	LOG(("vm_free: task=%s addr=%x\n",
	     task->name ? task->name : "no name", addr));

	sched_lock();
	if (!task_valid(task)) {
		err = ESRCH;
	} else if (task != cur_task() && !task_capable(CAP_MEMORY)) {
		err = EPERM;
	} else if (!user_area(addr)) {
		err = EFAULT;
	} else {
		err = do_free(task->map, addr);
	}
	sched_unlock();
	CHK("vm_free", err);
	return err;
}

static int
do_free(vm_map_t map, void *addr)
{
	struct region *reg;

	addr = (void *)PAGE_TRUNC(addr);

	/*
	 * Find the target region.
	 */
	reg = region_find(&map->head, (u_long)addr, 1);
	if (reg == NULL || reg->addr != (u_long)addr ||
	    (reg->flags & REG_FREE))
		return EINVAL;

	/*
	 * Unmap the pages of the region.
	 */
	mmu_map(map->pgd, (void *)reg->phys, (void *)reg->addr,
		reg->size, PG_UNMAP);

	/*
	 * Free the pages if the region is neither shared nor mapped.
	 */
	if (!(reg->flags & REG_SHARED) && !(reg->flags & REG_MAPPED))
		page_free((void *)reg->phys, reg->size);

	region_free(&map->head, reg);
	return 0;
}

/*
 * Change the attribute of the specified virtual address.
 *
 * The "addr" argument points to a memory region previously allocated
 * through a call to vm_allocate(). The attribute type can be any
 * combination of VMA_READ and VMA_WRITE.
 * Note: VMA_EXEC is not supported, yet.
 */
int
vm_attribute(task_t task, void *addr, int attr)
{
	int err;

	LOG(("vm_attribute: task=%s addr=%x attr=%x\n",
	     task->name ? task->name : "no name", addr, attr));

	sched_lock();
	if (attr == 0 || attr & ~(VMA_READ | VMA_WRITE)) {
		err = EINVAL;
	} else if (!task_valid(task)) {
		err = ESRCH;
	} else if (task != cur_task() && !task_capable(CAP_MEMORY)) {
		err = EPERM;
	} else if (!user_area(addr)) {
		err = EFAULT;
	} else {
		err = do_attribute(task->map, addr, attr);
	}
	sched_unlock();
	CHK("vm_attribute", err);
	return err;
}

static int
do_attribute(vm_map_t map, void *addr, int attr)
{
	struct region *reg;
	int new_flags = 0;
	u_long old_addr, new_addr = 0;
	int map_type;

	addr = (void *)PAGE_TRUNC(addr);

	/*
	 * Find the target region.
	 */
	reg = region_find(&map->head, (u_long)addr, 1);
	if (reg == NULL || reg->addr != (u_long)addr ||
	    (reg->flags & REG_FREE)) {
		return EINVAL;	/* not allocated */
	}
	/*
	 * The attribute of a mapped region cannot be changed.
	 */
	if (reg->flags & REG_MAPPED)
		return EINVAL;

	/*
	 * Compare the new and old flags.
	 */
	if (reg->flags & REG_WRITE) {
		if (!(attr & VMA_WRITE))
			new_flags = REG_READ;
	} else {
		if (attr & VMA_WRITE)
			new_flags = REG_READ | REG_WRITE;
	}
	if (new_flags == 0)
		return 0;	/* same attribute */

	map_type = (new_flags & REG_WRITE) ? PG_WRITE : PG_READ;

	/*
	 * If it is a shared region, duplicate it first.
	 */
	if (reg->flags & REG_SHARED) {

		old_addr = reg->phys;

		/* Allocate a new physical page. */
		if ((new_addr = (u_long)page_alloc(reg->size)) == 0)
			return ENOMEM;

		/* Copy the source page */
		memcpy(phys_to_virt(new_addr), phys_to_virt(old_addr),
		       reg->size);

		/* Map the new region */
		if (mmu_map(map->pgd, (void *)new_addr, (void *)reg->addr,
			    reg->size, map_type)) {
			page_free((void *)new_addr, reg->size);
			return ENOMEM;
		}
		reg->phys = new_addr;

		/* Unlink from the shared list */
		reg->sh_prev->sh_next = reg->sh_next;
		reg->sh_next->sh_prev = reg->sh_prev;
		if (reg->sh_prev == reg->sh_next)
			reg->sh_prev->flags &= ~REG_SHARED;
		reg->sh_next = reg->sh_prev = reg;
	} else {
		if (mmu_map(map->pgd, (void *)reg->phys, (void *)reg->addr,
			    reg->size, map_type))
			return ENOMEM;
	}
	reg->flags = new_flags;
	return 0;
}
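
/*
 * Example (a minimal sketch, not part of the original source):
 * write-protecting a previously allocated region.  If the region is
 * shared with another map, do_attribute() above duplicates the
 * physical pages first, so the other owners keep their original
 * attributes.
 *
 *	err = vm_attribute(task, addr, VMA_READ);
 */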

/**
 * vm_map - map another task's memory into the current task.
 * @target: memory owner
 * @addr: target address
 * @size: map size
 * @alloc: map address returned
 *
 * Note: This routine does not support mapping at a specific address.
 */
int
vm_map(task_t target, void *addr, size_t size, void **alloc)
{
	int err;

	LOG(("vm_map: task=%s addr=%x size=%x\n",
	     target->name ? target->name : "no name", addr, size));

	sched_lock();
	if (!task_valid(target)) {
		err = ESRCH;
	} else if (target == cur_task()) {
		err = EINVAL;
	} else if (!task_capable(CAP_MEMORY)) {
		err = EPERM;
	} else if (!user_area(addr)) {
		err = EFAULT;
	} else {
		err = do_map(target->map, addr, size, alloc);
	}
	sched_unlock();
	CHK("vm_map", err);
	return err;
}
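
/*
 * Example (a minimal sketch, not part of the original source): a
 * server task with CAP_MEMORY making a client's buffer visible in
 * its own address space.  "client", "cbuf" and "len" are
 * hypothetical; the returned "local" pointer is only valid within
 * the current task.
 *
 *	void *local;
 *
 *	if (vm_map(client, cbuf, len, &local) == 0) {
 *		... access the client data through "local" ...
 *	}
 */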

static int
do_map(vm_map_t map, void *addr, size_t size, void **alloc)
{
	vm_map_t curmap;
	u_long start, end, offset, phys;
	struct region *reg, *cur, *tgt;
	int map_type;
	void *tmp;

	if (size == 0)
		return EINVAL;

	/* Check for a fault before doing the real work. */
	tmp = NULL;
	if (umem_copyout(&tmp, alloc, sizeof(void *)))
		return EFAULT;

	start = PAGE_TRUNC(addr);
	end = PAGE_ALIGN((u_long)addr + size);
	size = (size_t)(end - start);
	offset = (u_long)addr - start;

	/*
	 * Find the region that includes the target address.
	 */
	reg = region_find(&map->head, start, size);
	if (reg == NULL || (reg->flags & REG_FREE))
		return EINVAL;	/* not allocated */
	tgt = reg;

	/*
	 * Find a free region in the current task.
	 */
	curmap = cur_task()->map;
	if ((reg = region_alloc(&curmap->head, size)) == NULL)
		return ENOMEM;
	cur = reg;

	/*
	 * Try to map it into the current memory space.
	 */
	if (tgt->flags & REG_WRITE)
		map_type = PG_WRITE;
	else
		map_type = PG_READ;

	phys = tgt->phys + (start - tgt->addr);
	if (mmu_map(curmap->pgd, (void *)phys, (void *)cur->addr,
		    size, map_type)) {
		region_free(&curmap->head, reg);
		return ENOMEM;
	}

	cur->flags = tgt->flags | REG_MAPPED;
	cur->phys = phys;

	tmp = (void *)((u_long)cur->addr + offset);
	umem_copyout(&tmp, alloc, sizeof(void *));
	return 0;
}

/*
 * Create new virtual memory space.
 * No memory is inherited.
 *
 * Must be called with scheduler locked.
 */
vm_map_t
vm_create(void)
{
	vm_map_t map;

	/* Allocate new map structure */
	if ((map = kmem_alloc(sizeof(struct vm_map))) == NULL)
		return NULL;

	map->ref_count = 1;

	/* Allocate new page directory */
	if ((map->pgd = mmu_newmap()) == NULL) {
		kmem_free(map);
		return NULL;
	}
	region_init(&map->head);
	return map;
}

/*
 * Terminate the specified virtual memory space.
 * This is called when a task is terminated.
 */
void
vm_terminate(vm_map_t map)
{
	struct region *reg, *tmp;

	if (--map->ref_count >= 1)
		return;

	sched_lock();
	reg = &map->head;
	do {
		if (reg->flags != REG_FREE) {
			/* Unmap the region */
			mmu_map(map->pgd, (void *)reg->phys,
				(void *)reg->addr, reg->size, PG_UNMAP);

			/* Free the region if it is neither shared nor mapped */
			if (!(reg->flags & REG_SHARED) &&
			    !(reg->flags & REG_MAPPED)) {
				page_free((void *)reg->phys, reg->size);
			}
		}
		tmp = reg;
		reg = reg->next;
		region_delete(&map->head, tmp);
	} while (reg != &map->head);

	mmu_delmap(map->pgd);
	kmem_free(map);
	sched_unlock();
}

/*
 * Duplicate the specified virtual memory space.
 * This is called when a new task is created.
 *
 * Returns the new map, or NULL on failure.
 *
 * All regions of the original memory map are copied to the new
 * memory map. If a region is read-only, executable, or shared,
 * it does not need to be copied; such regions are physically
 * shared with the original map.
 */
vm_map_t
vm_fork(vm_map_t org_map)
{
	vm_map_t new_map;

	sched_lock();
	new_map = do_fork(org_map);
	sched_unlock();
	return new_map;
}

static vm_map_t
do_fork(vm_map_t org_map)
{
	vm_map_t new_map;
	struct region *tmp, *src, *dest;
	int map_type;

	if ((new_map = vm_create()) == NULL)
		return NULL;
	/*
	 * Copy all regions
	 */
	tmp = &new_map->head;
	src = &org_map->head;

	/*
	 * Copy top region
	 */
	*tmp = *src;
	tmp->next = tmp->prev = tmp;

	if (src == src->next)	/* Blank memory? */
		return new_map;

	do {
		ASSERT(src != NULL);
		ASSERT(src->next != NULL);

		if (src == &org_map->head) {
			dest = tmp;
		} else {
			/* Create new region struct */
			dest = kmem_alloc(sizeof(struct region));
			if (dest == NULL)
				return NULL;

			*dest = *src;	/* memcpy */

			dest->prev = tmp;
			dest->next = tmp->next;
			tmp->next->prev = dest;
			tmp->next = dest;
			tmp = dest;
		}
		/* Skip free region */
		if (src->flags == REG_FREE) {
			src = src->next;
			continue;
		}
		/* Check if the region can be shared */
		if (!(src->flags & REG_WRITE) && !(src->flags & REG_MAPPED))
			dest->flags |= REG_SHARED;

		if (!(dest->flags & REG_SHARED)) {
			/* Allocate new physical page. */
			dest->phys = (u_long)page_alloc(src->size);
			if (dest->phys == 0)
				return NULL;

			/* Copy source page */
			memcpy(phys_to_virt(dest->phys),
			       phys_to_virt(src->phys), src->size);
		}
		/* Map the region to virtual address */
		if (dest->flags & REG_WRITE)
			map_type = PG_WRITE;
		else
			map_type = PG_READ;

		if (mmu_map(new_map->pgd, (void *)dest->phys,
			    (void *)dest->addr, dest->size, map_type))
			return NULL;

		src = src->next;
	} while (src != &org_map->head);

	/*
	 * No error. Now, link all shared regions
	 */
	dest = &new_map->head;
	src = &org_map->head;
	do {
		if (dest->flags & REG_SHARED) {
			src->flags |= REG_SHARED;
			dest->sh_prev = src;
			dest->sh_next = src->sh_next;
			src->sh_next->sh_prev = dest;
			src->sh_next = dest;
		}
		dest = dest->next;
		src = src->next;
	} while (src != &org_map->head);
	return new_map;
}

/*
 * Switch VM mapping.
 *
 * Since a kernel task does not have a user mode memory image, we
 * don't have to set up the page directory for it. Thus, the idle
 * thread and interrupt threads can be switched quickly.
 */
void
vm_switch(vm_map_t map)
{

	if (map != &kern_map)
		mmu_switch(map->pgd);
}

/*
 * Increment reference count of VM mapping.
 */
int
vm_reference(vm_map_t map)
{

	map->ref_count++;
	return 0;
}

/*
 * Load the task image for a boot task.
 * Returns 0 on success, -1 on failure.
 */
int
vm_load(vm_map_t map, struct module *m, void **stack)
{
	u_long src;
	void *text, *data;

	printk("Loading task: %s\n", m->name);

	/*
	 * We have to switch the VM mapping to touch the virtual
	 * memory space of the target task without a page fault.
	 */
	vm_switch(map);

	src = (u_long)phys_to_virt(m->phys);
	text = (void *)m->text;
	data = (void *)m->data;

	/*
	 * Create the text segment.
	 */
	if (do_allocate(map, &text, m->textsz, 0))
		return -1;
	memcpy(text, (void *)src, m->textsz);
	if (do_attribute(map, text, VMA_READ))
		return -1;

	/*
	 * Create the data & BSS segment.
	 */
	if (m->datasz + m->bsssz != 0) {
		if (do_allocate(map, &data, m->datasz + m->bsssz, 0))
			return -1;
		src = src + (m->data - m->text);
		memcpy(data, (void *)src, m->datasz);
	}
	/*
	 * Create the stack.
	 */
	*stack = (void *)(USER_MAX - USTACK_SIZE);
	if (do_allocate(map, stack, USTACK_SIZE, 0))
		return -1;

	/* Free the original pages */
	page_free((void *)m->phys, m->size);
	return 0;
}
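
/*
 * Resulting layout of a boot task's address space after vm_load(),
 * assuming the conventional arrangement used above (the exact
 * addresses come from the boot module information):
 *
 *	m->text                  text segment (made read-only)
 *	m->data                  data segment + zero-filled BSS
 *	USER_MAX - USTACK_SIZE   user stack of USTACK_SIZE bytes
 */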

/*
 * Translate a virtual address of the current task to a physical address.
 * Returns the physical address on success, or NULL if the memory is
 * not mapped.
 */
void *
vm_translate(void *addr, size_t size)
{

	return mmu_extract(cur_task()->map->pgd, addr, size);
}

/*
 * Check if the specified access is allowed.
 * Returns 0 on success, or EFAULT on failure.
 */
int
vm_access(void *addr, size_t size, int type)
{
	u_long pg, end;
	int err;
	char tmp;

	ASSERT(size);
	pg = PAGE_TRUNC(addr);
	end = PAGE_TRUNC((u_long)addr + size - 1);
	do {
		if ((err = umem_copyin((void *)pg, &tmp, 1)))
			return EFAULT;
		if (type == VMA_WRITE) {
			if ((err = umem_copyout(&tmp, (void *)pg, 1)))
				return EFAULT;
		}
		pg += PAGE_SIZE;
	} while (pg <= end);
	return 0;
}
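
/*
 * Example (a minimal sketch, not part of the original source):
 * validating a user buffer before a driver writes into it.  "ubuf"
 * and "len" are hypothetical.
 *
 *	if (vm_access(ubuf, len, VMA_WRITE))
 *		return EFAULT;
 *	... it is now safe to fill "ubuf" ...
 */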

/*
 * Initialize region
 */
static void
region_init(struct region *reg)
{

	reg->next = reg->prev = reg;
	reg->sh_next = reg->sh_prev = reg;
	reg->addr = PAGE_SIZE;
	reg->phys = 0;
	reg->size = USER_MAX - PAGE_SIZE;
	reg->flags = REG_FREE;
}

/*
 * Create new free region after the specified region.
 * Returns region on success, or NULL on failure.
 */
static struct region *
region_create(struct region *prev, u_long addr, size_t size)
{
	struct region *reg;

	if ((reg = kmem_alloc(sizeof(*reg))) == NULL)
		return NULL;

	reg->addr = addr;
	reg->size = size;
	reg->phys = 0;
	reg->flags = REG_FREE;
	reg->sh_next = reg->sh_prev = reg;

	reg->next = prev->next;
	reg->prev = prev;
	prev->next->prev = reg;
	prev->next = reg;
	return reg;
}

/*
 * Delete specified region
 */
static void
region_delete(struct region *head, struct region *reg)
{

	/* If it is shared region, unlink from shared list */
	if (reg->flags & REG_SHARED) {
		reg->sh_prev->sh_next = reg->sh_next;
		reg->sh_next->sh_prev = reg->sh_prev;
		if (reg->sh_prev == reg->sh_next)
			reg->sh_prev->flags &= ~REG_SHARED;
	}
	if (head != reg)
		kmem_free(reg);
}

/*
 * Find the region at the specified area.
 */
static struct region *
region_find(struct region *head, u_long addr, size_t size)
{
	struct region *reg;

	reg = head;
	do {
		if (reg->addr <= addr &&
		    reg->addr + reg->size >= addr + size) {
			return reg;
		}
		reg = reg->next;
	} while (reg != head);
	return NULL;
}

/*
 * Allocate free region for specified size.
 */
static struct region *
region_alloc(struct region *head, size_t size)
{
	struct region *reg;

	reg = head;
	do {
		if ((reg->flags & REG_FREE) && reg->size >= size) {
			if (reg->size != size) {
				/* Split this region and return its head */
				if (region_create(reg, reg->addr + size,
						  reg->size - size) == NULL)
					return NULL;
			}
			reg->size = size;
			return reg;
		}
		reg = reg->next;
	} while (reg != head);
	return NULL;
}
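
/*
 * Worked example: with a free region covering [0x1000, 0x6000),
 * region_alloc(head, 0x2000) creates a new free region for the
 * remainder [0x3000, 0x6000) and returns the original region,
 * now shrunk to [0x1000, 0x3000), to the caller.
 */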

/*
 * Free the specified region, merging it with adjacent free regions.
 */
static void
region_free(struct region *head, struct region *reg)
{
	struct region *prev, *next;

	ASSERT(reg->flags != REG_FREE);

	/*
	 * If it is a shared region, unlink it from the shared list.
	 * This must be done before the flags are overwritten below,
	 * otherwise the REG_SHARED bit has already been lost.
	 */
	if (reg->flags & REG_SHARED) {
		reg->sh_prev->sh_next = reg->sh_next;
		reg->sh_next->sh_prev = reg->sh_prev;
		if (reg->sh_prev == reg->sh_next)
			reg->sh_prev->flags &= ~REG_SHARED;
	}
	reg->flags = REG_FREE;

	/* If next region is free, merge with it. */
	next = reg->next;
	if (next != head && (next->flags & REG_FREE)) {
		reg->next = next->next;
		next->next->prev = reg;
		reg->size += next->size;
		kmem_free(next);
	}

	/* If previous region is free, merge with it. */
	prev = reg->prev;
	if (reg != head && (prev->flags & REG_FREE)) {
		prev->next = reg->next;
		reg->next->prev = prev;
		prev->size += reg->size;
		kmem_free(reg);
	}
}
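
/*
 * Worked example of the merging above: freeing the middle one of
 * three consecutive regions A|B|C, where A and C are already free,
 * first absorbs C into B (forward merge) and then absorbs the
 * combined B+C into A (backward merge), leaving a single free
 * region that spans all three.
 */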

/*
 * Split a region at the specified address/size.
 */
static struct region *
region_split(struct region *head, struct region *reg, u_long addr,
	     size_t size)
{
	struct region *prev, *next;
	size_t diff;

	/*
	 * Check whether the previous part must be split off.
	 */
	prev = NULL;
	if (reg->addr != addr) {
		prev = reg;
		diff = (size_t)(addr - reg->addr);
		reg = region_create(prev, addr, prev->size - diff);
		if (reg == NULL)
			return NULL;
		prev->size = diff;
	}

	/*
	 * Check whether the next part must be split off.
	 */
	if (reg->size != size) {
		next = region_create(reg, reg->addr + size,
				     reg->size - size);
		if (next == NULL) {
			if (prev) {
				/* Undo previous region_create() */
				region_free(head, reg);
			}
			return NULL;
		}
		reg->size = size;
	}
	reg->flags = 0;
	return reg;
}
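
/*
 * Worked example: calling region_split(head, reg, 0x3000, 0x2000)
 * on a free region [0x1000, 0x8000) leaves three regions: a free
 * head [0x1000, 0x3000), the returned region [0x3000, 0x5000) with
 * its flags cleared, and a free tail [0x5000, 0x8000).
 */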

#if defined(DEBUG) && defined(CONFIG_KDUMP)
void
vm_dump_one(task_t task)
{
	vm_map_t map;
	struct region *reg;
	char flags[6];
	u_long total = 0;

	printk("task=%x map=%x name=%s\n", task, task->map,
	       task->name ? task->name : "no name");
	printk(" region   virtual  physical size     flags\n");
	printk(" -------- -------- -------- -------- -----\n");

	map = task->map;
	reg = &map->head;
	do {
		if (reg->flags != REG_FREE) {
			strlcpy(flags, "-----", 6);
			if (reg->flags & REG_READ)
				flags[0] = 'R';
			if (reg->flags & REG_WRITE)
				flags[1] = 'W';
			if (reg->flags & REG_EXEC)
				flags[2] = 'E';
			if (reg->flags & REG_SHARED)
				flags[3] = 'S';
			if (reg->flags & REG_MAPPED)
				flags[4] = 'M';

			printk(" %08x %08x %08x %8x %s\n", reg,
			       reg->addr, reg->phys, reg->size, flags);
			total += reg->size;
		}
		reg = reg->next;
	} while (reg != &map->head);	/* Process all regions */
	printk(" *total=%dK bytes\n\n", total / 1024);
}

void
vm_dump(void)
{
	list_t n;
	task_t task;

	printk("\nVM dump:\n");
	n = list_first(&kern_task.link);
	while (n != &kern_task.link) {
		task = list_entry(n, struct task, link);
		vm_dump_one(task);
		n = list_next(n);
	}
}
#endif

#ifdef CONFIG_VMTRACE
static void
vm_error(const char *func, int err)
{

	printk("VM error: %s returns err=%x\n", func, err);
}
#endif

void
vm_init(void)
{
	pgd_t pgd;

	/*
	 * Setup vm mapping for kernel task.
	 */
	pgd = mmu_newmap();
	ASSERT(pgd != NULL);
	kern_map.pgd = pgd;
	mmu_switch(pgd);
	region_init(&kern_map.head);
	kern_task.map = &kern_map;
}