Annotation of prex/sys/mem/page.c, Revision 1.1.1.1
1.1 nbrk 1: /*-
2: * Copyright (c) 2005-2006, Kohsuke Ohtani
3: * All rights reserved.
4: *
5: * Redistribution and use in source and binary forms, with or without
6: * modification, are permitted provided that the following conditions
7: * are met:
8: * 1. Redistributions of source code must retain the above copyright
9: * notice, this list of conditions and the following disclaimer.
10: * 2. Redistributions in binary form must reproduce the above copyright
11: * notice, this list of conditions and the following disclaimer in the
12: * documentation and/or other materials provided with the distribution.
13: * 3. Neither the name of the author nor the names of any co-contributors
14: * may be used to endorse or promote products derived from this software
15: * without specific prior written permission.
16: *
17: * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18: * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19: * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20: * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21: * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22: * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23: * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24: * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25: * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26: * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27: * SUCH DAMAGE.
28: */
29:
30: /*
31: * page.c - physical page allocator
32: */
33:
34: /*
35: * This is a simple list-based page allocator.
36: *
37: * When the remaining page is exhausted, what should we do ?
38: * If the system can stop with panic() here, the error check of many
39: * portions in kernel is not necessary, and kernel code can become
40: * more simple. But, in general, even if a page is exhausted,
41: * a kernel can not be stopped but it should return an error and
42: * continue processing.
43: * If the memory becomes short during boot time, kernel and drivers
44: * can use panic() in that case.
45: */
46:
47: #include <kernel.h>
48: #include <page.h>
49: #include <sched.h>
50:
/*
 * page_block is put on the head of the first page of
 * each free block.
 *
 * The header lives inside the free pages themselves, so the
 * allocator needs no separate metadata storage.  Free blocks
 * form a circular doubly-linked list anchored at page_head,
 * kept sorted by address (see page_free()).
 */
struct page_block {
	struct page_block *next;	/* next free block in the circular list */
	struct page_block *prev;	/* previous free block */
	size_t size;		/* number of bytes of this block */
};

static struct page_block page_head;	/* first free block */

static size_t total_bytes;	/* bytes managed by this allocator (set in page_init) */
static size_t used_bytes;	/* bytes currently allocated or reserved */
65:
66: /*
67: * page_alloc - allocate continuous pages of the specified size.
68: *
69: * This routine returns the physical address of a new free page
70: * block, or returns NULL on failure. The requested size is
71: * automatically round up to the page boundary. The allocated
72: * memory is _not_ filled with 0.
73: */
74: void *
75: page_alloc(size_t size)
76: {
77: struct page_block *blk, *tmp;
78:
79: ASSERT(size != 0);
80:
81: sched_lock();
82:
83: /*
84: * Find the free block that has enough size.
85: */
86: size = (size_t)PAGE_ALIGN(size);
87: blk = &page_head;
88: do {
89: blk = blk->next;
90: if (blk == &page_head) {
91: sched_unlock();
92: DPRINTF(("page_alloc: out of memory\n"));
93: return NULL; /* Not found. */
94: }
95: } while (blk->size < size);
96:
97: /*
98: * If found block size is exactly same with requested,
99: * just remove it from a free list. Otherwise, the
100: * found block is divided into two and first half is
101: * used for allocation.
102: */
103: if (blk->size == size) {
104: blk->prev->next = blk->next;
105: blk->next->prev = blk->prev;
106: } else {
107: tmp = (struct page_block *)((char *)blk + size);
108: tmp->size = blk->size - size;
109: tmp->prev = blk->prev;
110: tmp->next = blk->next;
111: blk->prev->next = tmp;
112: blk->next->prev = tmp;
113: }
114: used_bytes += size;
115: sched_unlock();
116: return virt_to_phys(blk);
117: }
118:
/*
 * Free page block.
 *
 * This allocator does not maintain the size of allocated page
 * block. The caller must provide the size information of the
 * block.
 */
void
page_free(void *addr, size_t size)
{
	struct page_block *blk, *prev;

	ASSERT(addr != NULL);
	ASSERT(size != 0);

	sched_lock();

	size = (size_t)PAGE_ALIGN(size);
	blk = (struct page_block *)phys_to_virt(addr);

	/*
	 * Find the target position in list.  The free list is kept
	 * sorted by address, so walk until the next block lies at or
	 * above the freed block (or the list wraps back to the head).
	 */
	for (prev = &page_head; prev->next < blk; prev = prev->next) {
		if (prev->next == &page_head)
			break;
	}

#ifdef DEBUG
	/* The freed range must not overlap its free-list neighbors. */
	if (prev != &page_head)
		ASSERT((char *)prev + prev->size <= (char *)blk);
	if (prev->next != &page_head)
		ASSERT((char *)blk + size <= (char *)prev->next);
#endif /* DEBUG */

	/*
	 * Insert new block into list.
	 */
	blk->size = size;
	blk->prev = prev;
	blk->next = prev->next;
	prev->next->prev = blk;
	prev->next = blk;

	/*
	 * If the adjoining block is free, it combines and
	 * is made on block.  The following block is merged into this
	 * one first, then this block is merged back into the
	 * preceding block when they are contiguous.
	 */
	if (blk->next != &page_head &&
	    ((char *)blk + blk->size) == (char *)blk->next) {
		blk->size += blk->next->size;
		blk->next = blk->next->next;
		blk->next->prev = blk;
	}
	if (blk->prev != &page_head &&
	    (char *)blk->prev + blk->prev->size == (char *)blk) {
		blk->prev->size += blk->size;
		blk->prev->next = blk->next;
		blk->next->prev = blk->prev;
	}
	used_bytes -= size;
	sched_unlock();
}
182:
/*
 * The function to reserve pages in specific address.
 * Returns 0 on success.
 *
 * NOTE(review): an earlier comment promised -1 on failure, but this
 * version never returns -1 — it panics when the requested range is
 * not fully contained in one free block.
 * NOTE(review): unlike page_alloc()/page_free() this takes no
 * sched_lock(); presumably it is only called during boot before any
 * other context can run — confirm against callers.
 */
int
page_reserve(void *addr, size_t size)
{
	struct page_block *blk, *tmp;
	char *end;

	/* Nothing to reserve. */
	if (size == 0)
		return 0;

	/*
	 * Expand the request to whole pages: round the end up
	 * and the start down to page boundaries.
	 */
	addr = phys_to_virt(addr);
	end = (char *)PAGE_ALIGN((char *)addr + size);
	addr = (void *)PAGE_TRUNC(addr);
	size = (size_t)(end - (char *)addr);

	/*
	 * Find the block which includes specified block.
	 */
	blk = page_head.next;
	for (;;) {
		if (blk == &page_head)
			panic("failed to reserve pages");
		if ((char *)blk <= (char *)addr
		    && end <= (char *)blk + blk->size)
			break;
		blk = blk->next;
	}
	if ((char *)blk == (char *)addr && blk->size == size) {
		/*
		 * Unlink the block from free list.
		 */
		blk->prev->next = blk->next;
		blk->next->prev = blk->prev;
	} else {
		/*
		 * Split this block.
		 */
		if ((char *)blk + blk->size != end) {
			/*
			 * Free space remains above the reserved range:
			 * carve a new free block starting at 'end'.
			 */
			tmp = (struct page_block *)end;
			tmp->size = (size_t)((char *)blk + blk->size - end);
			tmp->next = blk->next;
			tmp->prev = blk;

			blk->size -= tmp->size;
			blk->next->prev = tmp;
			blk->next = tmp;
		}
		if ((char *)blk == (char *)addr) {
			/*
			 * Reserved range starts at the block head: the
			 * (possibly trimmed) block is consumed whole.
			 */
			blk->prev->next = blk->next;
			blk->next->prev = blk->prev;
		} else
			/* Free space remains below: shrink the block. */
			blk->size = (size_t)((char *)addr - (char *)blk);
	}
	used_bytes += size;
	return 0;
}
242:
243: void
244: page_info(size_t *total, size_t *free)
245: {
246:
247: *total = total_bytes;
248: *free = total_bytes - used_bytes;
249: }
250:
/*
 * Initialize page allocator.
 * page_init() must be called prior to other memory manager's
 * initializations.
 */
void
page_init(void)
{
	struct page_block *blk;
	struct mem_map *mem;
	int i;

	DPRINTF(("Memory: base=%x size=%dK\n", boot_info->main_mem.start,
	    boot_info->main_mem.size / 1024));

	/*
	 * First, create one block containing all memory pages.
	 */
	blk = (struct page_block *)boot_info->main_mem.start;
	blk = phys_to_virt(blk);
	blk->size = boot_info->main_mem.size;
	if (blk->size == 0)
		panic("page_init: no pages");
	blk->prev = blk->next = &page_head;
	page_head.next = page_head.prev = blk;

	/*
	 * Then, the system reserved pages are marked as a used block.
	 */
	for (i = 0; i < NRESMEM; i++) {
		mem = &boot_info->reserved[i];
		if (mem->size != 0)
			page_reserve((void *)mem->start, mem->size);
	}
	/*
	 * Accounting trick: the page_reserve() calls above accumulated
	 * the reserved sizes into used_bytes.  Subtracting that from
	 * the main memory size leaves total_bytes as the amount this
	 * allocator actually manages; used_bytes then restarts at zero.
	 */
	total_bytes = boot_info->main_mem.size - used_bytes;
	used_bytes = 0;

	/*
	 * Reserve pages for all boot modules.
	 * (Done after the reset above, so module pages are reported
	 * as "used" by page_info() rather than excluded from the
	 * total.)
	 */
	mem = &boot_info->modules;
	page_reserve((void *)mem->start, mem->size);
}
CVSweb