Annotation of prex-old/sys/mem/page.c, Revision 1.2
1.1 nbrk 1: /*-
2: * Copyright (c) 2005-2006, Kohsuke Ohtani
3: * All rights reserved.
4: *
5: * Redistribution and use in source and binary forms, with or without
6: * modification, are permitted provided that the following conditions
7: * are met:
8: * 1. Redistributions of source code must retain the above copyright
9: * notice, this list of conditions and the following disclaimer.
10: * 2. Redistributions in binary form must reproduce the above copyright
11: * notice, this list of conditions and the following disclaimer in the
12: * documentation and/or other materials provided with the distribution.
13: * 3. Neither the name of the author nor the names of any co-contributors
14: * may be used to endorse or promote products derived from this software
15: * without specific prior written permission.
16: *
17: * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18: * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19: * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20: * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21: * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22: * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23: * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24: * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25: * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26: * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27: * SUCH DAMAGE.
28: */
29:
30: /*
31: * page.c - physical page allocator
32: */
33:
/*
 * This is a simple list-based page allocator.
 *
 * What should happen when free pages are exhausted?  If the system
 * could simply stop with panic() here, many of the error checks
 * elsewhere in the kernel would become unnecessary and the kernel
 * code would be simpler.  In general, however, a kernel must not
 * stop just because a page ran out; it should return an error and
 * continue processing.  If memory runs short during boot time, the
 * kernel and drivers may still use panic() in that case.
 */
46:
47: #include <kernel.h>
48: #include <page.h>
49: #include <sched.h>
50:
/*
 * page_block is put on the head of the first page of
 * each free block.  Free blocks form a circular doubly-linked
 * list rooted at page_head, kept sorted by address.
 */
struct page_block {
	struct page_block *next;	/* next free block (address order) */
	struct page_block *prev;	/* previous free block */
	size_t size;		/* number of bytes of this block */
};

static struct page_block page_head;	/* first free block */

/* Accounting reported by page_info(). */
static size_t total_bytes;	/* total usable bytes after boot reservations */
static size_t used_bytes;	/* bytes currently allocated or reserved */
65:
66: /*
67: * page_alloc - allocate continuous pages of the specified size.
68: * @size: number of bytes to allocate
69: *
70: * This routine returns the physical address of a new free page block,
71: * or returns NULL on failure. The requested size is automatically
72: * round up to the page boundary.
73: * The allocated memory is _not_ filled with 0.
74: */
75: void *
76: page_alloc(size_t size)
77: {
78: struct page_block *blk, *tmp;
79:
80: ASSERT(size != 0);
81:
82: sched_lock();
83:
84: /*
85: * Find the free block that has enough size.
86: */
87: size = (size_t)PAGE_ALIGN(size);
88: blk = &page_head;
89: do {
90: blk = blk->next;
91: if (blk == &page_head) {
92: sched_unlock();
93: printk("page_alloc: out of memory\n");
94: return NULL; /* Not found. */
95: }
96: } while (blk->size < size);
97:
98: /*
99: * If found block size is exactly same with requested,
100: * just remove it from a free list. Otherwise, the
101: * found block is divided into two and first half is
102: * used for allocation.
103: */
104: if (blk->size == size) {
105: blk->prev->next = blk->next;
106: blk->next->prev = blk->prev;
107: } else {
108: tmp = (struct page_block *)((u_long)blk + size);
109: tmp->size = blk->size - size;
110: tmp->prev = blk->prev;
111: tmp->next = blk->next;
112: blk->prev->next = tmp;
113: blk->next->prev = tmp;
114: }
115: used_bytes += size;
116: sched_unlock();
117:
118: return virt_to_phys(blk);
119: }
120:
/*
 * page_free - free a previously allocated page block.
 * @addr: physical address of the block
 * @size: number of bytes of the block
 *
 * This allocator does not maintain the size of allocated page block.
 * The caller must provide the size information of the block.
 */
void
page_free(void *addr, size_t size)
{
	struct page_block *blk, *prev;

	ASSERT(addr != NULL);
	ASSERT(size != 0);

	sched_lock();

	size = (size_t)PAGE_ALIGN(size);
	blk = phys_to_virt(addr);

	/*
	 * Find the target position in list.  The free list is kept
	 * sorted by address, so walk until the next block lies at or
	 * beyond the one being freed (or we wrap back to the head).
	 */
	for (prev = &page_head; prev->next < blk; prev = prev->next) {
		if (prev->next == &page_head)
			break;
	}
#ifdef DEBUG
	/* The freed range must not overlap its free-list neighbors. */
	if (prev != &page_head)
		ASSERT((u_long)prev + prev->size <= (u_long)blk);
	if (prev->next != &page_head)
		ASSERT((u_long)blk + size <= (u_long)prev->next);
#endif /* DEBUG */

	/*
	 * Insert new block into list.
	 */
	blk->size = size;
	blk->prev = prev;
	blk->next = prev->next;
	prev->next->prev = blk;
	prev->next = blk;

	/*
	 * If the adjoining block is free, coalesce into one block.
	 * Merge with the successor first, then with the predecessor,
	 * so blk's size already includes the successor when the
	 * predecessor absorbs it.
	 */
	if (blk->next != &page_head &&
	    ((u_long)blk + blk->size) == (u_long)blk->next) {
		blk->size += blk->next->size;
		blk->next = blk->next->next;
		blk->next->prev = blk;
	}
	if (blk->prev != &page_head &&
	    (u_long)blk->prev + blk->prev->size == (u_long)blk) {
		blk->prev->size += blk->size;
		blk->prev->next = blk->next;
		blk->next->prev = blk->prev;
	}
	used_bytes -= size;
	sched_unlock();
}
182:
183: /*
184: * The function to reserve pages in specific address.
185: * Return 0 on success, or -1 on failure
186: */
187: int
188: page_reserve(void *addr, size_t size)
189: {
190: struct page_block *blk, *tmp;
191: u_long end;
192:
193: if (size == 0)
194: return 0;
195:
196: addr = phys_to_virt(addr);
197: end = PAGE_ALIGN((u_long)addr + size);
198: addr = (void *)PAGE_TRUNC(addr);
199: size = (size_t)(end - (u_long)addr);
200:
201: /*
202: * Find the block which includes specified block.
203: */
204: blk = page_head.next;
205: for (;;) {
206: if (blk == &page_head)
1.2 ! nbrk 207: #if 0
1.1 nbrk 208: panic("page_reserve");
1.2 ! nbrk 209: #endif
! 210: printk("page_reserve: warning, blk == &page_head\n");
! 211: break;
1.1 nbrk 212: if ((u_long)blk <= (u_long)addr
213: && end <= (u_long)blk + blk->size)
214: break;
215: blk = blk->next;
216: }
217: if ((u_long)blk == (u_long)addr && blk->size == size) {
218: /*
219: * Unlink the block from free list.
220: */
221: blk->prev->next = blk->next;
222: blk->next->prev = blk->prev;
223: } else {
224: /*
225: * Split this block.
226: */
227: if ((u_long)blk + blk->size != end) {
228: tmp = (struct page_block *)end;
229: tmp->size = (size_t)((u_long)blk + blk->size - end);
230: tmp->next = blk->next;
231: tmp->prev = blk;
232:
233: blk->size -= tmp->size;
234: blk->next->prev = tmp;
235: blk->next = tmp;
236: }
237: if ((u_long)blk == (u_long)addr) {
238: blk->prev->next = blk->next;
239: blk->next->prev = blk->prev;
240: } else
241: blk->size = (size_t)((u_long)addr - (u_long)blk);
242: }
243: used_bytes += size;
244: return 0;
245: }
246:
247: void
248: page_info(size_t *total, size_t *free)
249: {
250:
251: *total = total_bytes;
252: *free = total_bytes - used_bytes;
253: }
254:
#if defined(DEBUG) && defined(CONFIG_KDUMP)
/*
 * page_dump - print the free list, usage totals, and the boot-time
 * reserved regions (kernel, driver, reserved maps, RAM disk) to the
 * kernel console.  Debug builds only.
 */
void
page_dump(void)
{
	struct page_block *blk;
	void *addr;
	struct mem_map *mem;
	struct module *img;
	int i;

	printk("Page dump:\n");
	printk(" free pages:\n");
	printk(" start      end      size\n");
	printk(" --------   --------   --------\n");

	/* Walk the circular free list once, printing physical ranges. */
	blk = page_head.next;
	do {
		addr = virt_to_phys(blk);
		printk(" %08x - %08x %8x\n", addr, (u_long)addr + blk->size,
		       blk->size);
		blk = blk->next;
	} while (blk != &page_head);
	printk(" used=%dK free=%dK total=%dK\n\n",
	       used_bytes / 1024, (total_bytes - used_bytes) / 1024,
	       total_bytes / 1024);

	img = (struct module *)&boot_info->kernel;
	printk(" kernel: %08x - %08x (%dK)\n",
	       img->phys, img->phys + img->size, img->size / 1024);

	img = (struct module *)&boot_info->driver;
	printk(" driver: %08x - %08x (%dK)\n",
	       img->phys, img->phys + img->size, img->size / 1024);

	/* Reserved regions passed in by the boot loader. */
	for (i = 0; i < NRESMEM; i++) {
		mem = &boot_info->reserved[i];
		if (mem->size != 0) {
			printk(" reserved: %08x - %08x (%dK)\n",
			       mem->start, mem->start + mem->size,
			       mem->size / 1024);
		}
	}
#ifdef CONFIG_RAMDISK
	mem = (struct mem_map *)&boot_info->ram_disk;
	printk(" RAM disk: %08x - %08x (%dK)\n",
	       mem->start, mem->start + mem->size, mem->size / 1024);
#endif
}
#endif
304:
/*
 * Initialize page allocator.
 * page_init() must be called prior to other memory manager's
 * initializations.
 */
void
page_init(void)
{
	struct page_block *blk;
	struct mem_map *mem;
	int i;

	printk("Memory: base=%x size=%dK\n", boot_info->main_mem.start,
	       boot_info->main_mem.size / 1024);

	/*
	 * First, create one block containing all memory pages.
	 */
	blk = (struct page_block *)boot_info->main_mem.start;
	blk = phys_to_virt(blk);
	blk->size = boot_info->main_mem.size;
	if (blk->size == 0)
		panic("page_init: no pages");
	blk->prev = blk->next = &page_head;
	page_head.next = page_head.prev = blk;

	/*
	 * Then, the system reserved pages are marked as a used block.
	 * page_reserve() accumulates the reserved sizes in used_bytes,
	 * which is consumed and reset just below.
	 */
	for (i = 0; i < NRESMEM; i++) {
		mem = &boot_info->reserved[i];
		if (mem->size != 0)
			page_reserve((void *)mem->start, mem->size);
	}
	/*
	 * System-reserved pages are excluded from the usable total
	 * rather than counted as "used"; boot modules (reserved next)
	 * DO count against used_bytes.
	 */
	total_bytes = boot_info->main_mem.size - used_bytes;
	used_bytes = 0;

	/*
	 * Reserve pages for all boot modules.
	 */
	mem = &boot_info->modules;
	page_reserve((void *)mem->start, mem->size);
}
CVSweb