Annotation of prex-old/sys/arch/arm/arm/locore.S, Revision 1.1.1.1
1.1 nbrk 1: /*-
2: * Copyright (c) 2005-2007, Kohsuke Ohtani
3: * All rights reserved.
4: *
5: * Redistribution and use in source and binary forms, with or without
6: * modification, are permitted provided that the following conditions
7: * are met:
8: * 1. Redistributions of source code must retain the above copyright
9: * notice, this list of conditions and the following disclaimer.
10: * 2. Redistributions in binary form must reproduce the above copyright
11: * notice, this list of conditions and the following disclaimer in the
12: * documentation and/or other materials provided with the distribution.
13: * 3. Neither the name of the author nor the names of any co-contributors
14: * may be used to endorse or promote products derived from this software
15: * without specific prior written permission.
16: *
17: * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18: * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19: * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20: * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21: * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22: * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23: * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24: * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25: * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26: * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27: * SUCH DAMAGE.
28: */
29:
30: /*
31: * locore.S - low level platform support
32: */
33:
34: #include <conf/config.h>
35: #include <platform.h>
36: #include <cpu.h>
37:
38: #define ENTRY(x) .global x; .align; x##:
/* NOTE(review): "x##:" relies on the preprocessor pasting the label name
 * against ':'.  Strict ISO cpp rejects this paste; traditional/GNU cpp
 * accepts it -- confirm against the toolchain actually used. */
 39:
 40: .section ".text","ax"
 41: .code 32
 42: /*
 43: * Kernel start point
 44: */
 45: ENTRY(kernel_start)
 46:
 47: /*
 48: * Exception vector
 49: *
 50: * This table will be copied to an appropriate location.
 51: * (the location is platform specific.)
 52: */
/*
 * Each entry is "ldr pc, <literal>": a PC-relative load from the
 * *_target words just below.  Because both the table and its literal
 * words are PC-relative, the whole group stays valid wherever it is
 * copied, as long as table and literals are copied together.
 */
 53: ldr pc, reset_target /* 0x00 mode: svc */
 54: ldr pc, undefined_target /* 0x04 mode: ? */
 55: ldr pc, swi_target /* 0x08 mode: svc */
 56: ldr pc, prefetch_target /* 0x0c mode: abort */
 57: ldr pc, abort_target /* 0x10 mode: abort */
 58: nop /* 0x14 reserved */
 59: ldr pc, irq_target /* 0x18 mode: irq */
 60: ldr pc, fiq_target /* 0x1c mode: fiq */
 61:
/* Literal pool for the vector table: absolute addresses of the handlers. */
 62: reset_target: .word reset_entry
 63: undefined_target: .word undefined_entry
 64: swi_target: .word syscall_entry
 65: prefetch_target: .word prefetch_entry
 66: abort_target: .word abort_entry
 67: irq_target: .word interrupt_entry
 68: fiq_target: .word fiq_entry
 69:
/*
 * In-text data words, loaded PC-relative by the entry code below.
 * BOOT_INFO / BOOT_STACK / INT_STACK / SYS_STACK come from the platform
 * headers; 0x800 and 0x500 are the boot and interrupt stack sizes
 * (stacks grow down, so "+ size" is the initial top).
 */
 70: .global boot_info
 71: boot_info: .word BOOT_INFO
 72: boot_stack_top: .word BOOT_STACK + 0x800
 73: int_stack_top: .word INT_STACK + 0x500
 74: irq_mode_stack: .word INT_STACK
 75: sys_mode_stack: .word SYS_STACK
 76: irq_nest_count: .word irq_nesting
77:
78: reset_entry:
79: /*
80: * Clear kernel BSS
81: */
82: ldr r1, =__bss
83: ldr r2, =__end
84: mov r0, #0
85: cmp r1, r2
86: beq 2f
87: 1: str r0, [r1], #4
88: cmp r1, r2
89: bls 1b
90: 2:
91: /*
92: * Setup stack pointer for each processor mode
93: */
94: mov r0, #(PSR_IRQ_MODE|PSR_FIQ_DIS|PSR_IRQ_DIS)
95: msr cpsr_c, r0
96: ldr sp, irq_mode_stack /* Set IRQ mode stack */
97:
98: mov r0, #(PSR_SYS_MODE|PSR_FIQ_DIS|PSR_IRQ_DIS)
99: msr cpsr, r0
100: ldr sp, sys_mode_stack /* Set SYS mode stack */
101:
102: mov r0, #(PSR_SVC_MODE|PSR_FIQ_DIS|PSR_IRQ_DIS)
103: msr cpsr, r0
104: ldr sp, boot_stack_top /* Set SVC mode stack */
105:
106: /*
107: * Jump to kernel main routine
108: */
109: b kernel_main
110:
 111: /*
 112: * Interrupt entry point
 113: */
 114: /*
 115: * Memo: GBA BIOS interrupt handler.
 116: *
 117: * stmfd sp!, {r0-r3,r12,lr}
 118: * mov r0, #0x4000000
 119: * adr lr, IntRet
 120: * ldr pc, [r0,#-4] @ pc = [0x3007ffc]
 121: *IntRet:
 122: * ldmfd sp!, {r0-r3,r12,lr}
 123: * subs pc, lr, #4
 124: */
 125: ENTRY(interrupt_entry)
/*
 * Entered in IRQ mode.  Builds a 19-word frame on the SVC stack
 * (word offsets, used as 4*N below):
 *   [0..14] r0-r14 of the interrupted context (user/system bank, stm^)
 *   [15] svc_sp   [16] svc_lr   [17] return pc   [18] spsr
 * Register roles across the C calls: r4 = &irq_nesting, r5 = previous
 * nesting level, r7 = saved sp.  Assumes the called C code preserves
 * r4-r11 per the ARM procedure call standard.
 */
 126: #ifdef __gba__
 127: ldmfd sp!, {r0-r3,r12,lr} /* Discard GBA BIOS's stack */
 128: #endif
 129: stmfd sp, {r0-r4} /* Save work registers */
/* Note: no writeback ("sp" not "sp!") -- IRQ sp itself is unchanged;
 * r4 below is pointed at the 5 words just stored. */
 130: sub r2, lr, #4 /* r2: pc */
 131: mrs r3, spsr /* r3: cpsr */
 132: sub r4, sp, #(4*5)
 133:
 134: mrs r0, cpsr /* Set processor to SVC mode */
 135: bic r0, r0, #PSR_MODE
 136: orr r0, r0, #PSR_SVC_MODE
 137: msr cpsr_c, r0
 138:
/* Now in SVC mode: r0/r1 pick up the banked SVC sp/lr. */
 139: mov r0, sp
 140: mov r1, lr
 141: stmfd sp!, {r0-r3} /* Push svc_sp, svc_lr, pc, cpsr */
 142: ldmfd r4, {r0-r4} /* Restore work registers */
 143: sub sp, sp, #(4*15)
 144: stmia sp, {r0-r14}^ /* Push r0-r14 */
/* "^" stores the user-bank r13/r14 regardless of current mode. */
 145: nop /* Instruction gap for stm^ */
 146:
 147: ldr r4, irq_nest_count /* Increment IRQ nesting level */
 148: ldr r5, [r4] /* r5: Previous nesting level */
 149: add r0, r5, #1
 150: str r0, [r4]
 151:
 152: mov r7, sp /* Save stack */
 153: ldr r3, int_stack_top /* Adjust stack for IRQ */
 154: cmp r5, #0 /* Outermost interrupt? */
 155: moveq sp, r3 /* If outermost, switch stack */
 156: bleq sched_lock /* If outermost, lock scheduler */
 157: bl interrupt_handler /* Call main interrupt handler */
 158: mov sp, r7 /* Restore stack */
 159:
 160: str r5, [r4] /* Restore IRQ nesting level */
 161: cmp r5, #0 /* Outermost interrupt? */
 162: bne nested_irq
 163:
/* Outermost interrupt only: allow preemption, and if we are about to
 * return to application (user) mode, deliver any pending exception. */
 164: bl sched_unlock /* Try to preempt */
 165: ldr r0, [sp, #(4*18)] /* Get previous mode */
 166: and r0, r0, #PSR_MODE
 167: cmp r0, #PSR_APP_MODE /* Return to application mode? */
 168: bleq exception_deliver /* If so, check exception */
 169:
 170: nested_irq:
/* Unwind the frame: r0 keeps the frame base while sp/lr are restored. */
 171: mov r0, sp
 172: ldr sp, [r0, #(4*15)] /* Restore svc_sp */
 173: ldr lr, [r0, #(4*16)] /* Restore svc_lr */
 174:
 175: mrs r1, cpsr /* Set processor to IRQ mode */
 176: bic r1, r1, #PSR_MODE
 177: orr r1, r1, #PSR_IRQ_MODE
 178: msr cpsr_c, r1
 179:
 180: ldr lr, [r0, #(4*17)] /* Restore lr */
 181: ldr r1, [r0, #(4*18)] /* Restore spsr */
 182: msr spsr_all, r1
 183: ldmfd r0, {r0-r14}^ /* Restore user mode registers */
 184: nop /* Instruction gap for ldm^ */
 185: movs pc, lr /* Exit, with restoring cpsr */
186:
 187: /*
 188: * System call entry
 189: */
/*
 * SWI handler, entered in SVC mode.  Builds the same 19-word frame as
 * interrupt_entry: [0..14] user r0-r14, [15] svc_sp, [17] return pc,
 * [18] spsr.  NOTE(review): slot [16] (svc_lr) is never written here,
 * and syscall_ret never reads it -- presumably intentional; confirm.
 * r4 carries the SWI number into the dispatch; on GBA the syscall stub
 * is assumed to have loaded r4 (and saved r5) before the SWI.
 */
 190: .global syscall_ret
 191: ENTRY(syscall_entry)
 192: #ifdef __gba__
 193: mov r5, lr /* Syscall stub already saved r5 */
 194: mrs r12, cpsr /* Set processor to SVC mode */
 195: bic r12, r12, #PSR_MODE
 196: orr r12, r12, #PSR_SVC_MODE
 197: msr cpsr_c, r12
 198: mov lr, r5
 199: #endif
 200: sub sp, sp, #(4*19) /* Adjust stack */
 201: stmia sp, {r0-r14}^ /* Push r0-r14 */
 202: nop /* Instruction gap for stm^ */
 203: add r5, sp, #(4*19)
 204: str r5, [sp, #(4*15)] /* Push svc_sp */
 205: str lr, [sp, #(4*17)] /* Push pc */
 206: mrs r5, spsr /* Push cpsr */
 207: str r5, [sp, #(4*18)]
 208: #ifndef __gba__
/* The SWI number is encoded in the low 24 bits of the trapping
 * instruction, found at lr-4. */
 209: ldr r4, [lr, #-4] /* Get SWI number */
 210: bic r4, r4, #0xff000000
 211: #endif
 212: ldr r5, =nr_syscalls /* Check SWI number */
 213: ldr r5, [r5]
 214: cmp r4, r5
/* "bge" is a signed compare; safe on the non-GBA path because the bic
 * above guarantees r4 is a small positive value.  NOTE(review): on the
 * GBA path r4 comes straight from the stub -- verify it is bounded. */
 215: bge bad_syscall
 216:
 217: ldr r5,=syscall_table
 218: ldr r4, [r5, r4, lsl #2]
/* Classic ARM-state call: "mov lr, pc" yields the address of the
 * instruction after "mov pc, r4", so the handler returns to line 221. */
 219: mov lr, pc
 220: mov pc, r4 /* Dispatch functions */
 221: str r0, [sp] /* Set return value to r0 */
 222: bl exception_deliver /* Check exception */
 223: syscall_ret:
 224: mov r5, sp
 225: ldr r1, [r5, #(4*18)] /* Restore cpsr */
 226: msr spsr_all, r1
 227: ldr lr, [r5, #(4*17)] /* Restore pc (lr) */
 228: ldr sp, [r5, #(4*15)] /* Restore svc_sp */
 229: ldmfd r5, {r0-r14}^ /* Restore user mode registers */
 230: nop /* Instruction gap for ldm^ */
 231: movs pc, lr /* Exit, with restoring cpsr */
 232: bad_syscall:
/* 22 = EINVAL; stored into the saved-r0 slot so the caller sees it as
 * the syscall return value. */
 233: mov r0, #22 /* Set EINVAL to r0 */
 234: str r0, [sp, #(4*0)]
 235: b syscall_ret
236:
237:
238: /*
239: * TODO: Need to handle following exceptions.
240: */
/*
 * Unhandled exception vectors: undefined instruction, prefetch abort,
 * data abort and FIQ all land on the same label and spin forever.
 * (See the TODO above -- real handlers are not implemented yet.)
 */
 241: ENTRY(undefined_entry)
 242: ENTRY(prefetch_entry)
 243: ENTRY(abort_entry)
 244: ENTRY(fiq_entry)
 245: b fiq_entry /* hang... */
246:
 247: /*
 248: * Switch register context.
 249: * r0 = previous kern_regs, r1 = next kern_regs
 250: * Interrupts must be disabled by caller.
 251: *
 252: * syntax - void cpu_switch(kern_regs *prev, kern_regs *next)
 253: *
 254: * Note: GCC uses r0-r3,r12 as scratch registers
 255: */
 256: ENTRY(cpu_switch)
/* Save/restore exactly the APCS callee-saved set plus sp.  Loading pc
 * from the next context's saved lr both "returns" into the next thread
 * and makes cpu_switch appear to return normally to it.  kern_regs
 * must therefore be (at least) 10 words laid out r4-r11, sp, lr. */
 257: stmia r0, {r4-r11, sp, lr} /* Save previous register context */
 258: ldmia r1, {r4-r11, sp, pc} /* Restore next register context */
259:
 260: /*
 261: * Entry point for kernel thread
 262: */
/*
 * First code run by a new kernel thread after cpu_switch: presumably
 * the context-initialization code elsewhere stores the thread function
 * in r4 and its argument in r5 of the initial kern_regs, with lr set
 * to this label -- verify against the context setup code.
 */
 263: ENTRY(kernel_thread_entry)
 264: mov r0, r5 /* Set argument */
 265: mov pc, r4 /* Jump to kernel thread */
266:
267:
 268: /*
 269: * Interrupt nest counter.
 270: *
 271: * This counter is incremented in the entry of interrupt handler
 272: * to switch the interrupt stack. Since all interrupt handlers
 273: * share same one interrupt stack, each handler must pay attention
 274: * to the stack overflow.
 275: */
/* NOTE(review): section flags are "a" (alloc) only, not "aw" --
 * .bss is normally writable; confirm the assembler/linker still place
 * this as writable zero-fill data. */
 276: .section ".bss","a"
 277: irq_nesting:
 278: .long 0
 279: .end
CVSweb