Annotation of sys/arch/arm/include/cpufunc.h, Revision 1.1.1.1
1.1 nbrk 1: /* $OpenBSD: cpufunc.h,v 1.3 2006/07/12 17:29:55 miod Exp $ */
2: /* $NetBSD: cpufunc.h,v 1.29 2003/09/06 09:08:35 rearnsha Exp $ */
3:
4: /*
5: * Copyright (c) 1997 Mark Brinicombe.
6: * Copyright (c) 1997 Causality Limited
7: * All rights reserved.
8: *
9: * Redistribution and use in source and binary forms, with or without
10: * modification, are permitted provided that the following conditions
11: * are met:
12: * 1. Redistributions of source code must retain the above copyright
13: * notice, this list of conditions and the following disclaimer.
14: * 2. Redistributions in binary form must reproduce the above copyright
15: * notice, this list of conditions and the following disclaimer in the
16: * documentation and/or other materials provided with the distribution.
17: * 3. All advertising materials mentioning features or use of this software
18: * must display the following acknowledgement:
19: * This product includes software developed by Causality Limited.
20: * 4. The name of Causality Limited may not be used to endorse or promote
21: * products derived from this software without specific prior written
22: * permission.
23: *
24: * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
25: * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
26: * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
27: * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
28: * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
29: * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
30: * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31: * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32: * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33: * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34: * SUCH DAMAGE.
35: *
36: * RiscBSD kernel project
37: *
38: * cpufunc.h
39: *
40: * Prototypes for cpu, mmu and tlb related functions.
41: */
42:
43: #ifndef _ARM32_CPUFUNC_H_
44: #define _ARM32_CPUFUNC_H_
45:
46: #ifdef _KERNEL
47:
48: #include <sys/types.h>
49: #include <arm/cpuconf.h>
50:
/*
 * Per-CPU indirect dispatch table for CPU, MMU, TLB and cache
 * operations.  A single global instance (`cpufuncs', declared below)
 * is presumably filled in at boot by set_cpufuncs() with the routines
 * matching the detected core -- TODO confirm against set_cpufuncs().
 * NOTE(review): assembly stubs may reference these members by fixed
 * byte offset; confirm before reordering or inserting fields.
 */
51: struct cpu_functions {
52:
53: /* CPU functions */
54:
55: u_int (*cf_id) (void);
56: void (*cf_cpwait) (void);
57:
58: /* MMU functions */
59:
60: u_int (*cf_control) (u_int bic, u_int eor);
61: void (*cf_domains) (u_int domains);
62: void (*cf_setttb) (u_int ttb);
63: u_int (*cf_faultstatus) (void);
64: u_int (*cf_faultaddress) (void);
65:
66: /* TLB functions */
67:
68: void (*cf_tlb_flushID) (void);
69: void (*cf_tlb_flushID_SE) (u_int va);
70: void (*cf_tlb_flushI) (void);
71: void (*cf_tlb_flushI_SE) (u_int va);
72: void (*cf_tlb_flushD) (void);
73: void (*cf_tlb_flushD_SE) (u_int va);
74:
75: /*
76: * Cache operations:
77: *
78: * We define the following primitives:
79: *
80: * icache_sync_all Synchronize I-cache
81: * icache_sync_range Synchronize I-cache range
82: *
83: * dcache_wbinv_all Write-back and Invalidate D-cache
84: * dcache_wbinv_range Write-back and Invalidate D-cache range
85: * dcache_inv_range Invalidate D-cache range
86: * dcache_wb_range Write-back D-cache range
87: *
88: * idcache_wbinv_all Write-back and Invalidate D-cache,
89: * Invalidate I-cache
90: * idcache_wbinv_range Write-back and Invalidate D-cache,
91: * Invalidate I-cache range
92: *
93: * Note that the ARM term for "write-back" is "clean". We use
94: * the term "write-back" since it's a more common way to describe
95: * the operation.
96: *
97: * There are some rules that must be followed:
98: *
99: * I-cache Synch (all or range):
100: * The goal is to synchronize the instruction stream,
101: * so you may need to write-back dirty D-cache blocks
102: * first. If a range is requested, and you can't
103: * synchronize just a range, you have to hit the whole
104: * thing.
105: *
106: * D-cache Write-Back and Invalidate range:
107: * If you can't WB-Inv a range, you must WB-Inv the
108: * entire D-cache.
109: *
110: * D-cache Invalidate:
111: * If you can't Inv the D-cache, you must Write-Back
112: * and Invalidate. Code that uses this operation
113: * MUST NOT assume that the D-cache will not be written
114: * back to memory.
115: *
116: * D-cache Write-Back:
117: * If you can't Write-back without doing an Inv,
118: * that's fine. Then treat this as a WB-Inv.
119: * Skipping the invalidate is merely an optimization.
120: *
121: * All operations:
122: * Valid virtual addresses must be passed to each
123: * cache operation.
124: */
125: void (*cf_icache_sync_all) (void);
126: void (*cf_icache_sync_range) (vaddr_t, vsize_t);
127:
128: void (*cf_dcache_wbinv_all) (void);
129: void (*cf_dcache_wbinv_range) (vaddr_t, vsize_t);
130: void (*cf_dcache_inv_range) (vaddr_t, vsize_t);
131: void (*cf_dcache_wb_range) (vaddr_t, vsize_t);
132:
133: void (*cf_idcache_wbinv_all) (void);
134: void (*cf_idcache_wbinv_range) (vaddr_t, vsize_t);
135:
136: /* Other functions */
137:
138: void (*cf_flush_prefetchbuf) (void);
139: void (*cf_drain_writebuf) (void);
140: void (*cf_flush_brnchtgt_C) (void);
141: void (*cf_flush_brnchtgt_E) (u_int va);
142:
143: void (*cf_sleep) (int mode);
144:
145: /* Soft functions */
146:
147: int (*cf_dataabt_fixup) (void *arg);
148: int (*cf_prefetchabt_fixup) (void *arg);
149:
150: void (*cf_context_switch) (void);
151:
152: void (*cf_setup) (char *string);
153: };
154:
/* The installed dispatch table and the cached CPU identification value. */
155: extern struct cpu_functions cpufuncs;
156: extern u_int cputype;
157:
/*
 * Convenience wrappers: each cpu_*() macro simply calls through the
 * corresponding member of the global `cpufuncs' dispatch table.
 */
158: #define cpu_id() cpufuncs.cf_id()
159: #define cpu_cpwait() cpufuncs.cf_cpwait()
160:
161: #define cpu_control(c, e) cpufuncs.cf_control(c, e)
162: #define cpu_domains(d) cpufuncs.cf_domains(d)
163: #define cpu_setttb(t) cpufuncs.cf_setttb(t)
164: #define cpu_faultstatus() cpufuncs.cf_faultstatus()
165: #define cpu_faultaddress() cpufuncs.cf_faultaddress()
166:
167: #define cpu_tlb_flushID() cpufuncs.cf_tlb_flushID()
168: #define cpu_tlb_flushID_SE(e) cpufuncs.cf_tlb_flushID_SE(e)
169: #define cpu_tlb_flushI() cpufuncs.cf_tlb_flushI()
170: #define cpu_tlb_flushI_SE(e) cpufuncs.cf_tlb_flushI_SE(e)
171: #define cpu_tlb_flushD() cpufuncs.cf_tlb_flushD()
172: #define cpu_tlb_flushD_SE(e) cpufuncs.cf_tlb_flushD_SE(e)
173:
174: #define cpu_icache_sync_all() cpufuncs.cf_icache_sync_all()
175: #define cpu_icache_sync_range(a, s) cpufuncs.cf_icache_sync_range((a), (s))
176:
177: #define cpu_dcache_wbinv_all() cpufuncs.cf_dcache_wbinv_all()
178: #define cpu_dcache_wbinv_range(a, s) cpufuncs.cf_dcache_wbinv_range((a), (s))
179: #define cpu_dcache_inv_range(a, s) cpufuncs.cf_dcache_inv_range((a), (s))
180: #define cpu_dcache_wb_range(a, s) cpufuncs.cf_dcache_wb_range((a), (s))
181:
182: #define cpu_idcache_wbinv_all() cpufuncs.cf_idcache_wbinv_all()
183: #define cpu_idcache_wbinv_range(a, s) cpufuncs.cf_idcache_wbinv_range((a), (s))
184:
185: #define cpu_flush_prefetchbuf() cpufuncs.cf_flush_prefetchbuf()
186: #define cpu_drain_writebuf() cpufuncs.cf_drain_writebuf()
187: #define cpu_flush_brnchtgt_C() cpufuncs.cf_flush_brnchtgt_C()
188: #define cpu_flush_brnchtgt_E(e) cpufuncs.cf_flush_brnchtgt_E(e)
189:
190: #define cpu_sleep(m) cpufuncs.cf_sleep(m)
191:
/* Abort-fixup hooks; the cf_*abt_fixup routines return one of the codes below. */
192: #define cpu_dataabt_fixup(a) cpufuncs.cf_dataabt_fixup(a)
193: #define cpu_prefetchabt_fixup(a) cpufuncs.cf_prefetchabt_fixup(a)
194: #define ABORT_FIXUP_OK 0 /* fixup succeeded */
195: #define ABORT_FIXUP_FAILED 1 /* fixup failed */
196: #define ABORT_FIXUP_RETURN 2 /* abort handler should return */
197:
198: #define cpu_setup(a) cpufuncs.cf_setup(a)
199:
/*
 * Probe the CPU and install the matching function table; presumably
 * returns 0 on success and one of the nonzero codes below on failure
 * -- TODO confirm against the implementation.
 */
200: int set_cpufuncs (void);
201: #define ARCHITECTURE_NOT_PRESENT 1 /* known but not configured */
202: #define ARCHITECTURE_NOT_SUPPORTED 2 /* not known */
203:
204: void cpufunc_nullop (void);
205: int cpufunc_null_fixup (void *);
206: int early_abort_fixup (void *);
207: int late_abort_fixup (void *);
208: u_int cpufunc_id (void);
209: u_int cpufunc_control (u_int clear, u_int bic);
210: void cpufunc_domains (u_int domains);
211: u_int cpufunc_faultstatus (void);
212: u_int cpufunc_faultaddress (void);
213:
214: #ifdef CPU_ARM3
215: u_int arm3_control (u_int clear, u_int bic);
216: void arm3_cache_flush (void);
217: #endif /* CPU_ARM3 */
218:
219: #if defined(CPU_ARM6) || defined(CPU_ARM7)
220: void arm67_setttb (u_int ttb);
221: void arm67_tlb_flush (void);
222: void arm67_tlb_purge (u_int va);
223: void arm67_cache_flush (void);
224: void arm67_context_switch (void);
225: #endif /* CPU_ARM6 || CPU_ARM7 */
226:
227: #ifdef CPU_ARM6
228: void arm6_setup (char *string);
229: #endif /* CPU_ARM6 */
230:
231: #ifdef CPU_ARM7
232: void arm7_setup (char *string);
233: #endif /* CPU_ARM7 */
234:
235: #ifdef CPU_ARM7TDMI
236: int arm7_dataabt_fixup (void *arg);
237: void arm7tdmi_setup (char *string);
238: void arm7tdmi_setttb (u_int ttb);
239: void arm7tdmi_tlb_flushID (void);
240: void arm7tdmi_tlb_flushID_SE (u_int va);
241: void arm7tdmi_cache_flushID (void);
242: void arm7tdmi_context_switch (void);
243: #endif /* CPU_ARM7TDMI */
244:
245: #ifdef CPU_ARM8
246: void arm8_setttb (u_int ttb);
247: void arm8_tlb_flushID (void);
248: void arm8_tlb_flushID_SE (u_int va);
249: void arm8_cache_flushID (void);
250: void arm8_cache_flushID_E (u_int entry);
251: void arm8_cache_cleanID (void);
252: void arm8_cache_cleanID_E (u_int entry);
253: void arm8_cache_purgeID (void);
254: void arm8_cache_purgeID_E (u_int entry);
255:
256: void arm8_cache_syncI (void);
257: void arm8_cache_cleanID_rng (vaddr_t start, vsize_t end);
258: void arm8_cache_cleanD_rng (vaddr_t start, vsize_t end);
259: void arm8_cache_purgeID_rng (vaddr_t start, vsize_t end);
260: void arm8_cache_purgeD_rng (vaddr_t start, vsize_t end);
261: void arm8_cache_syncI_rng (vaddr_t start, vsize_t end);
262:
263: void arm8_context_switch (void);
264:
265: void arm8_setup (char *string);
266:
267: u_int arm8_clock_config (u_int, u_int);
268: #endif
269:
270: #ifdef CPU_SA110
271: void sa110_setup (char *string);
272: void sa110_context_switch (void);
273: #endif /* CPU_SA110 */
274:
275: #if defined(CPU_SA1100) || defined(CPU_SA1110)
276: void sa11x0_drain_readbuf (void);
277:
278: void sa11x0_context_switch (void);
279: void sa11x0_cpu_sleep (int mode);
280:
281: void sa11x0_setup (char *string);
282: #endif
283:
284: #if defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110)
285: void sa1_setttb (u_int ttb);
286:
287: void sa1_tlb_flushID_SE (u_int va);
288:
289: void sa1_cache_flushID (void);
290: void sa1_cache_flushI (void);
291: void sa1_cache_flushD (void);
292: void sa1_cache_flushD_SE (u_int entry);
293:
294: void sa1_cache_cleanID (void);
295: void sa1_cache_cleanD (void);
296: void sa1_cache_cleanD_E (u_int entry);
297:
298: void sa1_cache_purgeID (void);
299: void sa1_cache_purgeID_E (u_int entry);
300: void sa1_cache_purgeD (void);
301: void sa1_cache_purgeD_E (u_int entry);
302:
303: void sa1_cache_syncI (void);
304: void sa1_cache_cleanID_rng (vaddr_t start, vsize_t end);
305: void sa1_cache_cleanD_rng (vaddr_t start, vsize_t end);
306: void sa1_cache_purgeID_rng (vaddr_t start, vsize_t end);
307: void sa1_cache_purgeD_rng (vaddr_t start, vsize_t end);
308: void sa1_cache_syncI_rng (vaddr_t start, vsize_t end);
309:
310: #endif
311:
312: #ifdef CPU_ARM9
313: void arm9_setttb (u_int);
314:
315: void arm9_tlb_flushID_SE (u_int va);
316:
317: void arm9_cache_flushID (void);
318: void arm9_cache_flushID_SE (u_int);
319: void arm9_cache_flushI (void);
320: void arm9_cache_flushI_SE (u_int);
321: void arm9_cache_flushD (void);
322: void arm9_cache_flushD_SE (u_int);
323:
324: void arm9_cache_cleanID (void);
325:
326: void arm9_cache_syncI (void);
327: void arm9_cache_flushID_rng (vaddr_t, vsize_t);
328: void arm9_cache_flushD_rng (vaddr_t, vsize_t);
329: void arm9_cache_syncI_rng (vaddr_t, vsize_t);
330:
331: void arm9_context_switch (void);
332:
333: void arm9_setup (char *string);
334: #endif
335:
336: #ifdef CPU_ARM10
337: void arm10_setttb (u_int);
338:
339: void arm10_tlb_flushID_SE (u_int);
340: void arm10_tlb_flushI_SE (u_int);
341:
342: void arm10_icache_sync_all (void);
343: void arm10_icache_sync_range (vaddr_t, vsize_t);
344:
345: void arm10_dcache_wbinv_all (void);
346: void arm10_dcache_wbinv_range (vaddr_t, vsize_t);
347: void arm10_dcache_inv_range (vaddr_t, vsize_t);
348: void arm10_dcache_wb_range (vaddr_t, vsize_t);
349:
350: void arm10_idcache_wbinv_all (void);
351: void arm10_idcache_wbinv_range (vaddr_t, vsize_t);
352:
353: void arm10_context_switch (void);
354:
355: void arm10_setup (char *string);
356:
357: extern unsigned arm10_dcache_sets_max;
358: extern unsigned arm10_dcache_sets_inc;
359: extern unsigned arm10_dcache_index_max;
360: extern unsigned arm10_dcache_index_inc;
361: #endif
362:
363: #if defined(CPU_ARM9) || defined(CPU_ARM10) || defined(CPU_SA110) || \
364: defined(CPU_SA1100) || defined(CPU_SA1110) || \
365: defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
366: defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425)
367:
368: void armv4_tlb_flushID (void);
369: void armv4_tlb_flushI (void);
370: void armv4_tlb_flushD (void);
371: void armv4_tlb_flushD_SE (u_int va);
372:
373: void armv4_drain_writebuf (void);
374: #endif
375:
376: #if defined(CPU_IXP12X0)
377: void ixp12x0_drain_readbuf (void);
378: void ixp12x0_context_switch (void);
379: void ixp12x0_setup (char *string);
380: #endif
381:
382: #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
383: defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
384: (ARM_MMU_XSCALE == 1)
385: void xscale_cpwait (void);
386:
387: void xscale_cpu_sleep (int mode);
388:
389: u_int xscale_control (u_int clear, u_int bic);
390:
391: void xscale_setttb (u_int ttb);
392:
393: void xscale_tlb_flushID_SE (u_int va);
394:
395: void xscale_cache_flushID (void);
396: void xscale_cache_flushI (void);
397: void xscale_cache_flushD (void);
398: void xscale_cache_flushD_SE (u_int entry);
399:
400: void xscale_cache_cleanID (void);
401: void xscale_cache_cleanD (void);
402: void xscale_cache_cleanD_E (u_int entry);
403:
404: void xscale_cache_clean_minidata (void);
405:
406: void xscale_cache_purgeID (void);
407: void xscale_cache_purgeID_E (u_int entry);
408: void xscale_cache_purgeD (void);
409: void xscale_cache_purgeD_E (u_int entry);
410:
411: void xscale_cache_syncI (void);
412: void xscale_cache_cleanID_rng (vaddr_t start, vsize_t end);
413: void xscale_cache_cleanD_rng (vaddr_t start, vsize_t end);
414: void xscale_cache_purgeID_rng (vaddr_t start, vsize_t end);
415: void xscale_cache_purgeD_rng (vaddr_t start, vsize_t end);
416: void xscale_cache_syncI_rng (vaddr_t start, vsize_t end);
417: void xscale_cache_flushD_rng (vaddr_t start, vsize_t end);
418:
419: void xscale_context_switch (void);
420:
421: void xscale_setup (char *string);
422: #endif /* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425 */
423:
/* Legacy short-hand aliases for the cpu_*() wrappers above. */
424: #define tlb_flush cpu_tlb_flushID
425: #define setttb cpu_setttb
426: #define drain_writebuf cpu_drain_writebuf
427:
428: /*
429: * Macros for manipulating CPU interrupts
430: */
431: #ifdef __PROG32
432: /* Functions to manipulate the CPSR. */
433: static __inline u_int32_t __set_cpsr_c(u_int bic, u_int eor);
434: static __inline u_int32_t __get_cpsr(void);
435:
/*
 * Read-modify-write the CPSR: clear the bits in `bic', then toggle
 * the bits in `eor', and write the result back via `msr cpsr_c'
 * (control field only: mode and I/F interrupt-mask bits; the
 * condition flags are untouched).  Returns the CPSR value as it was
 * *before* the update, for use with restore_interrupts().
 */
436: static __inline u_int32_t
437: __set_cpsr_c(u_int bic, u_int eor)
438: {
439: u_int32_t tmp, ret;
440:
441: __asm __volatile(
442: "mrs %0, cpsr\n" /* Get the CPSR */
443: "bic %1, %0, %2\n" /* Clear bits */
444: "eor %1, %1, %3\n" /* XOR bits */
445: "msr cpsr_c, %1\n" /* Set the control field of CPSR */
446: : "=&r" (ret), "=&r" (tmp)
447: : "r" (bic), "r" (eor) : "memory");
/*
 * FIX(review): added the "memory" clobber.  This sequence changes the
 * interrupt mask, so the compiler must not reorder memory accesses
 * across it; without the clobber, loads/stores protected by a
 * disable_interrupts()/restore_interrupts() pair could legally be
 * hoisted outside the critical section.  (Same fix as FreeBSD's
 * version of this function.)
 */
448:
449: return ret;
450: }
451:
/*
 * Return the current value of the CPSR (current program status
 * register), read with `mrs'.  No side effects.
 */
452: static __inline u_int32_t
453: __get_cpsr(void)
/*
 * FIX(review): was `__get_cpsr()' -- an obsolescent non-prototype
 * definition; now matches the `(void)' forward declaration above.
 */
454: {
455: u_int32_t ret;
456:
457: __asm __volatile("mrs %0, cpsr" : "=&r" (ret));
458:
459: return ret;
460: }
461:
/*
 * disable_interrupts(mask): set the I32_bit/F32_bit bits selected by
 * `mask' in the CPSR (masking IRQ and/or FIQ) and return the previous
 * CPSR for a later restore_interrupts().
 * NOTE(review): `mask' is expanded twice -- avoid side effects in the
 * argument.
 */
462: #define disable_interrupts(mask) \
463: (__set_cpsr_c((mask) & (I32_bit | F32_bit), \
464: (mask) & (I32_bit | F32_bit)))
465:
/* enable_interrupts(mask): clear the selected mask bits (unmask). */
466: #define enable_interrupts(mask) \
467: (__set_cpsr_c((mask) & (I32_bit | F32_bit), 0))
468:
/* restore_interrupts(old): put the I/F mask bits back to a saved state. */
469: #define restore_interrupts(old_cpsr) \
470: (__set_cpsr_c((I32_bit | F32_bit), (old_cpsr) & (I32_bit | F32_bit)))
471: #else /* ! __PROG32 */
/* 26-bit mode: the same operations, performed on the PSR bits in r15. */
472: #define disable_interrupts(mask) \
473: (set_r15((mask) & (R15_IRQ_DISABLE | R15_FIQ_DISABLE), \
474: (mask) & (R15_IRQ_DISABLE | R15_FIQ_DISABLE)))
475:
476: #define enable_interrupts(mask) \
477: (set_r15((mask) & (R15_IRQ_DISABLE | R15_FIQ_DISABLE), 0))
478:
479: #define restore_interrupts(old_r15) \
480: (set_r15((R15_IRQ_DISABLE | R15_FIQ_DISABLE), \
481: (old_r15) & (R15_IRQ_DISABLE | R15_FIQ_DISABLE)))
482:
483: /* Functions to manipulate the processor control bits in r15. */
484: u_int set_r15(u_int bic, u_int eor);
485: u_int get_r15(void);
486: #endif /* __PROG32 */
487:
488: /*
489: * Functions to manipulate cpu r13
490: * (in arm/arm/setstack.S)
491: */
492:
493: void set_stackptr (u_int mode, u_int address);
494: u_int get_stackptr (u_int mode);
495:
496: /*
497: * Miscellany
498: */
499:
500: int get_pc_str_offset (void);
501:
502: /*
503: * CPU functions from locore.S
504: */
505:
506: void cpu_reset (void) __attribute__((__noreturn__));
507:
508: /*
509: * Cache info variables.
510: */
511:
512: /* PRIMARY CACHE VARIABLES */
513: extern int arm_picache_size;
514: extern int arm_picache_line_size;
515: extern int arm_picache_ways;
516:
517: extern int arm_pdcache_size; /* and unified */
518: extern int arm_pdcache_line_size;
519: extern int arm_pdcache_ways;
520:
521: extern int arm_pcache_type;
522: extern int arm_pcache_unified;
523:
524: extern int arm_dcache_align;
525: extern int arm_dcache_align_mask;
526:
527: #endif /* _KERNEL */
528: #endif /* _ARM32_CPUFUNC_H_ */
529:
530: /* End of cpufunc.h */
CVSweb