/* Annotation of sys/arch/powerpc/include/cpu.h, Revision 1.1.1.1 (CVSweb artifact) */
1.1 nbrk 1: /* $OpenBSD: cpu.h,v 1.31 2007/03/23 21:06:05 miod Exp $ */
2: /* $NetBSD: cpu.h,v 1.1 1996/09/30 16:34:21 ws Exp $ */
3:
4: /*
5: * Copyright (C) 1995, 1996 Wolfgang Solfrank.
6: * Copyright (C) 1995, 1996 TooLs GmbH.
7: * All rights reserved.
8: *
9: * Redistribution and use in source and binary forms, with or without
10: * modification, are permitted provided that the following conditions
11: * are met:
12: * 1. Redistributions of source code must retain the above copyright
13: * notice, this list of conditions and the following disclaimer.
14: * 2. Redistributions in binary form must reproduce the above copyright
15: * notice, this list of conditions and the following disclaimer in the
16: * documentation and/or other materials provided with the distribution.
17: * 3. All advertising materials mentioning features or use of this software
18: * must display the following acknowledgement:
19: * This product includes software developed by TooLs GmbH.
20: * 4. The name of TooLs GmbH may not be used to endorse or promote products
21: * derived from this software without specific prior written permission.
22: *
23: * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
24: * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25: * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26: * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
27: * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
28: * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
29: * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
30: * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
31: * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
32: * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33: */
34: #ifndef _POWERPC_CPU_H_
35: #define _POWERPC_CPU_H_
36:
37: #include <machine/frame.h>
38:
39: #include <sys/device.h>
40: #include <sys/lock.h>
41: #include <sys/sched.h>
42:
/*
 * Per-CPU state.  One instance exists per processor (see the cpu_info[]
 * array below); curcpu() fetches the running CPU's instance via SPRG0.
 *
 * NOTE(review): the save areas near the end are likely addressed by
 * fixed offsets from assembly -- do not reorder fields without auditing
 * the locore/trap code.
 */
struct cpu_info {
	struct device *ci_dev;		/* our device */
	struct schedstate_percpu ci_schedstate;	/* scheduler state */

	struct proc *ci_curproc;	/* currently running proc */

	struct pcb *ci_curpcb;		/* current pcb */
	struct pmap *ci_curpm;		/* current pmap */
	struct proc *ci_fpuproc;	/* proc presumably owning the FPU state */
	struct proc *ci_vecproc;	/* proc presumably owning the AltiVec state */
	struct pcb *ci_idle_pcb;	/* PA of our idle pcb */
	int ci_cpuid;			/* logical CPU id (0 == primary, see CPU_IS_PRIMARY) */

	volatile int ci_astpending;	/* AST requested (set by signotify et al.) */
	volatile int ci_want_resched;	/* reschedule requested (set by need_resched) */
	volatile int ci_cpl;		/* current interrupt priority level */
	volatile int ci_iactive;	/* interrupt dispatch active */
	volatile int ci_ipending;	/* pending (soft) interrupts */
	int ci_intrdepth;		/* interrupt nesting depth (see CLKF_INTR) */
	char *ci_intstk;		/* interrupt stack */
#define CPUSAVE_LEN 8
	register_t ci_tempsave[CPUSAVE_LEN];	/* scratch register save area */
	register_t ci_ddbsave[CPUSAVE_LEN];	/* ddb trap save area */
#define DISISAVE_LEN 4
	register_t ci_disisave[DISISAVE_LEN];	/* ISI fault save area */
};
69:
/*
 * Return the cpu_info of the processor we are running on.
 * Assumes SPRG0 holds the per-CPU cpu_info pointer (established
 * elsewhere during CPU startup); "mfsprg %0,0" reads it back.
 */
static __inline struct cpu_info *
curcpu(void)
{
	struct cpu_info *ci;

	__asm volatile ("mfsprg %0,0" : "=r"(ci));
	return ci;
}
78:
/* Convenience accessors for the current CPU's pcb and pmap. */
#define curpcb (curcpu()->ci_curpcb)
#define curpm (curcpu()->ci_curpm)

/* Autoconf unit number of a CPU's device. */
#define CPU_INFO_UNIT(ci) ((ci)->ci_dev->dv_unit)
83:
#ifdef MULTIPROCESSOR

#define PPC_MAXPROCS 4		/* maximum number of supported CPUs */

/*
 * Return the number of the CPU we run on, read from the PIR
 * (processor ID register, SPR 1023 -- see FUNC_SPR(1023, pir) below).
 */
static __inline int
cpu_number(void)
{
	int pir;

	__asm ("mfspr %0,1023" : "=r"(pir));
	return pir;
}

void cpu_boot_secondary_processors(void);

/* CPU with id 0 is the boot (primary) processor. */
#define CPU_IS_PRIMARY(ci) ((ci)->ci_cpuid == 0)
#define CPU_INFO_ITERATOR int
/* Walk every cpu_info slot, whether or not the CPU attached. */
#define CPU_INFO_FOREACH(cii, ci) \
	for (cii = 0, ci = &cpu_info[0]; cii < PPC_MAXPROCS; cii++, ci++)

#else

#define PPC_MAXPROCS 1

#define cpu_number() 0

#define CPU_IS_PRIMARY(ci) 1
#define CPU_INFO_ITERATOR int
/* Uniprocessor: "iterate" over exactly the one running CPU. */
#define CPU_INFO_FOREACH(cii, ci) \
	for (cii = 0, ci = curcpu(); ci != NULL; ci = NULL)

#endif
116:
extern struct cpu_info cpu_info[PPC_MAXPROCS];

/* Clock-interrupt frame predicates. */
#define CLKF_USERMODE(frame) (((frame)->srr1 & PSL_PR) != 0)	/* tick taken in user mode? */
#define CLKF_PC(frame) ((frame)->srr0)		/* interrupted program counter */
#define CLKF_INTR(frame) ((frame)->depth != 0)	/* nested inside another interrupt? */

/*
 * This is used during profiling to integrate system time.
 */
#define PROC_PC(p) (trapframe(p)->srr0)
127:
/* Nothing to tear down after a dead proc on this architecture. */
#define cpu_wait(p) do { /* nothing */ } while (0)

void delay(unsigned);
#define DELAY(n) delay(n)

/*
 * Request a reschedule (and an AST) on the given CPU.
 * Fix: the argument is now fully parenthesized; the previous
 * expansion used a bare `ci->', which miscompiled for any
 * non-identifier argument such as `&cpu_info[i]'.
 */
#define need_resched(ci) ((ci)->ci_want_resched = 1, (ci)->ci_astpending = 1)
#define need_proftick(p) do { curcpu()->ci_astpending = 1; } while (0)
#define signotify(p) (curcpu()->ci_astpending = 1)
136:
extern char *bootpath;		/* boot device path (defined elsewhere) */

#ifndef CACHELINESIZE
#define CACHELINESIZE 32	/* For now XXX -- assumed cache line size in bytes */
#endif
142:
/*
 * Make the instruction cache coherent with memory after code has been
 * written to [from, from+len): write back the data cache (dcbst),
 * sync, invalidate the matching icache lines (icbi), then isync.
 *
 * len is first widened by from's offset within its cache line so the
 * countdown covers every line the range touches; dcbst/icbi operate on
 * the whole line containing the given address, so p itself need not be
 * aligned down.
 */
static __inline void
syncicache(void *from, int len)
{
	int l;
	char *p = from;

	len = len + (((u_int32_t) from) & (CACHELINESIZE - 1));
	l = len;

	/* Push every dirty data cache line in the range to memory. */
	do {
		__asm __volatile ("dcbst 0,%0" :: "r"(p));
		p += CACHELINESIZE;
	} while ((l -= CACHELINESIZE) > 0);
	__asm __volatile ("sync");	/* wait for the writebacks to complete */
	p = from;
	l = len;
	/* Invalidate the corresponding instruction cache lines. */
	do {
		__asm __volatile ("icbi 0,%0" :: "r"(p));
		p += CACHELINESIZE;
	} while ((l -= CACHELINESIZE) > 0);
	__asm __volatile ("isync");	/* discard any prefetched instructions */
}
165:
/*
 * Invalidate -- discard without writeback -- the data cache lines
 * covering [from, from+len) using dcbi; any modified data in those
 * lines is lost.  As in syncicache(), len is widened by from's
 * intra-line offset so every touched line is covered.
 */
static __inline void
invdcache(void *from, int len)
{
	int l;
	char *p = from;

	len = len + (((u_int32_t) from) & (CACHELINESIZE - 1));
	l = len;

	do {
		__asm __volatile ("dcbi 0,%0" :: "r"(p));
		p += CACHELINESIZE;
	} while ((l -= CACHELINESIZE) > 0);
	__asm __volatile ("sync");
}
181:
/*
 * FUNC_SPR(n, name) generates a pair of inline accessors for special
 * purpose register n: ppc_mf<name>() reads it, ppc_mt<name>(val)
 * writes it.
 *
 * Fix: the trailing line-continuation backslash after the final `}'
 * has been removed.  It made the (currently blank) following source
 * line part of the macro, and would have silently swallowed any
 * statement later inserted there.
 */
#define FUNC_SPR(n, name) \
static __inline u_int32_t ppc_mf ## name (void) \
{ \
	u_int32_t ret; \
	__asm __volatile ("mfspr %0," # n : "=r" (ret)); \
	return ret; \
} \
static __inline void ppc_mt ## name (u_int32_t val) \
{ \
	__asm __volatile ("mtspr "# n ",%0" :: "r" (val)); \
}
/* General and exception-state SPRs. */
FUNC_SPR(0, mq)
FUNC_SPR(1, xer)
FUNC_SPR(4, rtcu)
FUNC_SPR(5, rtcl)
FUNC_SPR(8, lr)
FUNC_SPR(9, ctr)
FUNC_SPR(18, dsisr)
FUNC_SPR(19, dar)
FUNC_SPR(22, dec)
FUNC_SPR(25, sdr1)
FUNC_SPR(26, srr0)
FUNC_SPR(27, srr1)
FUNC_SPR(256, vrsave)
/* Software-use scratch registers (SPRG0 holds curcpu's cpu_info). */
FUNC_SPR(272, sprg0)
FUNC_SPR(273, sprg1)
FUNC_SPR(274, sprg2)
FUNC_SPR(275, sprg3)
FUNC_SPR(280, asr)
FUNC_SPR(282, ear)
FUNC_SPR(287, pvr)
/* Instruction and data block address translation registers. */
FUNC_SPR(528, ibat0u)
FUNC_SPR(529, ibat0l)
FUNC_SPR(530, ibat1u)
FUNC_SPR(531, ibat1l)
FUNC_SPR(532, ibat2u)
FUNC_SPR(533, ibat2l)
FUNC_SPR(534, ibat3u)
FUNC_SPR(535, ibat3l)
FUNC_SPR(536, dbat0u)
FUNC_SPR(537, dbat0l)
FUNC_SPR(538, dbat1u)
FUNC_SPR(539, dbat1l)
FUNC_SPR(540, dbat2u)
FUNC_SPR(541, dbat2l)
FUNC_SPR(542, dbat3u)
FUNC_SPR(543, dbat3l)
/* Implementation-specific control registers. */
FUNC_SPR(1008, hid0)
FUNC_SPR(1009, hid1)
FUNC_SPR(1010, iabr)
FUNC_SPR(1017, l2cr)
FUNC_SPR(1018, l3cr)
FUNC_SPR(1013, dabr)
FUNC_SPR(1023, pir)
237:
/*
 * Read the lower 32 bits of the time base register.
 * Fix: `ret' was declared `int' although the function returns
 * u_int32_t; the types now agree (the raw TB value is unsigned).
 */
static __inline u_int32_t
ppc_mftbl (void)
{
	u_int32_t ret;
	__asm __volatile ("mftb %0" : "=r" (ret));
	return ret;
}
245:
/*
 * Read the full 64-bit time base on a 32-bit CPU.  The upper half is
 * read before and after the lower half; if the two upper reads differ
 * (a carry happened in between), the sequence is retried -- the
 * standard mftbu/mftb loop.
 *
 * NOTE(review): "%0" names the 64-bit `tb' register pair and "%0+1"
 * its second (low-word) register; this relies on the compiler's
 * register-pair allocation on 32-bit powerpc -- confirm before reuse.
 */
static __inline u_int64_t
ppc_mftb(void)
{
	u_long scratch;
	u_int64_t tb;

	__asm __volatile ("1: mftbu %0; mftb %0+1; mftbu %1;"
	    " cmpw 0,%0,%1; bne 1b" : "=r"(tb), "=r"(scratch));
	return tb;
}
256:
/*
 * Read the machine state register.
 * Fix: `ret' was declared `int' although the function returns
 * u_int32_t; the types now agree (MSR is a bit mask, not a signed
 * quantity).
 */
static __inline u_int32_t
ppc_mfmsr (void)
{
	u_int32_t ret;
	__asm __volatile ("mfmsr %0" : "=r" (ret));
	return ret;
}
264:
/* Write the machine state register. */
static __inline void
ppc_mtmsr (u_int32_t val)
{
	__asm __volatile ("mtmsr %0" :: "r" (val));
}
270:
/*
 * Write a segment register indirectly: mtsrin picks the register from
 * the high-order bits of sn_shifted, so the caller passes the segment
 * number pre-shifted into place.
 */
static __inline void
ppc_mtsrin(u_int32_t val, u_int32_t sn_shifted)
{
	__asm __volatile ("mtsrin %0,%1" :: "r"(val), "r"(sn_shifted));
}
276:
/* SCOM control/data register accessors, implemented out of line
 * (presumably only meaningful on 64-bit-capable CPUs -- see
 * ppc_proc_is_64b below). */
u_int64_t ppc64_mfscomc(void);
void ppc_mtscomc(u_int32_t);
void ppc64_mtscomc(u_int64_t);
u_int64_t ppc64_mfscomd(void);
void ppc_mtscomd(u_int32_t);
282:
283: #include <machine/psl.h>
284:
285: /*
286: * General functions to enable and disable interrupts
287: * without having inlined assembly code in many functions.
288: */
289: static __inline void
290: ppc_intr_enable(int enable)
291: {
292: u_int32_t msr;
293: if (enable != 0) {
294: msr = ppc_mfmsr();
295: msr |= PSL_EE;
296: ppc_mtmsr(msr);
297: }
298: }
299:
300: static __inline int
301: ppc_intr_disable(void)
302: {
303: u_int32_t emsr, dmsr;
304: emsr = ppc_mfmsr();
305: dmsr = emsr & ~PSL_EE;
306: ppc_mtmsr(dmsr);
307: return (emsr & PSL_EE);
308: }
309:
int ppc_cpuspeed(int *);	/* query CPU clock speed (implemented elsewhere) */
void ppc_check_procid(void);
extern int ppc_proc_is_64b;	/* nonzero when running on a 64-bit CPU */
313:
/*
 * PowerPC CPU types
 * (presumably the version field of the processor version register --
 * see ppc_mfpvr above; verify against ppc_check_procid's user.)
 */
#define PPC_CPU_MPC601 1
#define PPC_CPU_MPC603 3
#define PPC_CPU_MPC604 4
#define PPC_CPU_MPC603e 6
#define PPC_CPU_MPC603ev 7
#define PPC_CPU_MPC750 8
#define PPC_CPU_MPC604ev 9
#define PPC_CPU_MPC7400 12
#define PPC_CPU_IBM970FX 0x003c
#define PPC_CPU_IBM970MP 0x0044
#define PPC_CPU_IBM750FX 0x7000
#define PPC_CPU_MPC7410 0x800c
#define PPC_CPU_MPC7447A 0x8003
#define PPC_CPU_MPC7448 0x8004
#define PPC_CPU_MPC7450 0x8000
#define PPC_CPU_MPC7455 0x8001
#define PPC_CPU_MPC7457 0x8002
334:
335: /*
336: * This needs to be included late since it relies on definitions higher
337: * up in this file.
338: */
339: #if defined(MULTIPROCESSOR) && defined(_KERNEL)
340: #include <sys/mplock.h>
341: #endif
342:
343: #endif /* _POWERPC_CPU_H_ */
/* CVSweb */