/* Annotation of sys/arch/amd64/include/intr.h, Revision 1.1.1.1 (CVSweb annotation header) */
/*	$OpenBSD: intr.h,v 1.11 2007/05/25 16:22:11 art Exp $	*/
/*	$NetBSD: intr.h,v 1.2 2003/05/04 22:01:56 fvdl Exp $	*/

/*-
 * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum, and by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
39:
40: #ifndef _X86_INTR_H_
41: #define _X86_INTR_H_
42:
43: #include <machine/intrdefs.h>
44:
45: #ifndef _LOCORE
46: #include <machine/cpu.h>
47:
48: #include <sys/evcount.h>
49:
/*
 * Struct describing an interrupt source for a CPU. struct cpu_info
 * has an array of MAX_INTR_SOURCES of these. The index in the array
 * is equal to the stub number of the stub code as present in vector.S.
 *
 * The primary CPU's array of interrupt sources has its first 16
 * entries reserved for legacy ISA irq handlers. This means that
 * they have a 1:1 mapping of array index to irq_num. This is not
 * true for interrupts that come in through IO APICs; to find
 * their source, go through ci->ci_isources[index].is_pic.
 *
 * It's possible to always maintain a 1:1 mapping, but that means
 * limiting the total number of interrupt sources to MAX_INTR_SOURCES
 * (32), instead of 32 per CPU. It also would mean that having multiple
 * IO APICs which deliver interrupts from an equal pin number would
 * overlap if they were to be sent to the same CPU.
 */
67:
/*
 * Entry points into one interrupt stub (generated in vector.S):
 * the initial entry, plus the recurse/resume points used when a
 * pended interrupt is replayed (cf. is_recurse/is_resume below).
 */
struct intrstub {
	void	*ist_entry;	/* initial entry point */
	void	*ist_recurse;	/* re-entry for spllower */
	void	*ist_resume;	/* re-entry for doreti */
};
73:
/* Per-CPU interrupt source (see the block comment above). */
struct intrsource {
	int is_maxlevel;		/* max. IPL for this source */
	int is_pin;			/* IRQ for legacy; pin for IO APIC */
	struct intrhand *is_handlers;	/* handler chain */
	struct pic *is_pic;		/* originating PIC */
	void *is_recurse;		/* entry for spllower */
	void *is_resume;		/* entry for doreti */
	char is_evname[32];		/* event counter name */
	int is_flags;			/* IS_* flags, see below */
	int is_type;			/* level, edge */
	int is_idtvec;			/* IDT vector number */
	int is_minlevel;		/* min. IPL (counterpart of is_maxlevel) */
};

/* is_flags values */
#define IS_LEGACY	0x0001	/* legacy ISA irq source */
#define IS_IPI		0x0002	/* inter-processor interrupt source */
#define IS_LOG		0x0004
91:
92:
93: /*
94: * Interrupt handler chains. *_intr_establish() insert a handler into
95: * the list. The handler is called with its (single) argument.
96: */
97:
98: struct intrhand {
99: int (*ih_fun)(void *);
100: void *ih_arg;
101: int ih_level;
102: struct intrhand *ih_next;
103: int ih_pin;
104: int ih_slot;
105: struct cpu_info *ih_cpu;
106: int ih_irq;
107: struct evcount ih_count;
108: };
109:
/* Per-CPU IPL mask/unmask tables, indexed by level. */
#define IMASK(ci,level)		(ci)->ci_imask[(level)]
#define IUNMASK(ci,level)	(ci)->ci_iunmask[(level)]

extern void Xspllower(int);

int splraise(int);		/* raise IPL, return previous */
int spllower(int);		/* lower IPL, return previous */
void softintr(int);		/* request a software interrupt */
118:
/*
 * Convert an spl level to the local APIC priority: the IPL occupies
 * the upper nibble of the vector number.
 */
#define APIC_LEVEL(l)	((l) << 4)

/*
 * Compiler barrier: prevent reordering of instructions.
 * XXX something similar will move to <sys/cdefs.h> or thereabouts.
 * This prevents the compiler from reordering code around this
 * "instruction", acting as a sequence point for code generation.
 */
#define __splbarrier() __asm __volatile("":::"memory")
133:
/*
 * Hardware interrupt masks
 */
#define splbio()	splraise(IPL_BIO)
#define splnet()	splraise(IPL_NET)
#define spltty()	splraise(IPL_TTY)
#define splaudio()	splraise(IPL_AUDIO)
#define splclock()	splraise(IPL_CLOCK)
#define splstatclock()	splclock()
#define splserial()	splraise(IPL_SERIAL)
#define splipi()	splraise(IPL_IPI)

/* Line printer runs at tty priority.  (Was accidentally defined twice.) */
#define spllpt()	spltty()

/*
 * Software interrupt masks
 */
#define splsoftclock()	splraise(IPL_SOFTCLOCK)
#define splsoftnet()	splraise(IPL_SOFTNET)
#define splsoftserial()	splraise(IPL_SOFTSERIAL)

/*
 * Miscellaneous
 */
#define splvm()		splraise(IPL_VM)
#define splhigh()	splraise(IPL_HIGH)
#define spl0()		spllower(IPL_NONE)
#define splsched()	splraise(IPL_SCHED)
#define spllock()	splhigh()
#define splx(x)		spllower(x)
166:
/* SPL asserts */
#ifdef DIAGNOSTIC
/*
 * Although this function is implemented in MI code, it must be in this MD
 * header because we don't want this header to include MI includes.
 */
void splassert_fail(int, int, const char *);
extern int splassert_ctl;
void splassert_check(int, const char *);
/* Verify the current IPL is at least __wantipl when checking is enabled. */
#define splassert(__wantipl)						\
do {									\
	if (splassert_ctl > 0)						\
		splassert_check(__wantipl, __func__);			\
} while (0)
#else
#define splassert(wantipl)	do { /* nothing */ } while (0)
#endif
184:
/*
 * XXX
 */
#define setsoftnet()	softintr(SIR_NET)

#define IPLSHIFT	4	/* The upper nibble of vectors is the IPL. */
#define IPL(level)	((level) >> IPLSHIFT)	/* Extract the IPL. */
192:
193: #include <machine/pic.h>
194:
195: /*
196: * Stub declarations.
197: */
198:
199: extern void Xsoftclock(void);
200: extern void Xsoftnet(void);
201: extern void Xsoftserial(void);
202:
203: extern struct intrstub i8259_stubs[];
204: extern struct intrstub ioapic_edge_stubs[];
205: extern struct intrstub ioapic_level_stubs[];
206:
207: struct cpu_info;
208:
209: extern char idt_allocmap[];
210:
211: void intr_default_setup(void);
212: int x86_nmi(void);
213: void intr_calculatemasks(struct cpu_info *);
214: int intr_allocate_slot_cpu(struct cpu_info *, struct pic *, int, int *);
215: int intr_allocate_slot(struct pic *, int, int, int, struct cpu_info **, int *,
216: int *);
217: void *intr_establish(int, struct pic *, int, int, int, int (*)(void *),
218: void *, char *);
219: void intr_disestablish(struct intrhand *);
220: void cpu_intr_init(struct cpu_info *);
221: int intr_find_mpmapping(int bus, int pin, int *handle);
222: void intr_printconfig(void);
223:
224: #ifdef MULTIPROCESSOR
225: int x86_send_ipi(struct cpu_info *, int);
226: int x86_fast_ipi(struct cpu_info *, int);
227: void x86_broadcast_ipi(int);
228: void x86_multicast_ipi(int, int);
229: void x86_ipi_handler(void);
230: void x86_intlock(struct intrframe);
231: void x86_intunlock(struct intrframe);
232: void x86_softintlock(void);
233: void x86_softintunlock(void);
234: void x86_setperf_ipi(struct cpu_info *);
235:
236: extern void (*ipifunc[X86_NIPI])(struct cpu_info *);
237: #endif
238:
239: #endif /* !_LOCORE */
240:
241: /*
242: * Generic software interrupt support.
243: */
244:
/* Software interrupt numbers. */
#define X86_SOFTINTR_SOFTCLOCK		0
#define X86_SOFTINTR_SOFTNET		1
#define X86_SOFTINTR_SOFTSERIAL		2
#define X86_NSOFTINTR			3	/* how many of the above */
249:
250: #ifndef _LOCORE
251: #include <sys/queue.h>
252:
/* One registered soft interrupt handler. */
struct x86_soft_intrhand {
	TAILQ_ENTRY(x86_soft_intrhand) sih_q;	/* pending-queue linkage */
	struct x86_soft_intr *sih_intrhead;	/* owning soft interrupt source */
	void (*sih_fn)(void *);			/* handler function */
	void *sih_arg;				/* argument passed to sih_fn */
	int sih_pending;			/* nonzero while on the queue */
};
261:
262: struct x86_soft_intr {
263: TAILQ_HEAD(, x86_soft_intrhand)
264: softintr_q;
265: int softintr_ssir;
266: struct simplelock softintr_slock;
267: };
268:
/* Acquire the soft interrupt lock while raised to splhigh; s saves the IPL. */
#define x86_softintr_lock(si, s)					\
do {									\
	(s) = splhigh();						\
	simple_lock(&si->softintr_slock);				\
} while (/*CONSTCOND*/ 0)

/* Release the soft interrupt lock and restore the saved IPL. */
#define x86_softintr_unlock(si, s)					\
do {									\
	simple_unlock(&si->softintr_slock);				\
	splx((s));							\
} while (/*CONSTCOND*/ 0)
280:
void *softintr_establish(int, void (*)(void *), void *);	/* register a handler */
void softintr_disestablish(void *);				/* remove a handler */
void softintr_init(void);
void softintr_dispatch(int);					/* run pending handlers */
285:
/*
 * Schedule a soft interrupt handler: queue it on its source (at most
 * once — sih_pending guards against double insertion) and request the
 * corresponding soft interrupt.
 */
#define softintr_schedule(arg)						\
do {									\
	struct x86_soft_intrhand *__sih = (arg);			\
	struct x86_soft_intr *__si = __sih->sih_intrhead;		\
	int __s;							\
									\
	x86_softintr_lock(__si, __s);					\
	if (__sih->sih_pending == 0) {					\
		TAILQ_INSERT_TAIL(&__si->softintr_q, __sih, sih_q);	\
		__sih->sih_pending = 1;					\
		softintr(__si->softintr_ssir);				\
	}								\
	x86_softintr_unlock(__si, __s);					\
} while (/*CONSTCOND*/ 0)
300: #endif /* _LOCORE */
301:
302: #endif /* !_X86_INTR_H_ */
/* CVSweb (annotation page footer) */