Annotation of sys/arch/vax/include/macros.h, Revision 1.1.1.1
1.1 nbrk 1: /* $OpenBSD: macros.h,v 1.14 2006/11/06 21:31:36 miod Exp $ */
2: /* $NetBSD: macros.h,v 1.20 2000/07/19 01:02:52 matt Exp $ */
3:
4: /*
5: * Copyright (c) 1994, 1998, 2000 Ludd, University of Luleå, Sweden.
6: * All rights reserved.
7: *
8: * Redistribution and use in source and binary forms, with or without
9: * modification, are permitted provided that the following conditions
10: * are met:
11: * 1. Redistributions of source code must retain the above copyright
12: * notice, this list of conditions and the following disclaimer.
13: * 2. Redistributions in binary form must reproduce the above copyright
14: * notice, this list of conditions and the following disclaimer in the
15: * documentation and/or other materials provided with the distribution.
16: * 3. All advertising materials mentioning features or use of this software
17: * must display the following acknowledgement:
18: *	This product includes software developed at Ludd, University of Luleå.
19: * 4. The name of the author may not be used to endorse or promote products
20: *    derived from this software without specific prior written permission.
21: *
22: * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
23: * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24: * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25: * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
26: * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27: * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28: * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29: * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30: * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31: * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32: */
33:
34: /* All bugs are subject to removal without further notice */
35:
36: #if !defined(_VAX_MACROS_H_) && !defined(lint)
37: #define _VAX_MACROS_H_
38:
39: /* Here general macros are supposed to be stored */
40:
/*
 * ffs() - find first (least significant) set bit, 1-based; returns 0
 * when no bit is set.  Uses the VAX `ffs' instruction, which scans a
 * 32-bit field and sets Z when no set bit is found.
 *
 * The asm previously relied on the old GCC extension that allowed raw
 * newlines inside string literals (rejected by every modern compiler);
 * standard adjacent-literal concatenation with explicit "\n\t" is used
 * instead -- the emitted assembly is unchanged.
 */
static __inline__ int
ffs(int reg)
{
	register int val;

	__asm__ __volatile ("ffs $0,$32,%1,%0\n\t"
			    "bneq 1f\n\t"	/* found a bit? */
			    "mnegl $1,%0\n"	/* no: result = -1, so +1 => 0 */
			    "1:\tincl %0"	/* convert 0-based to 1-based */
	    : "=&r" (val)
	    : "r" (reg) );
	return val;
}
54:
/*
 * memcpy() - copy len bytes from `from' to `toe' with the VAX movc3
 * string instruction; returns the destination pointer.
 * movc3 uses r0-r5 as implicit scratch registers, hence the clobbers.
 * NOTE(review): movc3 moves at most 65535 bytes and, unlike memset()
 * below, there is no blkfill-style fallback -- presumably callers
 * never pass a larger len; confirm before relying on this.
 */
static __inline__ void *
memcpy(void *toe, const void *from, size_t len)
{
	__asm__ __volatile ("movc3 %0,(%1),(%2)"
		:
		: "r" (len),"r" (from),"r"(toe)
		:"r0","r1","r2","r3","r4","r5","memory","cc");
	return toe;
}
/*
 * memmove() - identical implementation to memcpy() above; the VAX
 * movc3 instruction is documented to handle overlapping source and
 * destination correctly, which is why one instruction serves both
 * entry points (see the VAX architecture manual for MOVC3).
 * NOTE(review): same 65535-byte movc3 limit as memcpy().
 */
static __inline__ void *
memmove(void *toe, const void *from, size_t len)
{
	__asm__ __volatile ("movc3 %0,(%1),(%2)"
		:
		: "r" (len),"r" (from),"r"(toe)
		:"r0","r1","r2","r3","r4","r5","memory","cc");
	return toe;
}
73:
#ifdef notnow
/*
 * bcopy() - BSD-style copy; note the (src, dst, len) argument order,
 * the reverse of memcpy().  Same movc3 implementation as above.
 * Compiled out: `notnow' is never defined in this file.
 */
static __inline__ void
bcopy(const void *from, void *toe, size_t len)
{
	__asm__ __volatile ("movc3 %0,(%1),(%2)"
		:
		: "r" (len),"r" (from),"r"(toe)
		:"r0","r1","r2","r3","r4","r5","memory","cc");
}
#endif
84:
85: void blkfill(void *, int, size_t);
86:
/*
 * memset() - fill len bytes at block with byte value c; returns block.
 * movc5 $0,(%0),%2,%1,(%0) is a pure fill: zero-length source, fill
 * character %2 (c), destination length %1 (len).  movc5 handles at
 * most 65535 bytes, so larger requests go to the out-of-line
 * blkfill() declared above.
 */
static __inline__ void *
memset(void *block, int c, size_t len)
{
	if (len > 65535)
		blkfill(block, c, len);
	else {
		__asm__ __volatile ("movc5 $0,(%0),%2,%1,(%0)"
			:
			: "r" (block), "r" (len), "r"(c)
			:"r0","r1","r2","r3","r4","r5","memory","cc");
	}
	return block;
}
100:
/*
 * bzero() - clear len bytes at block.  Same movc5 fill as memset(),
 * but with the fill character hard-wired to $0; falls back to
 * blkfill() for spans beyond movc5's 65535-byte limit.
 */
static __inline__ void
bzero(void *block, size_t len)
{
	if (len > 65535)
		blkfill(block, 0, len);
	else {
		__asm__ __volatile ("movc5 $0,(%0),$0,%1,(%0)"
			:
			: "r" (block), "r" (len)
			:"r0","r1","r2","r3","r4","r5","memory","cc");
	}
}
113:
/*
 * XXX - the return value of memcmp is wrong: cmpc3 leaves in r0 the
 * number of bytes remaining when the comparison terminated, not the
 * signed byte difference the C standard requires.  Only an
 * equal/unequal (zero/non-zero) test of the result is reliable.
 * NOTE(review): cmpc3 compares at most 65535 bytes -- presumably len
 * always fits; confirm against callers.
 */
static __inline__ int
memcmp(const void *b1, const void *b2, size_t len)
{
	register int ret;

	__asm__ __volatile("cmpc3 %3,(%1),(%2);movl r0,%0"
		: "=r" (ret)
		: "r" (b1), "r" (b2), "r" (len)
		: "r0","r1","r2","r3" );
	return ret;
}
126:
/*
 * bcmp() - same cmpc3 sequence as memcmp() above.  The result is the
 * residual byte count from r0 (0 when equal, non-zero otherwise);
 * since bcmp()'s contract only promises zero/non-zero, this one is
 * correct as-is, unlike memcmp().
 */
static __inline__ int
bcmp(const void *b1, const void *b2, size_t len)
{
	register int ret;

	__asm__ __volatile("cmpc3 %3,(%1),(%2);movl r0,%0"
		: "=r" (ret)
		: "r" (b1), "r" (b2), "r" (len)
		: "r0","r1","r2","r3" );
	return ret;
}
138:
139: /* Begin nya */
/*
 * strlen() - locc $0,$65535,(%1) scans up to 65535 bytes for the NUL
 * terminator; r0 comes back as the count of bytes not yet examined,
 * so 65535 - r0 is the string length.
 * NOTE(review): a string of 65535+ bytes (or an unterminated buffer)
 * yields a wrong result -- locc silently stops at its limit.
 */
static __inline__ size_t
strlen(const char *cp)
{
	register size_t ret;

	__asm__ __volatile("locc $0,$65535,(%1);subl3 r0,$65535,%0"
		: "=r" (ret)
		: "r" (cp)
		: "r0","r1","cc" );
	return ret;
}
151:
/*
 * strncat() - append at most count bytes of c2 to the NUL-terminated
 * string cp, then NUL-terminate; returns cp.
 *
 *   locc $0,%2,(%1)     - find NUL in c2 within count bytes (r0 = rest)
 *   subl3 r0,%2,r2      - r2 = number of bytes of c2 to copy
 *   locc $0,$65535,(%0) - find end of cp (r1 -> its NUL)
 *   movc3 r2,(%1),(r1)  - append; r3 ends up just past the copied bytes
 *   movb $0,(r3)        - terminate the result
 *
 * The asm previously relied on the old GCC extension allowing raw
 * newlines inside string literals; standard adjacent-literal
 * concatenation with an explicit "\n" is used instead -- the emitted
 * assembly is unchanged.
 */
static __inline__ char *
strncat(char *cp, const char *c2, size_t count)
{
	__asm__ __volatile("locc $0,%2,(%1);subl3 r0,%2,r2;\n\t"
			   "locc $0,$65535,(%0);movc3 r2,(%1),(r1);movb $0,(r3)"
		:
		: "r" (cp), "r" (c2), "g"(count)
		: "r0","r1","r2","r3","r4","r5","memory","cc");
	return cp;
}
162:
/*
 * strncpy() - copy at most len bytes of c2 into cp; returns cp.
 *
 *   movl %2,r2           - r2 = len (default copy length)
 *   locc $0,r2,(%1)      - look for NUL in the first len bytes of c2
 *   beql 1f              - not found: copy exactly len bytes, no NUL
 *   subl3 r0,%2,r2       - found: r2 = source length (sans NUL)
 *   clrb (%0)[r2]        - NUL-terminate the destination
 *   1: movc3 r2,(%1),(%0) - perform the copy
 *
 * NOTE(review): unlike standard strncpy(), this writes at most one
 * terminating NUL instead of zero-padding the remainder of the
 * destination -- confirm callers do not depend on the padding.
 *
 * The asm previously used the old GCC raw-newline-in-string-literal
 * extension; standard adjacent string literals with an explicit "\n"
 * are used instead -- the emitted assembly is unchanged.
 */
static __inline__ char *
strncpy(char *cp, const char *c2, size_t len)
{
	__asm__ __volatile("movl %2,r2;locc $0,r2,(%1);beql 1f;subl3 r0,%2,r2;\n\t"
			   "clrb (%0)[r2];1:;movc3 r2,(%1),(%0)"
		:
		: "r" (cp), "r" (c2), "g"(len)
		: "r0","r1","r2","r3","r4","r5","memory","cc");
	return cp;
}
173:
/*
 * memchr() - locate byte c in the first len bytes at cp; returns a
 * pointer to the match or NULL.  locc leaves r1 pointing at the
 * matching byte with r0 non-zero when found; on no match the bneq is
 * not taken, r1 is cleared, and NULL is returned via r1.
 * NOTE(review): locc examines at most 65535 bytes; presumably larger
 * len values never occur here -- confirm against callers.
 */
static __inline__ void *
memchr(const void *cp, int c, size_t len)
{
	void *ret;
	__asm__ __volatile("locc %2,%3,(%1);bneq 1f;clrl r1;1:movl r1,%0"
		: "=g"(ret)
		: "r" (cp), "r" (c), "g"(len)
		: "r0","r1","cc");
	return ret;
}
184:
/*
 * strcmp() - compare NUL-terminated strings; returns <0, 0 or >0.
 *
 *   locc $0,$65535,(%1) - find NUL in cp (within 65535 bytes)
 *   subl3 r0,$65535,r0  - r0 = strlen(cp)
 *   incl r0             - include the NUL in the comparison
 *   cmpc3 r0,(%1),(%2)  - compare; r1/r3 point at the first difference
 *   beql 1f             - equal: cmpc3 left r2 = bytes remaining = 0,
 *                         which becomes the (correct) result
 *   movl $1,r2          - assume cp > c2
 *   cmpb (r1),(r3)      - compare the differing bytes
 *   bcc 1f              - cp byte >= c2 byte (unsigned): keep 1
 *   movl $-1,r2         - otherwise cp < c2
 *   1: movl r2,%0
 *
 * NOTE(review): strings of 65535+ bytes exceed the locc limit.
 *
 * The asm previously used the old GCC raw-newline-in-string-literal
 * extension; standard adjacent string literals with explicit "\n" are
 * used instead -- the emitted assembly is unchanged.
 */
static __inline__ int
strcmp(const char *cp, const char *c2)
{
	register int ret;
	__asm__ __volatile("locc $0,$65535,(%1);subl3 r0,$65535,r0;incl r0;\n\t"
			   "cmpc3 r0,(%1),(%2);beql 1f;movl $1,r2;\n\t"
			   "cmpb (r1),(r3);bcc 1f;movl $-1,r2;1:movl r2,%0"
		: "=g"(ret)
		: "r" (cp), "r" (c2)
		: "r0","r1","r2","r3","cc");
	return ret;
}
197: /* End nya */
198:
#if 0 /* unused, but no point in deleting it since it _is_ an instruction */
/*
 * locc() - direct wrapper for the VAX locc instruction: scan size
 * bytes at cp for the byte value mask.  Returns r0, the count of
 * bytes remaining from the match (0 when the byte was not found).
 * Note the K&R implicit-int declaration of `ret'.
 */
static __inline__ int locc(int mask, char *cp, size_t size){
	register ret;

	__asm__ __volatile("locc %1,%2,(%3);movl r0,%0"
		: "=r" (ret)
		: "r" (mask),"r"(size),"r"(cp)
		: "r0","r1" );
	return ret;
}
#endif
210:
/*
 * scanc() - VAX scanc instruction: scan size bytes at cp, using each
 * byte as an index into table[], stopping when (table[byte] & mask)
 * is non-zero.  Returns r0, the number of bytes remaining at the stop
 * point (0 when the whole span was scanned without a hit).
 */
static __inline__ int
scanc(u_int size, const u_char *cp, const u_char *table, int mask)
{
	register int ret;

	__asm__ __volatile("scanc %1,(%2),(%3),%4;movl r0,%0"
		: "=g"(ret)
		: "r"(size),"r"(cp),"r"(table),"r"(mask)
		: "r0","r1","r2","r3" );
	return ret;
}
222:
/*
 * skpc() - VAX skpc instruction: skip leading bytes equal to mask in
 * the size-byte span at cp.  Returns r0, the number of bytes left
 * from the first byte that differs (0 when every byte matched).
 */
static __inline__ int
skpc(int mask, size_t size, u_char *cp)
{
	register int ret;

	__asm__ __volatile("skpc %1,%2,(%3);movl r0,%0"
		: "=g"(ret)
		: "r"(mask),"r"(size),"r"(cp)
		: "r0","r1" );
	return ret;
}
234:
/*
 * Scheduler hooks: the process pointer is passed in r0 and control is
 * transferred with jsb to the Setrq/Remrq/Swtch routines, presumably
 * implemented in locore assembly (their definitions are not visible
 * in this file).  Note these are statement macros and setrunqueue/
 * remrunqueue carry a trailing semicolon inside the definition.
 */
#define setrunqueue(p)	\
	__asm__ __volatile("movl %0,r0;jsb Setrq":: "g"(p):"r0","r1","r2");

/* Remove process p from the run queue (jsb Remrq). */
#define remrunqueue(p)	\
	__asm__ __volatile("movl %0,r0;jsb Remrq":: "g"(p):"r0","r1","r2");

/* Context switch to process p: push the PSL, then jsb Swtch. */
#define cpu_switch(p)	\
	__asm__ __volatile("movl %0,r0;movpsl -(sp);jsb Swtch" \
	::"g"(p):"r0","r1","r2","r3");
244:
245: /*
246: * Interlock instructions. Used both in multiprocessor environments to
247: * lock between CPUs and in uniprocessor systems when locking is required
248: * between I/O devices and the master CPU.
249: */
/*
 * Interlocked queue insertion.
 *
 * insqti() locks and inserts an element at the tail of a queue.
 * Returns -1 (ILCK_FAILED) if the interlock failed, 1 (Q_OK) if
 * inserted OK and 0 (Q_EMPTY) if the element is the first in the
 * queue.  The VAX insqti instruction sets C on interlock failure and
 * Z when the inserted entry was the first.
 *
 * The asm previously used the old GCC extension permitting raw
 * newlines inside string literals; standard adjacent string literals
 * with explicit "\n" separators are used instead (the assembler-side
 * "#" comments are now C comments) -- the emitted code is unchanged.
 */
static __inline__ int
insqti(void *entry, void *header) {
	register int ret;

	__asm__ __volatile(
	    "mnegl $1,%0;\n\t"		/* preset result to -1 */
	    "insqti (%1),(%2);\n\t"
	    "bcs 1f;\n\t"		/* failed insert */
	    "beql 2f;\n\t"		/* jump if first entry */
	    "movl $1,%0;\n\t"
	    "brb 1f;\n"
	    "2:\tclrl %0;\n"
	    "1:;"
	    : "=&g"(ret)
	    : "r"(entry), "r"(header)
	    : "memory");

	return ret;
}
273:
/*
 * Interlocked queue removal.
 *
 * remqhi() removes the element at the head of the queue.
 * Returns -1 (ILCK_FAILED) if the interlock failed, 0 (Q_EMPTY) if
 * the queue was empty, and the address of the removed element
 * otherwise.  The VAX remqhi instruction sets C on interlock failure
 * and V when there was nothing to remove.
 *
 * The asm previously used the old GCC raw-newline string-literal
 * extension; standard adjacent string literals with explicit "\n"
 * separators are used instead (the assembler-side "#" comments are
 * now C comments) -- the emitted code is unchanged.
 */
static __inline__ void *
remqhi(void *header) {
	register void *ret;

	__asm__ __volatile(
	    "remqhi (%1),%0;\n\t"
	    "bcs 1f;\n\t"		/* failed interlock */
	    "bvs 2f;\n\t"		/* nothing was removed */
	    "brb 3f;\n"
	    "1:\tmnegl $1,%0;\n\t"
	    "brb 3f;\n"
	    "2:\tclrl %0;\n"
	    "3:;"
	    : "=&g"(ret)
	    : "r"(header)
	    : "memory");

	return ret;
}
/* Result codes returned by insqti()/remqhi() above. */
#define	ILCK_FAILED	-1	/* Interlock failed */
#define	Q_EMPTY		0	/* Queue is/was empty */
#define	Q_OK		1	/* Inserted OK */
301:
302: #endif /* _VAX_MACROS_H_ */
CVSweb