/*	$OpenBSD: cpufunc.h,v 1.2 2007/02/17 17:35:43 tom Exp $	*/
/*	$NetBSD: cpufunc.h,v 1.3 2003/05/08 10:27:43 fvdl Exp $	*/

/*-
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _AMD64_CPUFUNC_H_
#define	_AMD64_CPUFUNC_H_

/*
 * Functions to provide access to amd64-specific instructions.
 */

#include <sys/cdefs.h>
#include <sys/types.h>

#include <machine/specialreg.h>

static __inline void
x86_pause(void)
{
	__asm __volatile("pause");
}

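/*
 * Illustrative sketch (assumed caller, not part of this file): a busy-wait
 * loop would call x86_pause() in its body to hint the spin to the CPU,
 * reducing power use and pipeline flushes on SMT processors.  "flag" is a
 * hypothetical shared variable:
 *
 *	while (*flag == 0)
 *		x86_pause();
 */
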
#ifdef _KERNEL

extern int cpu_feature;

static __inline void
invlpg(u_int64_t addr)
{
	__asm __volatile("invlpg (%0)" : : "r" (addr) : "memory");
}

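/*
 * Illustrative sketch (assumed caller): after a single PTE is changed in
 * place, invlpg() drops just that page's stale translation instead of
 * flushing the whole TLB.  "pte", "new_pte" and "va" are hypothetical:
 *
 *	*pte = new_pte;
 *	invlpg(va);
 */
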
static __inline void
lidt(void *p)
{
	__asm __volatile("lidt (%0)" : : "r" (p));
}

static __inline void
lldt(u_short sel)
{
	__asm __volatile("lldt %0" : : "r" (sel));
}

static __inline void
ltr(u_short sel)
{
	__asm __volatile("ltr %0" : : "r" (sel));
}

static __inline void
lcr8(u_int val)
{
	u_int64_t val64 = val;
	__asm __volatile("movq %0,%%cr8" : : "r" (val64));
}

/*
 * The upper 32 bits of CR0 are reserved, so only the low 32 bits are
 * exposed here.
 */
static __inline void
lcr0(u_int val)
{
	u_int64_t val64 = val;
	__asm __volatile("movq %0,%%cr0" : : "r" (val64));
}

static __inline u_int
rcr0(void)
{
	u_int64_t val64;
	u_int val;
	__asm __volatile("movq %%cr0,%0" : "=r" (val64));
	val = val64;
	return val;
}

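/*
 * Illustrative sketch (assumed caller): control-register updates follow a
 * read-modify-write pattern, e.g. temporarily clearing the write-protect
 * bit (CR0_WP from <machine/specialreg.h>) and then restoring it:
 *
 *	u_int cr0 = rcr0();
 *	lcr0(cr0 & ~CR0_WP);
 *	...
 *	lcr0(cr0);
 */
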
static __inline u_int64_t
rcr2(void)
{
	u_int64_t val;
	__asm __volatile("movq %%cr2,%0" : "=r" (val));
	return val;
}

static __inline void
lcr3(u_int64_t val)
{
	__asm __volatile("movq %0,%%cr3" : : "r" (val));
}

static __inline u_int64_t
rcr3(void)
{
	u_int64_t val;
	__asm __volatile("movq %%cr3,%0" : "=r" (val));
	return val;
}

/*
 * Same as for CR0: don't touch the upper 32 bits.
 */
static __inline void
lcr4(u_int val)
{
	u_int64_t val64 = val;

	__asm __volatile("movq %0,%%cr4" : : "r" (val64));
}

static __inline u_int
rcr4(void)
{
	u_int val;
	u_int64_t val64;
	__asm __volatile("movq %%cr4,%0" : "=r" (val64));
	val = val64;
	return val;
}

static __inline void
tlbflush(void)
{
	u_int64_t val;
	__asm __volatile("movq %%cr3,%0" : "=r" (val));
	__asm __volatile("movq %0,%%cr3" : : "r" (val));
}

static __inline void
tlbflushg(void)
{
	/*
	 * Big hammer: flush all TLB entries, including ones from PTEs
	 * with the G bit set.  This should only be necessary if TLB
	 * shootdown falls far behind.
	 *
	 * Intel Architecture Software Developer's Manual, Volume 3,
	 * System Programming, section 9.10, "Invalidating the
	 * Translation Lookaside Buffers (TLBs)":
	 * "The following operations invalidate all TLB entries,
	 * irrespective of the setting of the G flag:
	 * ...
	 * "(P6 family processors only): Writing to control register CR4 to
	 * modify the PSE, PGE, or PAE flag."
	 *
	 * (The alternatives not quoted above are not an option here.)
	 *
	 * If PGE is not in use, we reload CR3 for the benefit of
	 * pre-P6-family processors.
	 */

	if (cpu_feature & CPUID_PGE) {
		u_int cr4 = rcr4();
		lcr4(cr4 & ~CR4_PGE);
		lcr4(cr4);
	} else
		tlbflush();
}

#ifdef notyet
void	setidt(int idx, /*XXX*/caddr_t func, int typ, int dpl);
#endif

/* XXXX ought to be in psl.h with spl() functions */

static __inline void
disable_intr(void)
{
	__asm __volatile("cli");
}

static __inline void
enable_intr(void)
{
	__asm __volatile("sti");
}

static __inline u_long
read_rflags(void)
{
	u_long	ef;

	__asm __volatile("pushfq; popq %0" : "=r" (ef));
	return (ef);
}

static __inline void
write_rflags(u_long ef)
{
	__asm __volatile("pushq %0; popfq" : : "r" (ef));
}

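/*
 * Illustrative sketch (assumed caller): the usual pattern for a short
 * critical section saves RFLAGS, masks interrupts, and restores the saved
 * value, so the previous interrupt-enable state is preserved rather than
 * unconditionally re-enabled:
 *
 *	u_long flags = read_rflags();
 *	disable_intr();
 *	...
 *	write_rflags(flags);
 */
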
static __inline u_int64_t
rdmsr(u_int msr)
{
	uint32_t hi, lo;
	__asm __volatile("rdmsr" : "=d" (hi), "=a" (lo) : "c" (msr));
	return (((uint64_t)hi << 32) | (uint64_t) lo);
}

static __inline void
wrmsr(u_int msr, u_int64_t newval)
{
	__asm __volatile("wrmsr" :
	    : "a" (newval & 0xffffffff), "d" (newval >> 32), "c" (msr));
}

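/*
 * Illustrative sketch (assumed caller): MSR updates are read-modify-write,
 * e.g. setting the no-execute enable bit in EFER (MSR_EFER and EFER_NXE
 * from <machine/specialreg.h>):
 *
 *	wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_NXE);
 */
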
/*
 * Some of the undocumented AMD64 MSRs need a 'passcode' to access.
 *
 * See LinuxBIOSv2: src/cpu/amd/model_fxx/model_fxx_init.c
 */

#define	OPTERON_MSR_PASSCODE	0x9c5a203a

static __inline u_int64_t
rdmsr_locked(u_int msr, u_int code)
{
	uint32_t hi, lo;
	/*
	 * The "A" constraint does not name the %edx:%eax pair on amd64,
	 * so read and write the two 32-bit halves explicitly.
	 */
	__asm volatile("rdmsr"
	    : "=d" (hi), "=a" (lo)
	    : "c" (msr), "D" (code));
	return (((uint64_t)hi << 32) | (uint64_t) lo);
}

static __inline void
wrmsr_locked(u_int msr, u_int code, u_int64_t newval)
{
	__asm volatile("wrmsr"
	    :
	    : "a" (newval & 0xffffffff), "d" (newval >> 32),
	      "c" (msr), "D" (code));
}

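/*
 * Illustrative sketch (hypothetical MSR number and bit, not from this
 * file): the passcode travels in %edi alongside the MSR index in %ecx:
 *
 *	u_int64_t v = rdmsr_locked(0xc0011023, OPTERON_MSR_PASSCODE);
 *	wrmsr_locked(0xc0011023, OPTERON_MSR_PASSCODE, v | bit);
 */
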
static __inline void
wbinvd(void)
{
	__asm __volatile("wbinvd");
}

static __inline u_int64_t
rdtsc(void)
{
	uint32_t hi, lo;

	__asm __volatile("rdtsc" : "=d" (hi), "=a" (lo));
	return (((uint64_t)hi << 32) | (uint64_t) lo);
}

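/*
 * Illustrative sketch (assumed caller): the difference between two
 * rdtsc() reads gives a rough cycle count for the code between them.
 * rdtsc is not a serializing instruction, so out-of-order execution can
 * skew short measurements:
 *
 *	u_int64_t t0 = rdtsc();
 *	...
 *	u_int64_t cycles = rdtsc() - t0;
 */
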
static __inline u_int64_t
rdpmc(u_int pmc)
{
	uint32_t hi, lo;

	__asm __volatile("rdpmc" : "=d" (hi), "=a" (lo) : "c" (pmc));
	return (((uint64_t)hi << 32) | (uint64_t) lo);
}

/* Break into DDB/KGDB. */
static __inline void
breakpoint(void)
{
	__asm __volatile("int $3");
}

#define read_psl()	read_rflags()
#define write_psl(x)	write_rflags(x)

void amd64_errata(struct cpu_info *);

#endif /* _KERNEL */

#endif /* !_AMD64_CPUFUNC_H_ */
