sys/arch/amd64/include/pmap.h, Revision 1.1.1.1

/*	$OpenBSD: pmap.h,v 1.16 2007/07/06 11:46:48 art Exp $	*/
/*	$NetBSD: pmap.h,v 1.1 2003/04/26 18:39:46 fvdl Exp $	*/

/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgment:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Frank van der Linden for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * pmap.h: see pmap.c for the history of this pmap module.
 */

#ifndef	_AMD64_PMAP_H_
#define	_AMD64_PMAP_H_

#ifndef _LOCORE
#include <machine/cpufunc.h>
#include <machine/pte.h>
#include <machine/segments.h>
#include <uvm/uvm_object.h>
#endif

/*
 * The x86_64 pmap module closely resembles the i386 one. It uses
 * the same recursive entry scheme, and the same alternate area
 * trick for accessing non-current pmaps. See the i386 pmap.h
 * for a description. The obvious difference is that 3 extra
 * levels of page table need to be dealt with. The level 1 page
 * table pages are at:
 *
 * l1: 0x00007f8000000000 - 0x00007fffffffffff     (39 bits, needs PML4 entry)
 *
 * The alternate space is at:
 *
 * l1: 0xffffff8000000000 - 0xffffffffffffffff     (39 bits, needs PML4 entry)
 *
 * The rest is kept as physical pages in 3 UVM objects, and is
 * temporarily mapped for virtual access when needed.
 *
 * Note that address space is signed, so the layout for 48 bits is:
 *
 *  +---------------------------------+ 0xffffffffffffffff
 *  |                                 |
 *  |    alt.L1 table (PTE pages)     |
 *  |                                 |
 *  +---------------------------------+ 0xffffff8000000000
 *  ~                                 ~
 *  |                                 |
 *  |         Kernel Space            |
 *  |                                 |
 *  |                                 |
 *  +---------------------------------+ 0xffff800000000000 = 0x0000800000000000
 *  |                                 |
 *  |    L1 table (PTE pages)         |
 *  |                                 |
 *  +---------------------------------+ 0x00007f8000000000
 *  ~                                 ~
 *  |                                 |
 *  |         User Space              |
 *  |                                 |
 *  |                                 |
 *  +---------------------------------+ 0x0000000000000000
 *
 * In other words, there is a 'VA hole' at 0x0000800000000000 -
 * 0xffff800000000000 which will trap, just as on, for example,
 * sparcv9.
 *
 * The unused space can be used if needed, but it adds a little more
 * complexity to the calculations.
 */

/*
 * The first generation of Hammer processors can use 48 bits of
 * virtual memory, and 40 bits of physical memory. This will be
 * more for later generations. These defines can be changed to
 * variable names containing the # of bits, extracted from an
 * extended cpuid instruction (variables are harder to use during
 * bootstrap, though)
 */
#define VIRT_BITS	48
#define PHYS_BITS	40

/*
 * Mask to get rid of the sign-extended part of addresses.
 */
#define VA_SIGN_MASK		0xffff000000000000
#define VA_SIGN_NEG(va)		((va) | VA_SIGN_MASK)
/*
 * XXXfvdl this one's not right.
 */
#define VA_SIGN_POS(va)		((va) & ~VA_SIGN_MASK)

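/*
 * Illustrative sketch, not part of the original header: how the sign
 * masking behaves on a concrete value (the direct map slot, 509).
 * NBPD_L4, the amount of VA one PML4 entry maps (512GB), comes from
 * <machine/pte.h>.
 */
#if 0
static __inline vaddr_t
va_sign_example(void)
{
	vaddr_t raw = 509UL * NBPD_L4;		/* 0x0000fe8000000000 */
	vaddr_t canon = VA_SIGN_NEG(raw);	/* 0xfffffe8000000000 */

	return (VA_SIGN_POS(canon));		/* == raw again */
}
#endif
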
#define L4_SLOT_PTE		255
#define L4_SLOT_KERN		256
#define L4_SLOT_KERNBASE	511
#define L4_SLOT_APTE		510
#define L4_SLOT_DIRECT		509
#define L4_SLOT_DIRECT_NC	508

#define PDIR_SLOT_KERN		L4_SLOT_KERN
#define PDIR_SLOT_PTE		L4_SLOT_PTE
#define PDIR_SLOT_APTE		L4_SLOT_APTE
#define PDIR_SLOT_DIRECT	L4_SLOT_DIRECT
#define PDIR_SLOT_DIRECT_NC	L4_SLOT_DIRECT_NC

/*
 * the following defines give the virtual addresses of various MMU
 * data structures:
 * PTE_BASE and APTE_BASE: the base VA of the linear PTE mappings
 * PDP_BASE and APDP_BASE: the base VA of the recursive mapping of the PDP
 * PDP_PDE and APDP_PDE: the VA of the PDE that points back to the PDP/APDP
 *
 */

#define PTE_BASE  ((pt_entry_t *) (L4_SLOT_PTE * NBPD_L4))
#define APTE_BASE ((pt_entry_t *) (VA_SIGN_NEG((L4_SLOT_APTE * NBPD_L4))))
#define PMAP_DIRECT_BASE	(VA_SIGN_NEG((L4_SLOT_DIRECT * NBPD_L4)))
#define PMAP_DIRECT_END		(VA_SIGN_NEG(((L4_SLOT_DIRECT + 1) * NBPD_L4)))
#define PMAP_DIRECT_BASE_NC	(VA_SIGN_NEG((L4_SLOT_DIRECT_NC * NBPD_L4)))
#define PMAP_DIRECT_END_NC	(VA_SIGN_NEG(((L4_SLOT_DIRECT_NC + 1) * NBPD_L4)))

#define L1_BASE		PTE_BASE
#define AL1_BASE	APTE_BASE

#define L2_BASE ((pd_entry_t *)((char *)L1_BASE + L4_SLOT_PTE * NBPD_L3))
#define L3_BASE ((pd_entry_t *)((char *)L2_BASE + L4_SLOT_PTE * NBPD_L2))
#define L4_BASE ((pd_entry_t *)((char *)L3_BASE + L4_SLOT_PTE * NBPD_L1))

#define AL2_BASE ((pd_entry_t *)((char *)AL1_BASE + L4_SLOT_PTE * NBPD_L3))
#define AL3_BASE ((pd_entry_t *)((char *)AL2_BASE + L4_SLOT_PTE * NBPD_L2))
#define AL4_BASE ((pd_entry_t *)((char *)AL3_BASE + L4_SLOT_PTE * NBPD_L1))

#define PDP_PDE		(L4_BASE + PDIR_SLOT_PTE)
#define APDP_PDE	(L4_BASE + PDIR_SLOT_APTE)

#define PDP_BASE	L4_BASE
#define APDP_BASE	AL4_BASE

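/*
 * Illustrative sketch, not part of the original header: with the
 * recursive bases above, the PDEs of the current pmap appear as four
 * linear arrays indexed by the pl*_i() macros defined further down
 * (pmap_valid_entry() is also defined further down).  A software walk
 * of the current address space is then just array indexing:
 */
#if 0
static __inline int
va_is_mapped_sketch(vaddr_t va)
{
	if (!pmap_valid_entry(L4_BASE[pl4_i(va)]) ||
	    !pmap_valid_entry(L3_BASE[pl3_i(va)]) ||
	    !pmap_valid_entry(L2_BASE[pl2_i(va)]))
		return (0);
	return (pmap_valid_entry(L1_BASE[pl1_i(va)]) != 0);
}
#endif
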
#define NKL4_MAX_ENTRIES	(unsigned long)1
#define NKL3_MAX_ENTRIES	(unsigned long)(NKL4_MAX_ENTRIES * 512)
#define NKL2_MAX_ENTRIES	(unsigned long)(NKL3_MAX_ENTRIES * 512)
#define NKL1_MAX_ENTRIES	(unsigned long)(NKL2_MAX_ENTRIES * 512)

#define NKL4_KIMG_ENTRIES	1
#define NKL3_KIMG_ENTRIES	1
#define NKL2_KIMG_ENTRIES	8

#define NDML4_ENTRIES		1
#define NDML3_ENTRIES		1
#define NDML2_ENTRIES		4	/* 4GB */

/*
 * Since kva space is below the kernel in its entirety, we start off
 * with zero entries on each level.
 */
#define NKL4_START_ENTRIES	0
#define NKL3_START_ENTRIES	0
#define NKL2_START_ENTRIES	0
#define NKL1_START_ENTRIES	0	/* XXX */

#define NTOPLEVEL_PDES		(PAGE_SIZE / (sizeof (pd_entry_t)))

#define KERNSPACE		(NKL4_ENTRIES * NBPD_L4)

#define NPDPG			(PAGE_SIZE / sizeof (pd_entry_t))

#define ptei(VA)	(((VA_SIGN_POS(VA)) & L1_MASK) >> L1_SHIFT)

/*
 * pl*_pi: index in the ptp page for a pde mapping a VA.
 * (pl*_i below is the index in the virtual array of all pdes per level)
 */
#define pl1_pi(VA)	(((VA_SIGN_POS(VA)) & L1_MASK) >> L1_SHIFT)
#define pl2_pi(VA)	(((VA_SIGN_POS(VA)) & L2_MASK) >> L2_SHIFT)
#define pl3_pi(VA)	(((VA_SIGN_POS(VA)) & L3_MASK) >> L3_SHIFT)
#define pl4_pi(VA)	(((VA_SIGN_POS(VA)) & L4_MASK) >> L4_SHIFT)

/*
 * pl*_i: generate index into pde/pte arrays in virtual space
 */
#define pl1_i(VA)	(((VA_SIGN_POS(VA)) & L1_FRAME) >> L1_SHIFT)
#define pl2_i(VA)	(((VA_SIGN_POS(VA)) & L2_FRAME) >> L2_SHIFT)
#define pl3_i(VA)	(((VA_SIGN_POS(VA)) & L3_FRAME) >> L3_SHIFT)
#define pl4_i(VA)	(((VA_SIGN_POS(VA)) & L4_FRAME) >> L4_SHIFT)
#define pl_i(va, lvl) \
        (((VA_SIGN_POS(va)) & ptp_masks[(lvl)-1]) >> ptp_shifts[(lvl)-1])

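/*
 * Illustrative worked example, not part of the original header:
 * decomposing the base of the kernel area (L4 slot 256, i.e. VA
 * 0xffff800000000000) with the index macros above.
 */
#if 0
static __inline void
va_index_example(void)
{
	vaddr_t kva = VA_SIGN_NEG(256UL * NBPD_L4);

	KASSERT(pl4_i(kva) == 256);		/* slot in the PML4 page */
	KASSERT(pl3_i(kva) == 256 * 512);	/* index into linear L3 array */
	KASSERT(pl4_pi(kva) == 256);		/* index within its own page */
	KASSERT(pl_i(kva, 4) == pl4_i(kva));	/* table-driven form */
}
#endif
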
#define PTP_MASK_INITIALIZER	{ L1_FRAME, L2_FRAME, L3_FRAME, L4_FRAME }
#define PTP_SHIFT_INITIALIZER	{ L1_SHIFT, L2_SHIFT, L3_SHIFT, L4_SHIFT }
#define NKPTP_INITIALIZER	{ NKL1_START_ENTRIES, NKL2_START_ENTRIES, \
				  NKL3_START_ENTRIES, NKL4_START_ENTRIES }
#define NKPTPMAX_INITIALIZER	{ NKL1_MAX_ENTRIES, NKL2_MAX_ENTRIES, \
				  NKL3_MAX_ENTRIES, NKL4_MAX_ENTRIES }
#define NBPD_INITIALIZER	{ NBPD_L1, NBPD_L2, NBPD_L3, NBPD_L4 }
#define PDES_INITIALIZER	{ L2_BASE, L3_BASE, L4_BASE }
#define APDES_INITIALIZER	{ AL2_BASE, AL3_BASE, AL4_BASE }

/*
 * PTP macros:
 *   a PTP's index is the PD index of the PDE that points to it
 *   a PTP's offset is the byte-offset in the PTE space that this PTP is at
 *   a PTP's VA is the first VA mapped by that PTP
 *
 * note that PAGE_SIZE == number of bytes in a PTP (4096 bytes == 512 entries,
 *           since each PTE is 8 bytes on amd64)
 *           NBPD_L1 == number of bytes a level 1 PTP can map (2MB)
 */

#define ptp_va2o(va, lvl)	(pl_i(va, (lvl)+1) * PAGE_SIZE)

#define PTP_LEVELS	4

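/*
 * Illustrative sketch, not part of the original header: ptp_va2o()
 * turns a VA into the byte offset, within the UVM object for that
 * level, at which the PTP mapping the VA lives.  E.g. the level 1 PTP
 * for a VA sits at offset pl_i(va, 2) * PAGE_SIZE in pm_obj[0]
 * (pm_obj is declared in struct pmap below), so it can be looked up
 * with the standard UVM primitive:
 */
#if 0
static __inline struct vm_page *
ptp_lookup_sketch(struct pmap *pmap, vaddr_t va)
{
	/* level 1 PTPs live in pm_obj[0] */
	return (uvm_pagelookup(&pmap->pm_obj[0], ptp_va2o(va, 1)));
}
#endif
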
/*
 * PG_AVAIL usage: we make use of the ignored bits of the PTE
 */

#define PG_W		PG_AVAIL1	/* "wired" mapping */
#define PG_PVLIST	PG_AVAIL2	/* mapping has entry on pvlist */
/* PG_AVAIL3 not used */

/*
 * Number of PTE's per cache line.  8 byte pte, 64-byte cache line
 * Used to avoid false sharing of cache lines.
 */
#define NPTECL		8


#if defined(_KERNEL) && !defined(_LOCORE)
/*
 * pmap data structures: see pmap.c for details of locking.
 */

struct pmap;
typedef struct pmap *pmap_t;

/*
 * we maintain a list of all non-kernel pmaps
 */

LIST_HEAD(pmap_head, pmap); /* struct pmap_head: head of a pmap list */

/*
 * the pmap structure
 *
 * note that the pm_obj contains the simple_lock, the reference count,
 * page list, and number of PTPs within the pmap.
 *
 * pm_lock is the same as the spinlock for vm object 0. Changes to
 * the other objects may only be made if that lock has been taken
 * (the other object locks are only used when uvm_pagealloc is called)
 */

struct pmap {
	struct uvm_object pm_obj[PTP_LEVELS-1]; /* objects for lvl >= 1 */
#define	pm_lock pm_obj[0].vmobjlock
#define pm_obj_l1 pm_obj[0]
#define pm_obj_l2 pm_obj[1]
#define pm_obj_l3 pm_obj[2]
	LIST_ENTRY(pmap) pm_list;	/* list (lck by pm_list lock) */
	pd_entry_t *pm_pdir;		/* VA of PD (lck by object lock) */
	paddr_t pm_pdirpa;		/* PA of PD (read-only after create) */
	struct vm_page *pm_ptphint[PTP_LEVELS-1];
					/* pointer to a PTP in our pmap */
	struct pmap_statistics pm_stats;  /* pmap stats (lck by object lock) */

	int pm_flags;			/* see below */

	union descriptor *pm_ldt;	/* user-set LDT */
	int pm_ldt_len;			/* number of LDT entries */
	int pm_ldt_sel;			/* LDT selector */
	u_int32_t pm_cpus;		/* mask of CPUs using pmap */
};

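/*
 * Illustrative sketch, not part of the original header: on a context
 * switch the only per-pmap state the MMU itself needs is pm_pdirpa,
 * loaded into %cr3 (lcr3() comes from <machine/cpufunc.h>).  The real
 * activation code also updates pm_cpus atomically; the plain |= here
 * is a simplification.
 */
#if 0
static __inline void
pmap_activate_sketch(struct pmap *pmap)
{
	pmap->pm_cpus |= 1U << cpu_number();	/* mark this CPU as a user */
	lcr3(pmap->pm_pdirpa);			/* switch address spaces */
}
#endif
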
/* pm_flags */
#define	PMF_USER_LDT	0x01	/* pmap has user-set LDT */

/*
 * We keep mod/ref flags in struct vm_page->pg_flags.
 */
#define PG_PMAP_MOD	PG_PMAP0
#define PG_PMAP_REF	PG_PMAP1

/*
 * for each managed physical page we maintain a list of <PMAP,VA>'s
 * which it is mapped at.
 */
struct pv_entry {			/* locked by its list's pvh_lock */
	struct pv_entry *pv_next;	/* next entry */
	struct pmap *pv_pmap;		/* the pmap */
	vaddr_t pv_va;			/* the virtual address */
	struct vm_page *pv_ptp;		/* the vm_page of the PTP */
};

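/*
 * Illustrative sketch, not part of the original header: finding the
 * VA at which a given pmap maps a page by walking its pv chain.  The
 * "pvh" argument stands in for the real list head, which lives in the
 * pv head structures managed by pmap.c.
 */
#if 0
static __inline vaddr_t
pv_find_va_sketch(struct pv_entry *pvh, struct pmap *pmap)
{
	struct pv_entry *pve;

	for (pve = pvh; pve != NULL; pve = pve->pv_next)
		if (pve->pv_pmap == pmap)
			return (pve->pv_va);
	return (0);
}
#endif
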
/*
 * pmap_remove_record: a record of VAs that have been unmapped, used to
 * flush TLB.  if we have more than PMAP_RR_MAX then we stop recording.
 */

#define PMAP_RR_MAX	16	/* max of 16 pages (64K) */

struct pmap_remove_record {
	int prr_npages;
	vaddr_t prr_vas[PMAP_RR_MAX];
};

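/*
 * Illustrative sketch, not part of the original header: how a remove
 * record would be consumed.  While the record is complete each VA is
 * flushed individually; once it has overflowed PMAP_RR_MAX the whole
 * TLB is flushed instead (tlbflush() is from <machine/cpufunc.h>,
 * pmap_update_pg() is defined further down).
 */
#if 0
static __inline void
pmap_rr_flush_sketch(struct pmap_remove_record *prr)
{
	int i;

	if (prr->prr_npages <= PMAP_RR_MAX) {
		for (i = 0; i < prr->prr_npages; i++)
			pmap_update_pg(prr->prr_vas[i]);	/* per page */
	} else
		tlbflush();			/* record overflowed */
}
#endif
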
/*
 * global kernel variables
 */

/* PTDpaddr: is the physical address of the kernel's PDP */
extern u_long PTDpaddr;

extern struct pmap kernel_pmap_store;	/* kernel pmap */
extern int pmap_pg_g;			/* do we support PG_G? */

extern paddr_t ptp_masks[];
extern int ptp_shifts[];
extern long nkptp[], nbpd[], nkptpmax[];
extern pd_entry_t *pdes[];

/*
 * macros
 */

#define	pmap_kernel()			(&kernel_pmap_store)
#define	pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define	pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)
#define	pmap_update(pmap)		/* nothing (yet) */

#define pmap_clear_modify(pg)		pmap_clear_attrs(pg, PG_M)
#define pmap_clear_reference(pg)	pmap_clear_attrs(pg, PG_U)
#define pmap_copy(DP,SP,D,L,S)
#define pmap_is_modified(pg)		pmap_test_attrs(pg, PG_M)
#define pmap_is_referenced(pg)		pmap_test_attrs(pg, PG_U)
#define pmap_move(DP,SP,D,L,S)
#define pmap_phys_address(ppn)		ptoa(ppn)
#define pmap_valid_entry(E)		((E) & PG_V) /* is PDE or PTE valid? */

#define pmap_proc_iflush(p,va,len)	/* nothing */
#define pmap_unuse_final(p)		/* nothing */

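/*
 * Illustrative usage, not part of the original header: the mod/ref
 * macros above are how e.g. the page daemon asks about and resets a
 * page's dirty state.
 */
#if 0
static __inline void
page_clean_sketch(struct vm_page *pg)
{
	if (pmap_is_modified(pg)) {
		/* ... write the page back ... */
		pmap_clear_modify(pg);	/* == pmap_clear_attrs(pg, PG_M) */
	}
}
#endif
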

/*
 * prototypes
 */

void		pmap_bootstrap(vaddr_t, paddr_t);
boolean_t	pmap_clear_attrs(struct vm_page *, unsigned long);
static void	pmap_page_protect(struct vm_page *, vm_prot_t);
void		pmap_page_remove(struct vm_page *);
static void	pmap_protect(struct pmap *, vaddr_t,
				vaddr_t, vm_prot_t);
void		pmap_remove(struct pmap *, vaddr_t, vaddr_t);
boolean_t	pmap_test_attrs(struct vm_page *, unsigned);
static void	pmap_update_pg(vaddr_t);
static void	pmap_update_2pg(vaddr_t, vaddr_t);
void		pmap_write_protect(struct pmap *, vaddr_t,
				vaddr_t, vm_prot_t);

vaddr_t	reserve_dumppages(vaddr_t); /* XXX: not a pmap fn */

void	pmap_tlb_shootpage(struct pmap *, vaddr_t);
void	pmap_tlb_shootrange(struct pmap *, vaddr_t, vaddr_t);
void	pmap_tlb_shoottlb(void);
#ifdef MULTIPROCESSOR
void	pmap_tlb_shootwait(void);
#else
#define	pmap_tlb_shootwait()
#endif

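/*
 * Illustrative sketch, not part of the original header: the usual
 * shootdown pattern.  A PTE change is followed by a shootdown of the
 * page on every CPU using the pmap, and the caller waits for the
 * remote flushes before reusing the mapping (the wait is a no-op on
 * uniprocessor kernels, as above).  pmap_pte_set() is defined further
 * down.
 */
#if 0
static __inline void
zap_page_sketch(struct pmap *pmap, vaddr_t va, pt_entry_t *pte)
{
	pmap_pte_set(pte, 0);		/* atomically clear the PTE */
	pmap_tlb_shootpage(pmap, va);	/* queue invlpg on all users */
	pmap_tlb_shootwait();		/* wait for remote acknowledgement */
}
#endif
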
void	pmap_prealloc_lowmem_ptps(void);

void	pagezero(vaddr_t);

#define	PMAP_STEAL_MEMORY	/* enable pmap_steal_memory() */
#define PMAP_GROWKERNEL		/* turn on pmap_growkernel interface */

/*
 * Do idle page zero'ing uncached to avoid polluting the cache.
 */
boolean_t	pmap_pageidlezero(struct vm_page *);
#define	PMAP_PAGEIDLEZERO(pg)	pmap_pageidlezero((pg))

/*
 * inline functions
 */

static __inline void
pmap_remove_all(struct pmap *pmap)
{
	/* Nothing. */
}

/*
 * pmap_update_pg: flush one page from the TLB (or flush the whole thing
 *	if hardware doesn't support one-page flushing)
 */

__inline static void
pmap_update_pg(vaddr_t va)
{
	invlpg(va);
}

/*
 * pmap_update_2pg: flush two pages from the TLB
 */

__inline static void
pmap_update_2pg(vaddr_t va, vaddr_t vb)
{
	invlpg(va);
	invlpg(vb);
}


/*
 * pmap_page_protect: change the protection of all recorded mappings
 *	of a managed page
 *
 * => this function is a frontend for pmap_page_remove/pmap_clear_attrs
 * => we only have to worry about making the page more protected.
 *	unprotecting a page is done on-demand at fault time.
 */

__inline static void
pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
{
	if ((prot & VM_PROT_WRITE) == 0) {
		if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) {
			(void) pmap_clear_attrs(pg, PG_RW);
		} else {
			pmap_page_remove(pg);
		}
	}
}


/*
 * pmap_protect: change the protection of pages in a pmap
 *
 * => this function is a frontend for pmap_remove/pmap_write_protect
 * => we only have to worry about making the page more protected.
 *	unprotecting a page is done on-demand at fault time.
 */

__inline static void
pmap_protect(struct pmap *pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
{
	if ((prot & VM_PROT_WRITE) == 0) {
		if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) {
			pmap_write_protect(pmap, sva, eva, prot);
		} else {
			pmap_remove(pmap, sva, eva);
		}
	}
}

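/*
 * Illustrative usage, not part of the original header: revoking write
 * access.  Both frontends only ever narrow permissions; re-enabling
 * write access happens lazily at fault time, as noted above.
 */
#if 0
static __inline void
revoke_write_sketch(struct pmap *pmap, vaddr_t sva, vaddr_t eva,
    struct vm_page *pg)
{
	pmap_protect(pmap, sva, eva, VM_PROT_READ);	/* one pmap, VA range */
	pmap_page_protect(pg, VM_PROT_READ);	/* every mapping of the page */
}
#endif
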
/*
 * various address inlines
 *
 *  vtopte: return a pointer to the PTE mapping a VA, works only for
 *  user and PT addresses
 *
 *  kvtopte: return a pointer to the PTE mapping a kernel VA
 */

static __inline pt_entry_t *
vtopte(vaddr_t va)
{
	return (PTE_BASE + pl1_i(va));
}

static __inline pt_entry_t *
kvtopte(vaddr_t va)
{
#ifdef LARGEPAGES
	{
		pd_entry_t *pde;

		/* the PDE for va lives in the linear L2 array */
		pde = L2_BASE + pl2_i(va);
		if (*pde & PG_PS)
			return ((pt_entry_t *)pde);
	}
#endif

	return (PTE_BASE + pl1_i(va));
}

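/*
 * Illustrative usage, not part of the original header: testing
 * whether a kernel VA currently has a valid mapping.
 */
#if 0
static __inline boolean_t
kva_is_mapped_sketch(vaddr_t va)
{
	return (pmap_valid_entry(*kvtopte(va)) != 0);
}
#endif
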
#define pmap_pte_set(p, n)		x86_atomic_testset_u64(p, n)
#define pmap_pte_clearbits(p, b)	x86_atomic_clearbits_u64(p, b)
#define pmap_pte_setbits(p, b)		x86_atomic_setbits_u64(p, b)
#define pmap_cpu_has_pg_n()		(1)
#define pmap_cpu_has_invlpg		(1)

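/*
 * Illustrative sketch, not part of the original header: the
 * modify-then-flush sequence built from the atomics above.
 * pmap_pte_set() is an atomic exchange, so the old PTE (with any
 * accumulated PG_M/PG_U bits) is not lost.
 */
#if 0
static __inline void
pte_update_sketch(pt_entry_t *pte, pt_entry_t npte, vaddr_t va)
{
	pt_entry_t opte;

	opte = pmap_pte_set(pte, npte);	/* atomic exchange, returns old */
	if (opte & PG_V)
		pmap_update_pg(va);	/* TLB may still hold the old PTE */
}
#endif
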
vaddr_t	pmap_map(vaddr_t, paddr_t, paddr_t, vm_prot_t);

#if 0   /* XXXfvdl was USER_LDT, need to check if that can be supported */
void	pmap_ldt_cleanup(struct proc *);
#define	PMAP_FORK
#endif /* USER_LDT */

#define PMAP_DIRECT_MAP(pa)	((vaddr_t)PMAP_DIRECT_BASE + (pa))
#define PMAP_DIRECT_UNMAP(va)	((paddr_t)(va) - PMAP_DIRECT_BASE)
#define pmap_map_direct(pg)	PMAP_DIRECT_MAP(VM_PAGE_TO_PHYS(pg))
#define pmap_unmap_direct(va)	PHYS_TO_VM_PAGE(PMAP_DIRECT_UNMAP(va))

#define PMAP_DIRECT_NC_MAP(pa)		((vaddr_t)PMAP_DIRECT_BASE_NC + (pa))
#define PMAP_DIRECT_NC_UNMAP(va)	((paddr_t)(va) - PMAP_DIRECT_BASE_NC)
#define pmap_map_nc_direct(pg)		PMAP_DIRECT_NC_MAP(VM_PAGE_TO_PHYS(pg))
#define pmap_unmap_nc_direct(va)	PHYS_TO_VM_PAGE(PMAP_DIRECT_NC_UNMAP(va))

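/*
 * Illustrative usage, not part of the original header: zeroing a page
 * through the direct map.  No transient kernel mapping is needed,
 * which is what __HAVE_PMAP_DIRECT advertises.
 */
#if 0
static __inline void
zero_page_sketch(struct vm_page *pg)
{
	pagezero(pmap_map_direct(pg));	/* cached direct mapping */
}
#endif
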
#define __HAVE_PMAP_DIRECT

#endif /* _KERNEL && !_LOCORE */
#endif /* _AMD64_PMAP_H_ */
