author | Neel Natu <neel@FreeBSD.org> | 2014-09-10 03:13:40 +0000
---|---|---
committer | Neel Natu <neel@FreeBSD.org> | 2014-09-10 03:13:40 +0000
commit | 238b6cb761de655f2f6753884b2781b820908b06 |
tree | 93243a1f85454582b5a98ba8201f1f3ade8f6e01 | /sys/amd64/vmm/amd
parent | e5397c9fdd7bac013dfec8a07251465f02734d4c |
download | src-238b6cb761de655f2f6753884b2781b820908b06.tar.gz | src-238b6cb761de655f2f6753884b2781b820908b06.zip
Allow intercepts and irq fields to be cached by the VMCB.
Provide APIs svm_enable_intercept()/svm_disable_intercept() to add/delete
VMCB intercepts. These APIs ensure that the VMCB state cache is invalidated
when intercepts are modified.
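Both helpers funnel into a common routine that rewrites the selected word of the intercept array and dirties the VMCB state cache only when a bit actually changes. A condensed sketch of that pattern, taken from the svm.c diff below (the KASSERT bounds check and VCPU_CTR3 logging of the real function are elided here):

```c
static __inline void
svm_set_intercept(struct svm_softc *sc, int vcpu, int idx, uint32_t bitmask,
    int enabled)
{
        struct vmcb_ctrl *ctrl = svm_get_vmcb_ctrl(sc, vcpu);
        uint32_t oldval = ctrl->intercept[idx];

        if (enabled)
                ctrl->intercept[idx] |= bitmask;        /* enable intercept */
        else
                ctrl->intercept[idx] &= ~bitmask;       /* disable intercept */

        /* Invalidate the cached intercept state only on a real change. */
        if (ctrl->intercept[idx] != oldval)
                vcpu_set_dirty(sc, vcpu, VMCB_CACHE_I);
}
```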
Each intercept is identified as an (index, bitmask) tuple. For example, the
VINTR intercept is identified as (VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR).
The first 20 bytes of the control area, which are used to enable intercepts,
are represented as 'uint32_t intercept[5]' in 'struct vmcb_ctrl'.
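In other words, the tuple's index selects one of the five 32-bit words and the bitmask selects a bit within that word. A minimal illustration, reusing the svm_get_vmcb_ctrl() accessor that svm.c already provides:

```c
struct vmcb_ctrl *ctrl = svm_get_vmcb_ctrl(sc, vcpu);

/* The VINTR example above: word 3 of intercept[], bit VMCB_INTCPT_VINTR. */
int vintr_on = (ctrl->intercept[VMCB_CTRL1_INTCPT] & VMCB_INTCPT_VINTR) != 0;
```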
Modify svm_setcap() and svm_getcap() to use the new APIs.
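For example, after this change the VM_CAP_HALT_EXIT plumbing in the two functions reduces to a set and a get against the same tuple (condensed from the diff below):

```c
/* svm_setcap(): enable or disable the HLT intercept according to 'val'. */
svm_set_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_HLT, val);

/* svm_getcap(): report whether the HLT intercept is currently enabled. */
*retval = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_HLT);
```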
Discussed with: Anish Gupta (akgupt3@gmail.com)
Notes:
svn path=/projects/bhyve_svm/; revision=271348
Diffstat (limited to 'sys/amd64/vmm/amd')
-rw-r--r-- | sys/amd64/vmm/amd/svm.c | 207
-rw-r--r-- | sys/amd64/vmm/amd/vmcb.h | 43
2 files changed, 133 insertions, 117 deletions
```diff
diff --git a/sys/amd64/vmm/amd/svm.c b/sys/amd64/vmm/amd/svm.c
index 6946d5cb65cd..584256b05acb 100644
--- a/sys/amd64/vmm/amd/svm.c
+++ b/sys/amd64/vmm/amd/svm.c
@@ -80,8 +80,11 @@ __FBSDID("$FreeBSD$");
 #define AMD_CPUID_SVM_PAUSE_INC    BIT(10)  /* Pause intercept filter. */
 #define AMD_CPUID_SVM_PAUSE_FTH    BIT(12)  /* Pause filter threshold */
 
-#define VMCB_CACHE_DEFAULT \
-        (VMCB_CACHE_ASID | VMCB_CACHE_IOPM | VMCB_CACHE_NP)
+#define VMCB_CACHE_DEFAULT      (VMCB_CACHE_ASID |      \
+                                VMCB_CACHE_IOPM |       \
+                                VMCB_CACHE_I |          \
+                                VMCB_CACHE_TPR |        \
+                                VMCB_CACHE_NP)
 
 MALLOC_DEFINE(M_SVM, "svm", "svm");
 MALLOC_DEFINE(M_SVM_VLAPIC, "svm-vlapic", "svm-vlapic");
@@ -394,13 +397,61 @@ vcpu_set_dirty(struct svm_softc *sc, int vcpu, uint32_t dirtybits)
         vcpustate->dirty |= dirtybits;
 }
 
+static __inline int
+svm_get_intercept(struct svm_softc *sc, int vcpu, int idx, uint32_t bitmask)
+{
+        struct vmcb_ctrl *ctrl;
+
+        KASSERT(idx >=0 && idx < 5, ("invalid intercept index %d", idx));
+
+        ctrl = svm_get_vmcb_ctrl(sc, vcpu);
+        return (ctrl->intercept[idx] & bitmask ? 1 : 0);
+}
+
+static __inline void
+svm_set_intercept(struct svm_softc *sc, int vcpu, int idx, uint32_t bitmask,
+    int enabled)
+{
+        struct vmcb_ctrl *ctrl;
+        uint32_t oldval;
+
+        KASSERT(idx >=0 && idx < 5, ("invalid intercept index %d", idx));
+
+        ctrl = svm_get_vmcb_ctrl(sc, vcpu);
+        oldval = ctrl->intercept[idx];
+
+        if (enabled)
+                ctrl->intercept[idx] |= bitmask;
+        else
+                ctrl->intercept[idx] &= ~bitmask;
+
+        if (ctrl->intercept[idx] != oldval) {
+                vcpu_set_dirty(sc, vcpu, VMCB_CACHE_I);
+                VCPU_CTR3(sc->vm, vcpu, "intercept[%d] modified "
+                    "from %#x to %#x", idx, oldval, ctrl->intercept[idx]);
+        }
+}
+
+static __inline void
+svm_disable_intercept(struct svm_softc *sc, int vcpu, int off, uint32_t bitmask)
+{
+        svm_set_intercept(sc, vcpu, off, bitmask, 0);
+}
+
+static __inline void
+svm_enable_intercept(struct svm_softc *sc, int vcpu, int off, uint32_t bitmask)
+{
+        svm_set_intercept(sc, vcpu, off, bitmask, 1);
+}
+
 static void
 vmcb_init(struct svm_softc *sc, int vcpu, uint64_t iopm_base_pa,
     uint64_t msrpm_base_pa, uint64_t np_pml4)
 {
         struct vmcb_ctrl *ctrl;
         struct vmcb_state *state;
-        uint16_t cr_shadow;
+        uint32_t mask;
+        int n;
 
         ctrl = svm_get_vmcb_ctrl(sc, vcpu);
         state = svm_get_vmcb_state(sc, vcpu);
@@ -416,30 +467,35 @@ vmcb_init(struct svm_softc *sc, int vcpu, uint64_t iopm_base_pa,
          * Intercept accesses to the control registers that are not shadowed
          * in the VMCB - i.e. all except cr0, cr2, cr3, cr4 and cr8.
          */
-        cr_shadow = BIT(0) | BIT(2) | BIT(3) | BIT(4) | BIT(8);
-        ctrl->cr_write = ctrl->cr_read = ~cr_shadow;
+        for (n = 0; n < 16; n++) {
+                mask = (BIT(n) << 16) | BIT(n);
+                if (n == 0 || n == 2 || n == 3 || n == 4 || n == 8)
+                        svm_disable_intercept(sc, vcpu, VMCB_CR_INTCPT, mask);
+                else
+                        svm_enable_intercept(sc, vcpu, VMCB_CR_INTCPT, mask);
+        }
 
         /* Intercept Machine Check exceptions. */
-        ctrl->exception = BIT(IDT_MC);
+        svm_enable_intercept(sc, vcpu, VMCB_EXC_INTCPT, BIT(IDT_MC));
 
         /* Intercept various events (for e.g. I/O, MSR and CPUID accesses) */
-        ctrl->ctrl1 = VMCB_INTCPT_IO |
-            VMCB_INTCPT_MSR |
-            VMCB_INTCPT_HLT |
-            VMCB_INTCPT_CPUID |
-            VMCB_INTCPT_INTR |
-            VMCB_INTCPT_VINTR |
-            VMCB_INTCPT_INIT |
-            VMCB_INTCPT_NMI |
-            VMCB_INTCPT_SMI |
-            VMCB_INTCPT_FERR_FREEZE |
-            VMCB_INTCPT_SHUTDOWN;
+        svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IO);
+        svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_MSR);
+        svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_HLT);
+        svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_CPUID);
+        svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INTR);
+        svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INIT);
+        svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_NMI);
+        svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SMI);
+        svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SHUTDOWN);
+        svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
+            VMCB_INTCPT_FERR_FREEZE);
 
         /*
          * From section "Canonicalization and Consistency Checks" in APMv2
          * the VMRUN intercept bit must be set to pass the consistency check.
          */
-        ctrl->ctrl2 = VMCB_INTCPT_VMRUN;
+        svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMRUN);
 
         /*
          * The ASID will be set to a non-zero value just before VMRUN.
@@ -670,7 +726,7 @@ svm_handle_io(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit)
         struct svm_regctx *regs;
         struct vm_inout_str *vis;
         uint64_t info1;
-        
+
         state = svm_get_vmcb_state(svm_sc, vcpu);
         ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);
         regs = svm_get_guest_regctx(svm_sc, vcpu);
@@ -1725,99 +1781,58 @@ svm_getdesc(void *arg, int vcpu, int type, struct seg_desc *desc)
 static int
 svm_setcap(void *arg, int vcpu, int type, int val)
 {
-        struct svm_softc *svm_sc;
-        struct vmcb_ctrl *ctrl;
-        int ret = ENOENT;
-
-        svm_sc = arg;
-        KASSERT(vcpu < svm_sc->vcpu_cnt, ("Guest doesn't have VCPU%d", vcpu));
-
-        ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);
+        struct svm_softc *sc;
+        int error;
 
+        sc = arg;
+        error = 0;
         switch (type) {
-        case VM_CAP_HALT_EXIT:
-                if (val)
-                        ctrl->ctrl1 |= VMCB_INTCPT_HLT;
-                else
-                        ctrl->ctrl1 &= ~VMCB_INTCPT_HLT;
-                ret = 0;
-                VCPU_CTR1(svm_sc->vm, vcpu, "SVM:Set_gap:Halt exit %s.\n",
-                    val ? "enabled": "disabled");
-                break;
-
-        case VM_CAP_PAUSE_EXIT:
-                if (val)
-                        ctrl->ctrl1 |= VMCB_INTCPT_PAUSE;
-                else
-                        ctrl->ctrl1 &= ~VMCB_INTCPT_PAUSE;
-                ret = 0;
-                VCPU_CTR1(svm_sc->vm, vcpu, "SVM:Set_gap:Pause exit %s.\n",
-                    val ? "enabled": "disabled");
-                break;
-
-        case VM_CAP_MTRAP_EXIT:
-                if (val)
-                        ctrl->exception |= BIT(IDT_MC);
-                else
-                        ctrl->exception &= ~BIT(IDT_MC);
-                ret = 0;
-                VCPU_CTR1(svm_sc->vm, vcpu, "SVM:Set_gap:MC exit %s.\n",
-                    val ? "enabled": "disabled");
-                break;
-
-        case VM_CAP_UNRESTRICTED_GUEST:
-                /* SVM doesn't need special capability for SMP.*/
-                VCPU_CTR0(svm_sc->vm, vcpu, "SVM:Set_gap:Unrestricted "
-                    "always enabled.\n");
-                ret = 0;
-                break;
-
-        default:
-                break;
-        }
-
-        return (ret);
+        case VM_CAP_HALT_EXIT:
+                svm_set_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
+                    VMCB_INTCPT_HLT, val);
+                break;
+        case VM_CAP_PAUSE_EXIT:
+                svm_set_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
+                    VMCB_INTCPT_PAUSE, val);
+                break;
+        case VM_CAP_UNRESTRICTED_GUEST:
+                /* Unrestricted guest execution cannot be disabled in SVM */
+                if (val == 0)
+                        error = EINVAL;
+                break;
+        default:
+                error = ENOENT;
+                break;
+        }
+        return (error);
 }
 
 static int
 svm_getcap(void *arg, int vcpu, int type, int *retval)
 {
-        struct svm_softc *svm_sc;
-        struct vmcb_ctrl *ctrl;
-
-        svm_sc = arg;
-        KASSERT(vcpu < svm_sc->vcpu_cnt, ("Guest doesn't have VCPU%d", vcpu));
+        struct svm_softc *sc;
+        int error;
 
-        ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);
+        sc = arg;
+        error = 0;
         switch (type) {
-        case VM_CAP_HALT_EXIT:
-                *retval = (ctrl->ctrl1 & VMCB_INTCPT_HLT) ? 1 : 0;
-                VCPU_CTR1(svm_sc->vm, vcpu, "SVM:get_cap:Halt exit %s.\n",
-                    *retval ? "enabled": "disabled");
+        case VM_CAP_HALT_EXIT:
+                *retval = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
+                    VMCB_INTCPT_HLT);
                 break;
-
-        case VM_CAP_PAUSE_EXIT:
-                *retval = (ctrl->ctrl1 & VMCB_INTCPT_PAUSE) ? 1 : 0;
-                VCPU_CTR1(svm_sc->vm, vcpu, "SVM:get_cap:Pause exit %s.\n",
-                    *retval ? "enabled": "disabled");
+        case VM_CAP_PAUSE_EXIT:
+                *retval = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
+                    VMCB_INTCPT_PAUSE);
                 break;
-
-        case VM_CAP_MTRAP_EXIT:
-                *retval = (ctrl->exception & BIT(IDT_MC)) ? 1 : 0;
-                VCPU_CTR1(svm_sc->vm, vcpu, "SVM:get_cap:MC exit %s.\n",
-                    *retval ? "enabled": "disabled");
-                break;
-
         case VM_CAP_UNRESTRICTED_GUEST:
-                VCPU_CTR0(svm_sc->vm, vcpu, "SVM:get_cap:Unrestricted.\n");
-                *retval = 1;
+                *retval = 1;    /* unrestricted guest is always enabled */
                 break;
-        default:
+        default:
+                error = ENOENT;
                 break;
         }
-
-        return (0);
+        return (error);
 }
 
 static struct vlapic *
diff --git a/sys/amd64/vmm/amd/vmcb.h b/sys/amd64/vmm/amd/vmcb.h
index fa7d12fd1e50..ac24718f2ae4 100644
--- a/sys/amd64/vmm/amd/vmcb.h
+++ b/sys/amd64/vmm/amd/vmcb.h
@@ -34,7 +34,14 @@
  * Layout of VMCB: AMD64 Programmer's Manual Vol2, Appendix B
  */
 
-/* VMCB Control offset 0xC */
+/* vmcb_ctrl->intercept[] array indices */
+#define VMCB_CR_INTCPT          0
+#define VMCB_DR_INTCPT          1
+#define VMCB_EXC_INTCPT         2
+#define VMCB_CTRL1_INTCPT       3
+#define VMCB_CTRL2_INTCPT       4
+
+/* intercept[VMCB_CTRL1_INTCPT] fields */
 #define VMCB_INTCPT_INTR        BIT(0)
 #define VMCB_INTCPT_NMI         BIT(1)
 #define VMCB_INTCPT_SMI         BIT(2)
@@ -68,7 +75,7 @@
 #define VMCB_INTCPT_FERR_FREEZE BIT(30)
 #define VMCB_INTCPT_SHUTDOWN    BIT(31)
 
-/* VMCB Control offset 0x10 */
+/* intercept[VMCB_CTRL2_INTCPT] fields */
 #define VMCB_INTCPT_VMRUN       BIT(0)
 #define VMCB_INTCPT_VMMCALL     BIT(1)
 #define VMCB_INTCPT_VMLOAD      BIT(2)
@@ -91,18 +98,18 @@
 #define VMCB_TLB_FLUSH_GUEST_NONGLOBAL  7  /* Flush guest non-PG entries */
 
 /* VMCB state caching */
-#define VMCB_CACHE_NONE         0        /* No caching */
-#define VMCB_CACHE_I            BIT(0)   /* Cache vectors, TSC offset */
-#define VMCB_CACHE_IOPM         BIT(1)   /* I/O and MSR permission */
-#define VMCB_CACHE_ASID         BIT(2)   /* ASID */
-#define VMCB_CACHE_TPR          BIT(3)   /* V_TPR to V_INTR_VECTOR */
-#define VMCB_CACHE_NP           BIT(4)   /* Nested Paging */
-#define VMCB_CACHE_CR           BIT(5)   /* CR0, CR3, CR4 & EFER */
-#define VMCB_CACHE_DR           BIT(6)   /* Debug registers */
-#define VMCB_CACHE_DT           BIT(7)   /* GDT/IDT */
-#define VMCB_CACHE_SEG          BIT(8)   /* User segments, CPL */
-#define VMCB_CACHE_CR2          BIT(9)   /* page fault address */
-#define VMCB_CACHE_LBR          BIT(10)  /* Last branch */
+#define VMCB_CACHE_NONE         0        /* No caching */
+#define VMCB_CACHE_I            BIT(0)   /* Intercept, TSC off, Pause filter */
+#define VMCB_CACHE_IOPM         BIT(1)   /* I/O and MSR permission */
+#define VMCB_CACHE_ASID         BIT(2)   /* ASID */
+#define VMCB_CACHE_TPR          BIT(3)   /* V_TPR to V_INTR_VECTOR */
+#define VMCB_CACHE_NP           BIT(4)   /* Nested Paging */
+#define VMCB_CACHE_CR           BIT(5)   /* CR0, CR3, CR4 & EFER */
+#define VMCB_CACHE_DR           BIT(6)   /* Debug registers */
+#define VMCB_CACHE_DT           BIT(7)   /* GDT/IDT */
+#define VMCB_CACHE_SEG          BIT(8)   /* User segments, CPL */
+#define VMCB_CACHE_CR2          BIT(9)   /* page fault address */
+#define VMCB_CACHE_LBR          BIT(10)  /* Last branch */
 
 /* VMCB control event injection */
 #define VMCB_EVENTINJ_EC_VALID          BIT(11)  /* Error Code valid */
@@ -175,13 +182,7 @@ CTASSERT(sizeof(struct vmcb_segment) == 16);
 
 /* VMCB control area - padded up to 1024 bytes */
 struct vmcb_ctrl {
-        uint16_t cr_read;       /* Offset 0, CR0-15 read/write */
-        uint16_t cr_write;
-        uint16_t dr_read;       /* Offset 4, DR0-DR15 */
-        uint16_t dr_write;
-        uint32_t exception;     /* Offset 8, bit mask for exceptions. */
-        uint32_t ctrl1;         /* Offset 0xC, intercept events1 */
-        uint32_t ctrl2;         /* Offset 0x10, intercept event2 */
+        uint32_t intercept[5];  /* all intercepts */
         uint8_t  pad1[0x28];    /* Offsets 0x14-0x3B are reserved. */
         uint16_t pause_filthresh; /* Offset 0x3C, PAUSE filter threshold */
         uint16_t pause_filcnt;  /* Offset 0x3E, PAUSE filter count */
```