Diffstat (limited to 'sys')
-rw-r--r--  sys/amd64/include/vmm.h                     33
-rw-r--r--  sys/amd64/include/vmm_instruction_emul.h    21
-rw-r--r--  sys/amd64/vmm/vmm_instruction_emul.c       314
3 files changed, 162 insertions(+), 206 deletions(-)
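
The change is mechanical: every emulation entry point and memory callback that went through the VCPU_DECL/VCPU_ARGS macros (expanding to 'struct vcpu *vcpu' in the kernel and 'void *vm, int vcpuid' in userland) now takes a 'struct vcpu *' directly, so kernel and userland share one signature, and the userland-only vm_inject_*() wrappers in vmm.h are dropped. A minimal sketch of a consumer-side read callback under the new signature follows; the handler name hypothetical_mmio_read and its body are invented for illustration and do not appear in this diff.

#include <errno.h>
#include <stdint.h>

struct vcpu;                            /* opaque handle, forward-declared as in vmm.h */

/* Post-change callback type, matching the typedef in vmm_instruction_emul.h. */
typedef int (*mem_region_read_t)(struct vcpu *vcpu, uint64_t gpa,
    uint64_t *rval, int rsize, void *arg);

/*
 * Hypothetical MMIO read handler.  Before this change a userland build
 * declared the same handler as
 *      int hypothetical_mmio_read(void *vm, int vcpuid, uint64_t gpa, ...)
 * via VCPU_DECL; now both kernel and userland use the struct vcpu * form.
 */
static int
hypothetical_mmio_read(struct vcpu *vcpu, uint64_t gpa, uint64_t *rval,
    int rsize, void *arg)
{
        (void)vcpu; (void)gpa; (void)arg;       /* unused in this sketch */

        if (rsize != 1 && rsize != 2 && rsize != 4 && rsize != 8)
                return (EINVAL);
        *rval = 0;              /* a real handler would return device state here */
        return (0);
}

Such a callback plugs into the emulator as the mem_region_read_t argument of vmm_emulate_instruction() exactly as before; the only difference visible to callers is that a vcpu pointer is threaded through instead of a (vm, vcpuid) pair.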
diff --git a/sys/amd64/include/vmm.h b/sys/amd64/include/vmm.h
index 4d6242a8134d..c3d1aa4f3866 100644
--- a/sys/amd64/include/vmm.h
+++ b/sys/amd64/include/vmm.h
@@ -35,6 +35,7 @@
#include <sys/sdt.h>
#include <x86/segments.h>
+struct vcpu;
struct vm_snapshot_meta;
#ifdef _KERNEL
@@ -143,7 +144,6 @@ enum x2apic_state {
#ifdef _KERNEL
CTASSERT(VM_MAX_NAMELEN >= VM_MIN_NAMELEN);
-struct vcpu;
struct vm;
struct vm_exception;
struct seg_desc;
@@ -762,7 +762,6 @@ struct vm_exit {
};
/* APIs to inject faults into the guest */
-#ifdef _KERNEL
void vm_inject_fault(struct vcpu *vcpu, int vector, int errcode_valid,
int errcode);
@@ -791,35 +790,5 @@ vm_inject_ss(struct vcpu *vcpu, int errcode)
}
void vm_inject_pf(struct vcpu *vcpu, int error_code, uint64_t cr2);
-#else
-void vm_inject_fault(void *vm, int vcpuid, int vector, int errcode_valid,
- int errcode);
-
-static __inline void
-vm_inject_ud(void *vm, int vcpuid)
-{
- vm_inject_fault(vm, vcpuid, IDT_UD, 0, 0);
-}
-
-static __inline void
-vm_inject_gp(void *vm, int vcpuid)
-{
- vm_inject_fault(vm, vcpuid, IDT_GP, 1, 0);
-}
-
-static __inline void
-vm_inject_ac(void *vm, int vcpuid, int errcode)
-{
- vm_inject_fault(vm, vcpuid, IDT_AC, 1, errcode);
-}
-
-static __inline void
-vm_inject_ss(void *vm, int vcpuid, int errcode)
-{
- vm_inject_fault(vm, vcpuid, IDT_SS, 1, errcode);
-}
-
-void vm_inject_pf(void *vm, int vcpuid, int error_code, uint64_t cr2);
-#endif
#endif /* _VMM_H_ */
diff --git a/sys/amd64/include/vmm_instruction_emul.h b/sys/amd64/include/vmm_instruction_emul.h
index 082405971fe4..49f09625ab8e 100644
--- a/sys/amd64/include/vmm_instruction_emul.h
+++ b/sys/amd64/include/vmm_instruction_emul.h
@@ -34,25 +34,12 @@
#include <sys/mman.h>
/*
- * Allow for different arguments to identify vCPUs in userspace vs the
- * kernel. Eventually we should add struct vcpu in userland and
- * always use the kernel arguments removing these macros.
- */
-#ifdef _KERNEL
-#define VCPU_DECL struct vcpu *vcpu
-#define VCPU_ARGS vcpu
-#else
-#define VCPU_DECL void *vm, int vcpuid
-#define VCPU_ARGS vm, vcpuid
-#endif
-
-/*
* Callback functions to read and write memory regions.
*/
-typedef int (*mem_region_read_t)(VCPU_DECL, uint64_t gpa,
+typedef int (*mem_region_read_t)(struct vcpu *vcpu, uint64_t gpa,
uint64_t *rval, int rsize, void *arg);
-typedef int (*mem_region_write_t)(VCPU_DECL, uint64_t gpa,
+typedef int (*mem_region_write_t)(struct vcpu *vcpu, uint64_t gpa,
uint64_t wval, int wsize, void *arg);
/*
@@ -66,11 +53,11 @@ typedef int (*mem_region_write_t)(VCPU_DECL, uint64_t gpa,
* 'struct vmctx *' when called from user context.
* s
*/
-int vmm_emulate_instruction(VCPU_DECL, uint64_t gpa, struct vie *vie,
+int vmm_emulate_instruction(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
struct vm_guest_paging *paging, mem_region_read_t mrr,
mem_region_write_t mrw, void *mrarg);
-int vie_update_register(VCPU_DECL, enum vm_reg_name reg,
+int vie_update_register(struct vcpu *vcpu, enum vm_reg_name reg,
uint64_t val, int size);
/*
diff --git a/sys/amd64/vmm/vmm_instruction_emul.c b/sys/amd64/vmm/vmm_instruction_emul.c
index 32791762c9fe..de24d4bf550a 100644
--- a/sys/amd64/vmm/vmm_instruction_emul.c
+++ b/sys/amd64/vmm/vmm_instruction_emul.c
@@ -290,11 +290,11 @@ static uint64_t size2mask[] = {
};
static int
-vie_read_register(VCPU_DECL, enum vm_reg_name reg, uint64_t *rval)
+vie_read_register(struct vcpu *vcpu, enum vm_reg_name reg, uint64_t *rval)
{
int error;
- error = vm_get_register(VCPU_ARGS, reg, rval);
+ error = vm_get_register(vcpu, reg, rval);
return (error);
}
@@ -326,14 +326,14 @@ vie_calc_bytereg(struct vie *vie, enum vm_reg_name *reg, int *lhbr)
}
static int
-vie_read_bytereg(VCPU_DECL, struct vie *vie, uint8_t *rval)
+vie_read_bytereg(struct vcpu *vcpu, struct vie *vie, uint8_t *rval)
{
uint64_t val;
int error, lhbr;
enum vm_reg_name reg;
vie_calc_bytereg(vie, &reg, &lhbr);
- error = vm_get_register(VCPU_ARGS, reg, &val);
+ error = vm_get_register(vcpu, reg, &val);
/*
* To obtain the value of a legacy high byte register shift the
@@ -347,14 +347,14 @@ vie_read_bytereg(VCPU_DECL, struct vie *vie, uint8_t *rval)
}
static int
-vie_write_bytereg(VCPU_DECL, struct vie *vie, uint8_t byte)
+vie_write_bytereg(struct vcpu *vcpu, struct vie *vie, uint8_t byte)
{
uint64_t origval, val, mask;
int error, lhbr;
enum vm_reg_name reg;
vie_calc_bytereg(vie, &reg, &lhbr);
- error = vm_get_register(VCPU_ARGS, reg, &origval);
+ error = vm_get_register(vcpu, reg, &origval);
if (error == 0) {
val = byte;
mask = 0xff;
@@ -367,13 +367,13 @@ vie_write_bytereg(VCPU_DECL, struct vie *vie, uint8_t byte)
mask <<= 8;
}
val |= origval & ~mask;
- error = vm_set_register(VCPU_ARGS, reg, val);
+ error = vm_set_register(vcpu, reg, val);
}
return (error);
}
int
-vie_update_register(VCPU_DECL, enum vm_reg_name reg,
+vie_update_register(struct vcpu *vcpu, enum vm_reg_name reg,
uint64_t val, int size)
{
int error;
@@ -382,7 +382,7 @@ vie_update_register(VCPU_DECL, enum vm_reg_name reg,
switch (size) {
case 1:
case 2:
- error = vie_read_register(VCPU_ARGS, reg, &origval);
+ error = vie_read_register(vcpu, reg, &origval);
if (error)
return (error);
val &= size2mask[size];
@@ -397,7 +397,7 @@ vie_update_register(VCPU_DECL, enum vm_reg_name reg,
return (EINVAL);
}
- error = vm_set_register(VCPU_ARGS, reg, val);
+ error = vm_set_register(vcpu, reg, val);
return (error);
}
@@ -509,7 +509,7 @@ getandflags(int opsize, uint64_t x, uint64_t y)
}
static int
-emulate_mov(VCPU_DECL, uint64_t gpa, struct vie *vie,
+emulate_mov(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
mem_region_read_t memread, mem_region_write_t memwrite, void *arg)
{
int error, size;
@@ -528,9 +528,9 @@ emulate_mov(VCPU_DECL, uint64_t gpa, struct vie *vie,
* REX + 88/r: mov r/m8, r8 (%ah, %ch, %dh, %bh not available)
*/
size = 1; /* override for byte operation */
- error = vie_read_bytereg(VCPU_ARGS, vie, &byte);
+ error = vie_read_bytereg(vcpu, vie, &byte);
if (error == 0)
- error = memwrite(VCPU_ARGS, gpa, byte, size, arg);
+ error = memwrite(vcpu, gpa, byte, size, arg);
break;
case 0x89:
/*
@@ -540,10 +540,10 @@ emulate_mov(VCPU_DECL, uint64_t gpa, struct vie *vie,
* REX.W + 89/r mov r/m64, r64
*/
reg = gpr_map[vie->reg];
- error = vie_read_register(VCPU_ARGS, reg, &val);
+ error = vie_read_register(vcpu, reg, &val);
if (error == 0) {
val &= size2mask[size];
- error = memwrite(VCPU_ARGS, gpa, val, size, arg);
+ error = memwrite(vcpu, gpa, val, size, arg);
}
break;
case 0x8A:
@@ -553,9 +553,9 @@ emulate_mov(VCPU_DECL, uint64_t gpa, struct vie *vie,
* REX + 8A/r: mov r8, r/m8
*/
size = 1; /* override for byte operation */
- error = memread(VCPU_ARGS, gpa, &val, size, arg);
+ error = memread(vcpu, gpa, &val, size, arg);
if (error == 0)
- error = vie_write_bytereg(VCPU_ARGS, vie, val);
+ error = vie_write_bytereg(vcpu, vie, val);
break;
case 0x8B:
/*
@@ -564,10 +564,10 @@ emulate_mov(VCPU_DECL, uint64_t gpa, struct vie *vie,
* 8B/r: mov r32, r/m32
* REX.W 8B/r: mov r64, r/m64
*/
- error = memread(VCPU_ARGS, gpa, &val, size, arg);
+ error = memread(vcpu, gpa, &val, size, arg);
if (error == 0) {
reg = gpr_map[vie->reg];
- error = vie_update_register(VCPU_ARGS, reg, val, size);
+ error = vie_update_register(vcpu, reg, val, size);
}
break;
case 0xA1:
@@ -577,10 +577,10 @@ emulate_mov(VCPU_DECL, uint64_t gpa, struct vie *vie,
* A1: mov EAX, moffs32
* REX.W + A1: mov RAX, moffs64
*/
- error = memread(VCPU_ARGS, gpa, &val, size, arg);
+ error = memread(vcpu, gpa, &val, size, arg);
if (error == 0) {
reg = VM_REG_GUEST_RAX;
- error = vie_update_register(VCPU_ARGS, reg, val, size);
+ error = vie_update_register(vcpu, reg, val, size);
}
break;
case 0xA3:
@@ -590,10 +590,10 @@ emulate_mov(VCPU_DECL, uint64_t gpa, struct vie *vie,
* A3: mov moffs32, EAX
* REX.W + A3: mov moffs64, RAX
*/
- error = vie_read_register(VCPU_ARGS, VM_REG_GUEST_RAX, &val);
+ error = vie_read_register(vcpu, VM_REG_GUEST_RAX, &val);
if (error == 0) {
val &= size2mask[size];
- error = memwrite(VCPU_ARGS, gpa, val, size, arg);
+ error = memwrite(vcpu, gpa, val, size, arg);
}
break;
case 0xC6:
@@ -603,7 +603,7 @@ emulate_mov(VCPU_DECL, uint64_t gpa, struct vie *vie,
* REX + C6/0 mov r/m8, imm8
*/
size = 1; /* override for byte operation */
- error = memwrite(VCPU_ARGS, gpa, vie->immediate, size, arg);
+ error = memwrite(vcpu, gpa, vie->immediate, size, arg);
break;
case 0xC7:
/*
@@ -613,7 +613,7 @@ emulate_mov(VCPU_DECL, uint64_t gpa, struct vie *vie,
* REX.W + C7/0 mov r/m64, imm32 (sign-extended to 64-bits)
*/
val = vie->immediate & size2mask[size];
- error = memwrite(VCPU_ARGS, gpa, val, size, arg);
+ error = memwrite(vcpu, gpa, val, size, arg);
break;
default:
break;
@@ -623,7 +623,7 @@ emulate_mov(VCPU_DECL, uint64_t gpa, struct vie *vie,
}
static int
-emulate_movx(VCPU_DECL, uint64_t gpa, struct vie *vie,
+emulate_movx(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
mem_region_read_t memread, mem_region_write_t memwrite __unused, void *arg)
{
int error, size;
@@ -645,7 +645,7 @@ emulate_movx(VCPU_DECL, uint64_t gpa, struct vie *vie,
*/
/* get the first operand */
- error = memread(VCPU_ARGS, gpa, &val, 1, arg);
+ error = memread(vcpu, gpa, &val, 1, arg);
if (error)
break;
@@ -656,7 +656,7 @@ emulate_movx(VCPU_DECL, uint64_t gpa, struct vie *vie,
val = (uint8_t)val;
/* write the result */
- error = vie_update_register(VCPU_ARGS, reg, val, size);
+ error = vie_update_register(vcpu, reg, val, size);
break;
case 0xB7:
/*
@@ -666,7 +666,7 @@ emulate_movx(VCPU_DECL, uint64_t gpa, struct vie *vie,
* 0F B7/r movzx r32, r/m16
* REX.W + 0F B7/r movzx r64, r/m16
*/
- error = memread(VCPU_ARGS, gpa, &val, 2, arg);
+ error = memread(vcpu, gpa, &val, 2, arg);
if (error)
return (error);
@@ -675,7 +675,7 @@ emulate_movx(VCPU_DECL, uint64_t gpa, struct vie *vie,
/* zero-extend word */
val = (uint16_t)val;
- error = vie_update_register(VCPU_ARGS, reg, val, size);
+ error = vie_update_register(vcpu, reg, val, size);
break;
case 0xBE:
/*
@@ -688,7 +688,7 @@ emulate_movx(VCPU_DECL, uint64_t gpa, struct vie *vie,
*/
/* get the first operand */
- error = memread(VCPU_ARGS, gpa, &val, 1, arg);
+ error = memread(vcpu, gpa, &val, 1, arg);
if (error)
break;
@@ -699,7 +699,7 @@ emulate_movx(VCPU_DECL, uint64_t gpa, struct vie *vie,
val = (int8_t)val;
/* write the result */
- error = vie_update_register(VCPU_ARGS, reg, val, size);
+ error = vie_update_register(vcpu, reg, val, size);
break;
default:
break;
@@ -711,7 +711,7 @@ emulate_movx(VCPU_DECL, uint64_t gpa, struct vie *vie,
* Helper function to calculate and validate a linear address.
*/
static int
-get_gla(VCPU_DECL, struct vie *vie __unused,
+get_gla(struct vcpu *vcpu, struct vie *vie __unused,
struct vm_guest_paging *paging, int opsize, int addrsize, int prot,
enum vm_reg_name seg, enum vm_reg_name gpr, uint64_t *gla, int *fault)
{
@@ -719,39 +719,39 @@ get_gla(VCPU_DECL, struct vie *vie __unused,
uint64_t cr0, val, rflags;
int error __diagused;
- error = vie_read_register(VCPU_ARGS, VM_REG_GUEST_CR0, &cr0);
+ error = vie_read_register(vcpu, VM_REG_GUEST_CR0, &cr0);
KASSERT(error == 0, ("%s: error %d getting cr0", __func__, error));
- error = vie_read_register(VCPU_ARGS, VM_REG_GUEST_RFLAGS, &rflags);
+ error = vie_read_register(vcpu, VM_REG_GUEST_RFLAGS, &rflags);
KASSERT(error == 0, ("%s: error %d getting rflags", __func__, error));
- error = vm_get_seg_desc(VCPU_ARGS, seg, &desc);
+ error = vm_get_seg_desc(vcpu, seg, &desc);
KASSERT(error == 0, ("%s: error %d getting segment descriptor %d",
__func__, error, seg));
- error = vie_read_register(VCPU_ARGS, gpr, &val);
+ error = vie_read_register(vcpu, gpr, &val);
KASSERT(error == 0, ("%s: error %d getting register %d", __func__,
error, gpr));
if (vie_calculate_gla(paging->cpu_mode, seg, &desc, val, opsize,
addrsize, prot, gla)) {
if (seg == VM_REG_GUEST_SS)
- vm_inject_ss(VCPU_ARGS, 0);
+ vm_inject_ss(vcpu, 0);
else
- vm_inject_gp(VCPU_ARGS);
+ vm_inject_gp(vcpu);
goto guest_fault;
}
if (vie_canonical_check(paging->cpu_mode, *gla)) {
if (seg == VM_REG_GUEST_SS)
- vm_inject_ss(VCPU_ARGS, 0);
+ vm_inject_ss(vcpu, 0);
else
- vm_inject_gp(VCPU_ARGS);
+ vm_inject_gp(vcpu);
goto guest_fault;
}
if (vie_alignment_check(paging->cpl, opsize, cr0, rflags, *gla)) {
- vm_inject_ac(VCPU_ARGS, 0);
+ vm_inject_ac(vcpu, 0);
goto guest_fault;
}
@@ -764,7 +764,7 @@ guest_fault:
}
static int
-emulate_movs(VCPU_DECL, uint64_t gpa, struct vie *vie,
+emulate_movs(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
struct vm_guest_paging *paging, mem_region_read_t memread,
mem_region_write_t memwrite, void *arg)
{
@@ -791,7 +791,7 @@ emulate_movs(VCPU_DECL, uint64_t gpa, struct vie *vie,
repeat = vie->repz_present | vie->repnz_present;
if (repeat) {
- error = vie_read_register(VCPU_ARGS, VM_REG_GUEST_RCX, &rcx);
+ error = vie_read_register(vcpu, VM_REG_GUEST_RCX, &rcx);
KASSERT(!error, ("%s: error %d getting rcx", __func__, error));
/*
@@ -821,12 +821,12 @@ emulate_movs(VCPU_DECL, uint64_t gpa, struct vie *vie,
*/
seg = vie->segment_override ? vie->segment_register : VM_REG_GUEST_DS;
- error = get_gla(VCPU_ARGS, vie, paging, opsize, vie->addrsize,
+ error = get_gla(vcpu, vie, paging, opsize, vie->addrsize,
PROT_READ, seg, VM_REG_GUEST_RSI, &srcaddr, &fault);
if (error || fault)
goto done;
- error = vm_copy_setup(VCPU_ARGS, paging, srcaddr, opsize, PROT_READ,
+ error = vm_copy_setup(vcpu, paging, srcaddr, opsize, PROT_READ,
copyinfo, nitems(copyinfo), &fault);
if (error == 0) {
if (fault)
@@ -837,7 +837,7 @@ emulate_movs(VCPU_DECL, uint64_t gpa, struct vie *vie,
*/
vm_copyin(copyinfo, &val, opsize);
vm_copy_teardown(copyinfo, nitems(copyinfo));
- error = memwrite(VCPU_ARGS, gpa, val, opsize, arg);
+ error = memwrite(vcpu, gpa, val, opsize, arg);
if (error)
goto done;
} else {
@@ -846,13 +846,13 @@ emulate_movs(VCPU_DECL, uint64_t gpa, struct vie *vie,
* if 'srcaddr' is in the mmio space.
*/
- error = get_gla(VCPU_ARGS, vie, paging, opsize, vie->addrsize,
+ error = get_gla(vcpu, vie, paging, opsize, vie->addrsize,
PROT_WRITE, VM_REG_GUEST_ES, VM_REG_GUEST_RDI, &dstaddr,
&fault);
if (error || fault)
goto done;
- error = vm_copy_setup(VCPU_ARGS, paging, dstaddr, opsize,
+ error = vm_copy_setup(vcpu, paging, dstaddr, opsize,
PROT_WRITE, copyinfo, nitems(copyinfo), &fault);
if (error == 0) {
if (fault)
@@ -867,7 +867,7 @@ emulate_movs(VCPU_DECL, uint64_t gpa, struct vie *vie,
* injected into the guest then it will happen
* before the MMIO read is attempted.
*/
- error = memread(VCPU_ARGS, gpa, &val, opsize, arg);
+ error = memread(vcpu, gpa, &val, opsize, arg);
if (error)
goto done;
@@ -882,33 +882,33 @@ emulate_movs(VCPU_DECL, uint64_t gpa, struct vie *vie,
* instruction is not going to be restarted due
* to address translation faults.
*/
- error = vm_gla2gpa(VCPU_ARGS, paging, srcaddr,
+ error = vm_gla2gpa(vcpu, paging, srcaddr,
PROT_READ, &srcgpa, &fault);
if (error || fault)
goto done;
- error = vm_gla2gpa(VCPU_ARGS, paging, dstaddr,
+ error = vm_gla2gpa(vcpu, paging, dstaddr,
PROT_WRITE, &dstgpa, &fault);
if (error || fault)
goto done;
- error = memread(VCPU_ARGS, srcgpa, &val, opsize, arg);
+ error = memread(vcpu, srcgpa, &val, opsize, arg);
if (error)
goto done;
- error = memwrite(VCPU_ARGS, dstgpa, val, opsize, arg);
+ error = memwrite(vcpu, dstgpa, val, opsize, arg);
if (error)
goto done;
}
}
- error = vie_read_register(VCPU_ARGS, VM_REG_GUEST_RSI, &rsi);
+ error = vie_read_register(vcpu, VM_REG_GUEST_RSI, &rsi);
KASSERT(error == 0, ("%s: error %d getting rsi", __func__, error));
- error = vie_read_register(VCPU_ARGS, VM_REG_GUEST_RDI, &rdi);
+ error = vie_read_register(vcpu, VM_REG_GUEST_RDI, &rdi);
KASSERT(error == 0, ("%s: error %d getting rdi", __func__, error));
- error = vie_read_register(VCPU_ARGS, VM_REG_GUEST_RFLAGS, &rflags);
+ error = vie_read_register(vcpu, VM_REG_GUEST_RFLAGS, &rflags);
KASSERT(error == 0, ("%s: error %d getting rflags", __func__, error));
if (rflags & PSL_D) {
@@ -919,17 +919,17 @@ emulate_movs(VCPU_DECL, uint64_t gpa, struct vie *vie,
rdi += opsize;
}
- error = vie_update_register(VCPU_ARGS, VM_REG_GUEST_RSI, rsi,
+ error = vie_update_register(vcpu, VM_REG_GUEST_RSI, rsi,
vie->addrsize);
KASSERT(error == 0, ("%s: error %d updating rsi", __func__, error));
- error = vie_update_register(VCPU_ARGS, VM_REG_GUEST_RDI, rdi,
+ error = vie_update_register(vcpu, VM_REG_GUEST_RDI, rdi,
vie->addrsize);
KASSERT(error == 0, ("%s: error %d updating rdi", __func__, error));
if (repeat) {
rcx = rcx - 1;
- error = vie_update_register(VCPU_ARGS, VM_REG_GUEST_RCX,
+ error = vie_update_register(vcpu, VM_REG_GUEST_RCX,
rcx, vie->addrsize);
KASSERT(!error, ("%s: error %d updating rcx", __func__, error));
@@ -937,7 +937,7 @@ emulate_movs(VCPU_DECL, uint64_t gpa, struct vie *vie,
* Repeat the instruction if the count register is not zero.
*/
if ((rcx & vie_size2mask(vie->addrsize)) != 0)
- vm_restart_instruction(VCPU_ARGS);
+ vm_restart_instruction(vcpu);
}
done:
KASSERT(error == 0 || error == EFAULT, ("%s: unexpected error %d",
@@ -946,7 +946,7 @@ done:
}
static int
-emulate_stos(VCPU_DECL, uint64_t gpa, struct vie *vie,
+emulate_stos(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
struct vm_guest_paging *paging __unused, mem_region_read_t memread __unused,
mem_region_write_t memwrite, void *arg)
{
@@ -958,7 +958,7 @@ emulate_stos(VCPU_DECL, uint64_t gpa, struct vie *vie,
repeat = vie->repz_present | vie->repnz_present;
if (repeat) {
- error = vie_read_register(VCPU_ARGS, VM_REG_GUEST_RCX, &rcx);
+ error = vie_read_register(vcpu, VM_REG_GUEST_RCX, &rcx);
KASSERT(!error, ("%s: error %d getting rcx", __func__, error));
/*
@@ -969,17 +969,17 @@ emulate_stos(VCPU_DECL, uint64_t gpa, struct vie *vie,
return (0);
}
- error = vie_read_register(VCPU_ARGS, VM_REG_GUEST_RAX, &val);
+ error = vie_read_register(vcpu, VM_REG_GUEST_RAX, &val);
KASSERT(!error, ("%s: error %d getting rax", __func__, error));
- error = memwrite(VCPU_ARGS, gpa, val, opsize, arg);
+ error = memwrite(vcpu, gpa, val, opsize, arg);
if (error)
return (error);
- error = vie_read_register(VCPU_ARGS, VM_REG_GUEST_RDI, &rdi);
+ error = vie_read_register(vcpu, VM_REG_GUEST_RDI, &rdi);
KASSERT(error == 0, ("%s: error %d getting rdi", __func__, error));
- error = vie_read_register(VCPU_ARGS, VM_REG_GUEST_RFLAGS, &rflags);
+ error = vie_read_register(vcpu, VM_REG_GUEST_RFLAGS, &rflags);
KASSERT(error == 0, ("%s: error %d getting rflags", __func__, error));
if (rflags & PSL_D)
@@ -987,13 +987,13 @@ emulate_stos(VCPU_DECL, uint64_t gpa, struct vie *vie,
else
rdi += opsize;
- error = vie_update_register(VCPU_ARGS, VM_REG_GUEST_RDI, rdi,
+ error = vie_update_register(vcpu, VM_REG_GUEST_RDI, rdi,
vie->addrsize);
KASSERT(error == 0, ("%s: error %d updating rdi", __func__, error));
if (repeat) {
rcx = rcx - 1;
- error = vie_update_register(VCPU_ARGS, VM_REG_GUEST_RCX,
+ error = vie_update_register(vcpu, VM_REG_GUEST_RCX,
rcx, vie->addrsize);
KASSERT(!error, ("%s: error %d updating rcx", __func__, error));
@@ -1001,14 +1001,14 @@ emulate_stos(VCPU_DECL, uint64_t gpa, struct vie *vie,
* Repeat the instruction if the count register is not zero.
*/
if ((rcx & vie_size2mask(vie->addrsize)) != 0)
- vm_restart_instruction(VCPU_ARGS);
+ vm_restart_instruction(vcpu);
}
return (0);
}
static int
-emulate_and(VCPU_DECL, uint64_t gpa, struct vie *vie,
+emulate_and(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
mem_region_read_t memread, mem_region_write_t memwrite, void *arg)
{
int error, size;
@@ -1031,18 +1031,18 @@ emulate_and(VCPU_DECL, uint64_t gpa, struct vie *vie,
/* get the first operand */
reg = gpr_map[vie->reg];
- error = vie_read_register(VCPU_ARGS, reg, &val1);
+ error = vie_read_register(vcpu, reg, &val1);
if (error)
break;
/* get the second operand */
- error = memread(VCPU_ARGS, gpa, &val2, size, arg);
+ error = memread(vcpu, gpa, &val2, size, arg);
if (error)
break;
/* perform the operation and write the result */
result = val1 & val2;
- error = vie_update_register(VCPU_ARGS, reg, result, size);
+ error = vie_update_register(vcpu, reg, result, size);
break;
case 0x81:
case 0x83:
@@ -1060,7 +1060,7 @@ emulate_and(VCPU_DECL, uint64_t gpa, struct vie *vie,
*/
/* get the first operand */
- error = memread(VCPU_ARGS, gpa, &val1, size, arg);
+ error = memread(vcpu, gpa, &val1, size, arg);
if (error)
break;
@@ -1069,7 +1069,7 @@ emulate_and(VCPU_DECL, uint64_t gpa, struct vie *vie,
* operand and write the result
*/
result = val1 & vie->immediate;
- error = memwrite(VCPU_ARGS, gpa, result, size, arg);
+ error = memwrite(vcpu, gpa, result, size, arg);
break;
default:
break;
@@ -1077,7 +1077,7 @@ emulate_and(VCPU_DECL, uint64_t gpa, struct vie *vie,
if (error)
return (error);
- error = vie_read_register(VCPU_ARGS, VM_REG_GUEST_RFLAGS, &rflags);
+ error = vie_read_register(vcpu, VM_REG_GUEST_RFLAGS, &rflags);
if (error)
return (error);
@@ -1091,12 +1091,12 @@ emulate_and(VCPU_DECL, uint64_t gpa, struct vie *vie,
rflags &= ~RFLAGS_STATUS_BITS;
rflags |= rflags2 & (PSL_PF | PSL_Z | PSL_N);
- error = vie_update_register(VCPU_ARGS, VM_REG_GUEST_RFLAGS, rflags, 8);
+ error = vie_update_register(vcpu, VM_REG_GUEST_RFLAGS, rflags, 8);
return (error);
}
static int
-emulate_or(VCPU_DECL, uint64_t gpa, struct vie *vie,
+emulate_or(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
mem_region_read_t memread, mem_region_write_t memwrite, void *arg)
{
int error, size;
@@ -1119,18 +1119,18 @@ emulate_or(VCPU_DECL, uint64_t gpa, struct vie *vie,
/* get the first operand */
reg = gpr_map[vie->reg];
- error = vie_read_register(VCPU_ARGS, reg, &val1);
+ error = vie_read_register(vcpu, reg, &val1);
if (error)
break;
/* get the second operand */
- error = memread(VCPU_ARGS, gpa, &val2, size, arg);
+ error = memread(vcpu, gpa, &val2, size, arg);
if (error)
break;
/* perform the operation and write the result */
result = val1 | val2;
- error = vie_update_register(VCPU_ARGS, reg, result, size);
+ error = vie_update_register(vcpu, reg, result, size);
break;
case 0x81:
case 0x83:
@@ -1148,7 +1148,7 @@ emulate_or(VCPU_DECL, uint64_t gpa, struct vie *vie,
*/
/* get the first operand */
- error = memread(VCPU_ARGS, gpa, &val1, size, arg);
+ error = memread(vcpu, gpa, &val1, size, arg);
if (error)
break;
@@ -1157,7 +1157,7 @@ emulate_or(VCPU_DECL, uint64_t gpa, struct vie *vie,
* operand and write the result
*/
result = val1 | vie->immediate;
- error = memwrite(VCPU_ARGS, gpa, result, size, arg);
+ error = memwrite(vcpu, gpa, result, size, arg);
break;
default:
break;
@@ -1165,7 +1165,7 @@ emulate_or(VCPU_DECL, uint64_t gpa, struct vie *vie,
if (error)
return (error);
- error = vie_read_register(VCPU_ARGS, VM_REG_GUEST_RFLAGS, &rflags);
+ error = vie_read_register(vcpu, VM_REG_GUEST_RFLAGS, &rflags);
if (error)
return (error);
@@ -1179,12 +1179,12 @@ emulate_or(VCPU_DECL, uint64_t gpa, struct vie *vie,
rflags &= ~RFLAGS_STATUS_BITS;
rflags |= rflags2 & (PSL_PF | PSL_Z | PSL_N);
- error = vie_update_register(VCPU_ARGS, VM_REG_GUEST_RFLAGS, rflags, 8);
+ error = vie_update_register(vcpu, VM_REG_GUEST_RFLAGS, rflags, 8);
return (error);
}
static int
-emulate_cmp(VCPU_DECL, uint64_t gpa, struct vie *vie,
+emulate_cmp(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
mem_region_read_t memread, mem_region_write_t memwrite __unused, void *arg)
{
int error, size;
@@ -1212,12 +1212,12 @@ emulate_cmp(VCPU_DECL, uint64_t gpa, struct vie *vie,
/* Get the register operand */
reg = gpr_map[vie->reg];
- error = vie_read_register(VCPU_ARGS, reg, &regop);
+ error = vie_read_register(vcpu, reg, &regop);
if (error)
return (error);
/* Get the memory operand */
- error = memread(VCPU_ARGS, gpa, &memop, size, arg);
+ error = memread(vcpu, gpa, &memop, size, arg);
if (error)
return (error);
@@ -1256,7 +1256,7 @@ emulate_cmp(VCPU_DECL, uint64_t gpa, struct vie *vie,
size = 1;
/* get the first operand */
- error = memread(VCPU_ARGS, gpa, &op1, size, arg);
+ error = memread(vcpu, gpa, &op1, size, arg);
if (error)
return (error);
@@ -1265,18 +1265,18 @@ emulate_cmp(VCPU_DECL, uint64_t gpa, struct vie *vie,
default:
return (EINVAL);
}
- error = vie_read_register(VCPU_ARGS, VM_REG_GUEST_RFLAGS, &rflags);
+ error = vie_read_register(vcpu, VM_REG_GUEST_RFLAGS, &rflags);
if (error)
return (error);
rflags &= ~RFLAGS_STATUS_BITS;
rflags |= rflags2 & RFLAGS_STATUS_BITS;
- error = vie_update_register(VCPU_ARGS, VM_REG_GUEST_RFLAGS, rflags, 8);
+ error = vie_update_register(vcpu, VM_REG_GUEST_RFLAGS, rflags, 8);
return (error);
}
static int
-emulate_test(VCPU_DECL, uint64_t gpa, struct vie *vie,
+emulate_test(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
mem_region_read_t memread, mem_region_write_t memwrite __unused, void *arg)
{
int error, size;
@@ -1300,7 +1300,7 @@ emulate_test(VCPU_DECL, uint64_t gpa, struct vie *vie,
if ((vie->reg & 7) != 0)
return (EINVAL);
- error = memread(VCPU_ARGS, gpa, &op1, size, arg);
+ error = memread(vcpu, gpa, &op1, size, arg);
if (error)
return (error);
@@ -1309,7 +1309,7 @@ emulate_test(VCPU_DECL, uint64_t gpa, struct vie *vie,
default:
return (EINVAL);
}
- error = vie_read_register(VCPU_ARGS, VM_REG_GUEST_RFLAGS, &rflags);
+ error = vie_read_register(vcpu, VM_REG_GUEST_RFLAGS, &rflags);
if (error)
return (error);
@@ -1320,12 +1320,12 @@ emulate_test(VCPU_DECL, uint64_t gpa, struct vie *vie,
rflags &= ~RFLAGS_STATUS_BITS;
rflags |= rflags2 & (PSL_PF | PSL_Z | PSL_N);
- error = vie_update_register(VCPU_ARGS, VM_REG_GUEST_RFLAGS, rflags, 8);
+ error = vie_update_register(vcpu, VM_REG_GUEST_RFLAGS, rflags, 8);
return (error);
}
static int
-emulate_bextr(VCPU_DECL, uint64_t gpa, struct vie *vie,
+emulate_bextr(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
struct vm_guest_paging *paging, mem_region_read_t memread,
mem_region_write_t memwrite __unused, void *arg)
{
@@ -1353,13 +1353,13 @@ emulate_bextr(VCPU_DECL, uint64_t gpa, struct vie *vie,
* operand) using an index and length specified in the second /source/
* operand (third operand).
*/
- error = memread(VCPU_ARGS, gpa, &src1, size, arg);
+ error = memread(vcpu, gpa, &src1, size, arg);
if (error)
return (error);
- error = vie_read_register(VCPU_ARGS, gpr_map[vie->vex_reg], &src2);
+ error = vie_read_register(vcpu, gpr_map[vie->vex_reg], &src2);
if (error)
return (error);
- error = vie_read_register(VCPU_ARGS, VM_REG_GUEST_RFLAGS, &rflags);
+ error = vie_read_register(vcpu, VM_REG_GUEST_RFLAGS, &rflags);
if (error)
return (error);
@@ -1385,7 +1385,7 @@ emulate_bextr(VCPU_DECL, uint64_t gpa, struct vie *vie,
dst = src1;
done:
- error = vie_update_register(VCPU_ARGS, gpr_map[vie->reg], dst, size);
+ error = vie_update_register(vcpu, gpr_map[vie->reg], dst, size);
if (error)
return (error);
@@ -1396,13 +1396,13 @@ done:
rflags &= ~RFLAGS_STATUS_BITS;
if (dst == 0)
rflags |= PSL_Z;
- error = vie_update_register(VCPU_ARGS, VM_REG_GUEST_RFLAGS, rflags,
+ error = vie_update_register(vcpu, VM_REG_GUEST_RFLAGS, rflags,
8);
return (error);
}
static int
-emulate_add(VCPU_DECL, uint64_t gpa, struct vie *vie,
+emulate_add(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
mem_region_read_t memread, mem_region_write_t memwrite __unused, void *arg)
{
int error, size;
@@ -1424,18 +1424,18 @@ emulate_add(VCPU_DECL, uint64_t gpa, struct vie *vie,
/* get the first operand */
reg = gpr_map[vie->reg];
- error = vie_read_register(VCPU_ARGS, reg, &val1);
+ error = vie_read_register(vcpu, reg, &val1);
if (error)
break;
/* get the second operand */
- error = memread(VCPU_ARGS, gpa, &val2, size, arg);
+ error = memread(vcpu, gpa, &val2, size, arg);
if (error)
break;
/* perform the operation and write the result */
nval = val1 + val2;
- error = vie_update_register(VCPU_ARGS, reg, nval, size);
+ error = vie_update_register(vcpu, reg, nval, size);
break;
default:
break;
@@ -1443,14 +1443,14 @@ emulate_add(VCPU_DECL, uint64_t gpa, struct vie *vie,
if (!error) {
rflags2 = getaddflags(size, val1, val2);
- error = vie_read_register(VCPU_ARGS, VM_REG_GUEST_RFLAGS,
+ error = vie_read_register(vcpu, VM_REG_GUEST_RFLAGS,
&rflags);
if (error)
return (error);
rflags &= ~RFLAGS_STATUS_BITS;
rflags |= rflags2 & RFLAGS_STATUS_BITS;
- error = vie_update_register(VCPU_ARGS, VM_REG_GUEST_RFLAGS,
+ error = vie_update_register(vcpu, VM_REG_GUEST_RFLAGS,
rflags, 8);
}
@@ -1458,7 +1458,7 @@ emulate_add(VCPU_DECL, uint64_t gpa, struct vie *vie,
}
static int
-emulate_sub(VCPU_DECL, uint64_t gpa, struct vie *vie,
+emulate_sub(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
mem_region_read_t memread, mem_region_write_t memwrite __unused, void *arg)
{
int error, size;
@@ -1480,18 +1480,18 @@ emulate_sub(VCPU_DECL, uint64_t gpa, struct vie *vie,
/* get the first operand */
reg = gpr_map[vie->reg];
- error = vie_read_register(VCPU_ARGS, reg, &val1);
+ error = vie_read_register(vcpu, reg, &val1);
if (error)
break;
/* get the second operand */
- error = memread(VCPU_ARGS, gpa, &val2, size, arg);
+ error = memread(vcpu, gpa, &val2, size, arg);
if (error)
break;
/* perform the operation and write the result */
nval = val1 - val2;
- error = vie_update_register(VCPU_ARGS, reg, nval, size);
+ error = vie_update_register(vcpu, reg, nval, size);
break;
default:
break;
@@ -1499,14 +1499,14 @@ emulate_sub(VCPU_DECL, uint64_t gpa, struct vie *vie,
if (!error) {
rflags2 = getcc(size, val1, val2);
- error = vie_read_register(VCPU_ARGS, VM_REG_GUEST_RFLAGS,
+ error = vie_read_register(vcpu, VM_REG_GUEST_RFLAGS,
&rflags);
if (error)
return (error);
rflags &= ~RFLAGS_STATUS_BITS;
rflags |= rflags2 & RFLAGS_STATUS_BITS;
- error = vie_update_register(VCPU_ARGS, VM_REG_GUEST_RFLAGS,
+ error = vie_update_register(vcpu, VM_REG_GUEST_RFLAGS,
rflags, 8);
}
@@ -1514,7 +1514,7 @@ emulate_sub(VCPU_DECL, uint64_t gpa, struct vie *vie,
}
static int
-emulate_stack_op(VCPU_DECL, uint64_t mmio_gpa, struct vie *vie,
+emulate_stack_op(struct vcpu *vcpu, uint64_t mmio_gpa, struct vie *vie,
struct vm_guest_paging *paging, mem_region_read_t memread,
mem_region_write_t memwrite, void *arg)
{
@@ -1552,7 +1552,7 @@ emulate_stack_op(VCPU_DECL, uint64_t mmio_gpa, struct vie *vie,
* stack-segment descriptor determines the size of the
* stack pointer.
*/
- error = vm_get_seg_desc(VCPU_ARGS, VM_REG_GUEST_SS, &ss_desc);
+ error = vm_get_seg_desc(vcpu, VM_REG_GUEST_SS, &ss_desc);
KASSERT(error == 0, ("%s: error %d getting SS descriptor",
__func__, error));
if (SEG_DESC_DEF32(ss_desc.access))
@@ -1561,13 +1561,13 @@ emulate_stack_op(VCPU_DECL, uint64_t mmio_gpa, struct vie *vie,
stackaddrsize = 2;
}
- error = vie_read_register(VCPU_ARGS, VM_REG_GUEST_CR0, &cr0);
+ error = vie_read_register(vcpu, VM_REG_GUEST_CR0, &cr0);
KASSERT(error == 0, ("%s: error %d getting cr0", __func__, error));
- error = vie_read_register(VCPU_ARGS, VM_REG_GUEST_RFLAGS, &rflags);
+ error = vie_read_register(vcpu, VM_REG_GUEST_RFLAGS, &rflags);
KASSERT(error == 0, ("%s: error %d getting rflags", __func__, error));
- error = vie_read_register(VCPU_ARGS, VM_REG_GUEST_RSP, &rsp);
+ error = vie_read_register(vcpu, VM_REG_GUEST_RSP, &rsp);
KASSERT(error == 0, ("%s: error %d getting rsp", __func__, error));
if (pushop) {
rsp -= size;
@@ -1576,39 +1576,39 @@ emulate_stack_op(VCPU_DECL, uint64_t mmio_gpa, struct vie *vie,
if (vie_calculate_gla(paging->cpu_mode, VM_REG_GUEST_SS, &ss_desc,
rsp, size, stackaddrsize, pushop ? PROT_WRITE : PROT_READ,
&stack_gla)) {
- vm_inject_ss(VCPU_ARGS, 0);
+ vm_inject_ss(vcpu, 0);
return (0);
}
if (vie_canonical_check(paging->cpu_mode, stack_gla)) {
- vm_inject_ss(VCPU_ARGS, 0);
+ vm_inject_ss(vcpu, 0);
return (0);
}
if (vie_alignment_check(paging->cpl, size, cr0, rflags, stack_gla)) {
- vm_inject_ac(VCPU_ARGS, 0);
+ vm_inject_ac(vcpu, 0);
return (0);
}
- error = vm_copy_setup(VCPU_ARGS, paging, stack_gla, size,
+ error = vm_copy_setup(vcpu, paging, stack_gla, size,
pushop ? PROT_WRITE : PROT_READ, copyinfo, nitems(copyinfo),
&fault);
if (error || fault)
return (error);
if (pushop) {
- error = memread(VCPU_ARGS, mmio_gpa, &val, size, arg);
+ error = memread(vcpu, mmio_gpa, &val, size, arg);
if (error == 0)
vm_copyout(&val, copyinfo, size);
} else {
vm_copyin(copyinfo, &val, size);
- error = memwrite(VCPU_ARGS, mmio_gpa, val, size, arg);
+ error = memwrite(vcpu, mmio_gpa, val, size, arg);
rsp += size;
}
vm_copy_teardown(copyinfo, nitems(copyinfo));
if (error == 0) {
- error = vie_update_register(VCPU_ARGS, VM_REG_GUEST_RSP, rsp,
+ error = vie_update_register(vcpu, VM_REG_GUEST_RSP, rsp,
stackaddrsize);
KASSERT(error == 0, ("error %d updating rsp", error));
}
@@ -1616,7 +1616,7 @@ emulate_stack_op(VCPU_DECL, uint64_t mmio_gpa, struct vie *vie,
}
static int
-emulate_push(VCPU_DECL, uint64_t mmio_gpa, struct vie *vie,
+emulate_push(struct vcpu *vcpu, uint64_t mmio_gpa, struct vie *vie,
struct vm_guest_paging *paging, mem_region_read_t memread,
mem_region_write_t memwrite, void *arg)
{
@@ -1631,13 +1631,13 @@ emulate_push(VCPU_DECL, uint64_t mmio_gpa, struct vie *vie,
if ((vie->reg & 7) != 6)
return (EINVAL);
- error = emulate_stack_op(VCPU_ARGS, mmio_gpa, vie, paging, memread,
+ error = emulate_stack_op(vcpu, mmio_gpa, vie, paging, memread,
memwrite, arg);
return (error);
}
static int
-emulate_pop(VCPU_DECL, uint64_t mmio_gpa, struct vie *vie,
+emulate_pop(struct vcpu *vcpu, uint64_t mmio_gpa, struct vie *vie,
struct vm_guest_paging *paging, mem_region_read_t memread,
mem_region_write_t memwrite, void *arg)
{
@@ -1652,13 +1652,13 @@ emulate_pop(VCPU_DECL, uint64_t mmio_gpa, struct vie *vie,
if ((vie->reg & 7) != 0)
return (EINVAL);
- error = emulate_stack_op(VCPU_ARGS, mmio_gpa, vie, paging, memread,
+ error = emulate_stack_op(vcpu, mmio_gpa, vie, paging, memread,
memwrite, arg);
return (error);
}
static int
-emulate_group1(VCPU_DECL, uint64_t gpa, struct vie *vie,
+emulate_group1(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
struct vm_guest_paging *paging __unused, mem_region_read_t memread,
mem_region_write_t memwrite, void *memarg)
{
@@ -1666,15 +1666,15 @@ emulate_group1(VCPU_DECL, uint64_t gpa, struct vie *vie,
switch (vie->reg & 7) {
case 0x1: /* OR */
- error = emulate_or(VCPU_ARGS, gpa, vie,
+ error = emulate_or(vcpu, gpa, vie,
memread, memwrite, memarg);
break;
case 0x4: /* AND */
- error = emulate_and(VCPU_ARGS, gpa, vie,
+ error = emulate_and(vcpu, gpa, vie,
memread, memwrite, memarg);
break;
case 0x7: /* CMP */
- error = emulate_cmp(VCPU_ARGS, gpa, vie,
+ error = emulate_cmp(vcpu, gpa, vie,
memread, memwrite, memarg);
break;
default:
@@ -1686,7 +1686,7 @@ emulate_group1(VCPU_DECL, uint64_t gpa, struct vie *vie,
}
static int
-emulate_bittest(VCPU_DECL, uint64_t gpa, struct vie *vie,
+emulate_bittest(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
mem_region_read_t memread, mem_region_write_t memwrite __unused,
void *memarg)
{
@@ -1702,10 +1702,10 @@ emulate_bittest(VCPU_DECL, uint64_t gpa, struct vie *vie,
if ((vie->reg & 7) != 4)
return (EINVAL);
- error = vie_read_register(VCPU_ARGS, VM_REG_GUEST_RFLAGS, &rflags);
+ error = vie_read_register(vcpu, VM_REG_GUEST_RFLAGS, &rflags);
KASSERT(error == 0, ("%s: error %d getting rflags", __func__, error));
- error = memread(VCPU_ARGS, gpa, &val, vie->opsize, memarg);
+ error = memread(vcpu, gpa, &val, vie->opsize, memarg);
if (error)
return (error);
@@ -1722,14 +1722,14 @@ emulate_bittest(VCPU_DECL, uint64_t gpa, struct vie *vie,
else
rflags &= ~PSL_C;
- error = vie_update_register(VCPU_ARGS, VM_REG_GUEST_RFLAGS, rflags, 8);
+ error = vie_update_register(vcpu, VM_REG_GUEST_RFLAGS, rflags, 8);
KASSERT(error == 0, ("%s: error %d updating rflags", __func__, error));
return (0);
}
static int
-emulate_twob_group15(VCPU_DECL, uint64_t gpa, struct vie *vie,
+emulate_twob_group15(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
mem_region_read_t memread, mem_region_write_t memwrite __unused,
void *memarg)
{
@@ -1749,7 +1749,7 @@ emulate_twob_group15(VCPU_DECL, uint64_t gpa, struct vie *vie,
* CLFLUSH, CLFLUSHOPT. Only check for access
* rights.
*/
- error = memread(VCPU_ARGS, gpa, &buf, 1, memarg);
+ error = memread(vcpu, gpa, &buf, 1, memarg);
}
break;
default:
@@ -1761,7 +1761,7 @@ emulate_twob_group15(VCPU_DECL, uint64_t gpa, struct vie *vie,
}
int
-vmm_emulate_instruction(VCPU_DECL, uint64_t gpa, struct vie *vie,
+vmm_emulate_instruction(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
struct vm_guest_paging *paging, mem_region_read_t memread,
mem_region_write_t memwrite, void *memarg)
{
@@ -1772,68 +1772,68 @@ vmm_emulate_instruction(VCPU_DECL, uint64_t gpa, struct vie *vie,
switch (vie->op.op_type) {
case VIE_OP_TYPE_GROUP1:
- error = emulate_group1(VCPU_ARGS, gpa, vie, paging, memread,
+ error = emulate_group1(vcpu, gpa, vie, paging, memread,
memwrite, memarg);
break;
case VIE_OP_TYPE_POP:
- error = emulate_pop(VCPU_ARGS, gpa, vie, paging, memread,
+ error = emulate_pop(vcpu, gpa, vie, paging, memread,
memwrite, memarg);
break;
case VIE_OP_TYPE_PUSH:
- error = emulate_push(VCPU_ARGS, gpa, vie, paging, memread,
+ error = emulate_push(vcpu, gpa, vie, paging, memread,
memwrite, memarg);
break;
case VIE_OP_TYPE_CMP:
- error = emulate_cmp(VCPU_ARGS, gpa, vie,
+ error = emulate_cmp(vcpu, gpa, vie,
memread, memwrite, memarg);
break;
case VIE_OP_TYPE_MOV:
- error = emulate_mov(VCPU_ARGS, gpa, vie,
+ error = emulate_mov(vcpu, gpa, vie,
memread, memwrite, memarg);
break;
case VIE_OP_TYPE_MOVSX:
case VIE_OP_TYPE_MOVZX:
- error = emulate_movx(VCPU_ARGS, gpa, vie,
+ error = emulate_movx(vcpu, gpa, vie,
memread, memwrite, memarg);
break;
case VIE_OP_TYPE_MOVS:
- error = emulate_movs(VCPU_ARGS, gpa, vie, paging, memread,
+ error = emulate_movs(vcpu, gpa, vie, paging, memread,
memwrite, memarg);
break;
case VIE_OP_TYPE_STOS:
- error = emulate_stos(VCPU_ARGS, gpa, vie, paging, memread,
+ error = emulate_stos(vcpu, gpa, vie, paging, memread,
memwrite, memarg);
break;
case VIE_OP_TYPE_AND:
- error = emulate_and(VCPU_ARGS, gpa, vie,
+ error = emulate_and(vcpu, gpa, vie,
memread, memwrite, memarg);
break;
case VIE_OP_TYPE_OR:
- error = emulate_or(VCPU_ARGS, gpa, vie,
+ error = emulate_or(vcpu, gpa, vie,
memread, memwrite, memarg);
break;
case VIE_OP_TYPE_SUB:
- error = emulate_sub(VCPU_ARGS, gpa, vie,
+ error = emulate_sub(vcpu, gpa, vie,
memread, memwrite, memarg);
break;
case VIE_OP_TYPE_BITTEST:
- error = emulate_bittest(VCPU_ARGS, gpa, vie,
+ error = emulate_bittest(vcpu, gpa, vie,
memread, memwrite, memarg);
break;
case VIE_OP_TYPE_TWOB_GRP15:
- error = emulate_twob_group15(VCPU_ARGS, gpa, vie,
+ error = emulate_twob_group15(vcpu, gpa, vie,
memread, memwrite, memarg);
break;
case VIE_OP_TYPE_ADD:
- error = emulate_add(VCPU_ARGS, gpa, vie, memread,
+ error = emulate_add(vcpu, gpa, vie, memread,
memwrite, memarg);
break;
case VIE_OP_TYPE_TEST:
- error = emulate_test(VCPU_ARGS, gpa, vie,
+ error = emulate_test(vcpu, gpa, vie,
memread, memwrite, memarg);
break;
case VIE_OP_TYPE_BEXTR:
- error = emulate_bextr(VCPU_ARGS, gpa, vie, paging,
+ error = emulate_bextr(vcpu, gpa, vie, paging,
memread, memwrite, memarg);
break;
default: