Diffstat (limited to 'usr.sbin/bhyve')
-rw-r--r--  usr.sbin/bhyve/bhyverun.c      251
-rw-r--r--  usr.sbin/bhyve/bhyverun.h        3
-rw-r--r--  usr.sbin/bhyve/bootrom.c         5
-rw-r--r--  usr.sbin/bhyve/gdb.c           119
-rw-r--r--  usr.sbin/bhyve/gdb.h             8
-rw-r--r--  usr.sbin/bhyve/inout.c          16
-rw-r--r--  usr.sbin/bhyve/inout.h           3
-rw-r--r--  usr.sbin/bhyve/kernemu_dev.c     4
-rw-r--r--  usr.sbin/bhyve/mem.c            58
-rw-r--r--  usr.sbin/bhyve/mem.h            12
-rw-r--r--  usr.sbin/bhyve/pci_emul.c       11
-rw-r--r--  usr.sbin/bhyve/pci_passthru.c    6
-rw-r--r--  usr.sbin/bhyve/pctestdev.c       4
-rw-r--r--  usr.sbin/bhyve/snapshot.c        4
-rw-r--r--  usr.sbin/bhyve/spinup_ap.c      21
-rw-r--r--  usr.sbin/bhyve/spinup_ap.h       2
-rw-r--r--  usr.sbin/bhyve/task_switch.c   247
-rw-r--r--  usr.sbin/bhyve/vga.c            69
-rw-r--r--  usr.sbin/bhyve/xmsr.c            6
-rw-r--r--  usr.sbin/bhyve/xmsr.h            4
20 files changed, 427 insertions, 426 deletions
diff --git a/usr.sbin/bhyve/bhyverun.c b/usr.sbin/bhyve/bhyverun.c
index dd30eb4405ef..57d8dd7aea60 100644
--- a/usr.sbin/bhyve/bhyverun.c
+++ b/usr.sbin/bhyve/bhyverun.c
@@ -183,7 +183,7 @@ static const char * const vmx_exit_reason_desc[] = {
[EXIT_REASON_XRSTORS] = "XRSTORS"
};
-typedef int (*vmexit_handler_t)(struct vmctx *, struct vm_exit *, int *vcpu);
+typedef int (*vmexit_handler_t)(struct vmctx *, struct vcpu *, struct vm_exit *);
int guest_ncpus;
uint16_t cpu_cores, cpu_sockets, cpu_threads;
@@ -195,7 +195,7 @@ static const int BSP = 0;
static cpuset_t cpumask;
-static void vm_loop(struct vmctx *ctx, int vcpu);
+static void vm_loop(struct vmctx *ctx, struct vcpu *vcpu);
static struct bhyvestats {
uint64_t vmexit_bogus;
@@ -208,11 +208,11 @@ static struct bhyvestats {
uint64_t cpu_switch_direct;
} stats;
-static struct mt_vmm_info {
- pthread_t mt_thr;
- struct vmctx *mt_ctx;
- int mt_vcpu;
-} *mt_vmm_info;
+static struct vcpu_info {
+ struct vmctx *ctx;
+ struct vcpu *vcpu;
+ int vcpuid;
+} *vcpu_info;
static cpuset_t **vcpumap;
@@ -485,16 +485,14 @@ build_vcpumaps(void)
}
void
-vm_inject_fault(void *arg, int vcpu, int vector, int errcode_valid,
+vm_inject_fault(struct vcpu *vcpu, int vector, int errcode_valid,
int errcode)
{
- struct vmctx *ctx;
int error, restart_instruction;
- ctx = arg;
restart_instruction = 1;
- error = vm_inject_exception(ctx, vcpu, vector, errcode_valid, errcode,
+ error = vm_inject_exception(vcpu, vector, errcode_valid, errcode,
restart_instruction);
assert(error == 0);
}
@@ -525,27 +523,24 @@ static void *
fbsdrun_start_thread(void *param)
{
char tname[MAXCOMLEN + 1];
- struct mt_vmm_info *mtp;
- int error, vcpu;
-
- mtp = param;
- vcpu = mtp->mt_vcpu;
+ struct vcpu_info *vi = param;
+ int error;
- snprintf(tname, sizeof(tname), "vcpu %d", vcpu);
- pthread_set_name_np(mtp->mt_thr, tname);
+ snprintf(tname, sizeof(tname), "vcpu %d", vi->vcpuid);
+ pthread_set_name_np(pthread_self(), tname);
- if (vcpumap[vcpu] != NULL) {
- error = pthread_setaffinity_np(mtp->mt_thr, sizeof(cpuset_t),
- vcpumap[vcpu]);
+ if (vcpumap[vi->vcpuid] != NULL) {
+ error = pthread_setaffinity_np(pthread_self(),
+ sizeof(cpuset_t), vcpumap[vi->vcpuid]);
assert(error == 0);
}
#ifdef BHYVE_SNAPSHOT
- checkpoint_cpu_add(vcpu);
+ checkpoint_cpu_add(vi->vcpuid);
#endif
- gdb_cpu_add(vcpu);
+ gdb_cpu_add(vi->vcpu);
- vm_loop(mtp->mt_ctx, vcpu);
+ vm_loop(vi->ctx, vi->vcpu);
/* not reached */
exit(1);
@@ -553,23 +548,20 @@ fbsdrun_start_thread(void *param)
}
static void
-fbsdrun_addcpu(struct vmctx *ctx, int newcpu)
+fbsdrun_addcpu(struct vcpu_info *vi)
{
+ pthread_t thr;
int error;
- error = vm_activate_cpu(ctx, newcpu);
+ error = vm_activate_cpu(vi->vcpu);
if (error != 0)
- err(EX_OSERR, "could not activate CPU %d", newcpu);
-
- CPU_SET_ATOMIC(newcpu, &cpumask);
+ err(EX_OSERR, "could not activate CPU %d", vi->vcpuid);
- vm_suspend_cpu(ctx, newcpu);
+ CPU_SET_ATOMIC(vi->vcpuid, &cpumask);
- mt_vmm_info[newcpu].mt_ctx = ctx;
- mt_vmm_info[newcpu].mt_vcpu = newcpu;
+ vm_suspend_cpu(vi->vcpu);
- error = pthread_create(&mt_vmm_info[newcpu].mt_thr, NULL,
- fbsdrun_start_thread, &mt_vmm_info[newcpu]);
+ error = pthread_create(&thr, NULL, fbsdrun_start_thread, vi);
assert(error == 0);
}
@@ -587,8 +579,8 @@ fbsdrun_deletecpu(int vcpu)
}
static int
-vmexit_handle_notify(struct vmctx *ctx __unused, struct vm_exit *vme __unused,
- int *pvcpu __unused, uint32_t eax __unused)
+vmexit_handle_notify(struct vmctx *ctx __unused, struct vcpu *vcpu __unused,
+ struct vm_exit *vme __unused, uint32_t eax __unused)
{
#if BHYVE_DEBUG
/*
@@ -599,13 +591,10 @@ vmexit_handle_notify(struct vmctx *ctx __unused, struct vm_exit *vme __unused,
}
static int
-vmexit_inout(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
+vmexit_inout(struct vmctx *ctx, struct vcpu *vcpu, struct vm_exit *vme)
{
int error;
int bytes, port, in, out;
- int vcpu;
-
- vcpu = *pvcpu;
port = vme->u.inout.port;
bytes = vme->u.inout.bytes;
@@ -614,7 +603,7 @@ vmexit_inout(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
/* Extra-special case of host notifications */
if (out && port == GUEST_NIO_PORT) {
- error = vmexit_handle_notify(ctx, vme, pvcpu, vme->u.inout.eax);
+ error = vmexit_handle_notify(ctx, vcpu, vme, vme->u.inout.eax);
return (error);
}
@@ -631,45 +620,45 @@ vmexit_inout(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
}
static int
-vmexit_rdmsr(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
+vmexit_rdmsr(struct vmctx *ctx __unused, struct vcpu *vcpu, struct vm_exit *vme)
{
uint64_t val;
uint32_t eax, edx;
int error;
val = 0;
- error = emulate_rdmsr(ctx, *pvcpu, vme->u.msr.code, &val);
+ error = emulate_rdmsr(vcpu, vme->u.msr.code, &val);
if (error != 0) {
fprintf(stderr, "rdmsr to register %#x on vcpu %d\n",
- vme->u.msr.code, *pvcpu);
+ vme->u.msr.code, vcpu_id(vcpu));
if (get_config_bool("x86.strictmsr")) {
- vm_inject_gp(ctx, *pvcpu);
+ vm_inject_gp(vcpu);
return (VMEXIT_CONTINUE);
}
}
eax = val;
- error = vm_set_register(ctx, *pvcpu, VM_REG_GUEST_RAX, eax);
+ error = vm_set_register(vcpu, VM_REG_GUEST_RAX, eax);
assert(error == 0);
edx = val >> 32;
- error = vm_set_register(ctx, *pvcpu, VM_REG_GUEST_RDX, edx);
+ error = vm_set_register(vcpu, VM_REG_GUEST_RDX, edx);
assert(error == 0);
return (VMEXIT_CONTINUE);
}
static int
-vmexit_wrmsr(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
+vmexit_wrmsr(struct vmctx *ctx __unused, struct vcpu *vcpu, struct vm_exit *vme)
{
int error;
- error = emulate_wrmsr(ctx, *pvcpu, vme->u.msr.code, vme->u.msr.wval);
+ error = emulate_wrmsr(vcpu, vme->u.msr.code, vme->u.msr.wval);
if (error != 0) {
fprintf(stderr, "wrmsr to register %#x(%#lx) on vcpu %d\n",
- vme->u.msr.code, vme->u.msr.wval, *pvcpu);
+ vme->u.msr.code, vme->u.msr.wval, vcpu_id(vcpu));
if (get_config_bool("x86.strictmsr")) {
- vm_inject_gp(ctx, *pvcpu);
+ vm_inject_gp(vcpu);
return (VMEXIT_CONTINUE);
}
}
@@ -695,10 +684,10 @@ vmexit_vmx_desc(uint32_t exit_reason)
}
static int
-vmexit_vmx(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
+vmexit_vmx(struct vmctx *ctx, struct vcpu *vcpu, struct vm_exit *vme)
{
- fprintf(stderr, "vm exit[%d]\n", *pvcpu);
+ fprintf(stderr, "vm exit[%d]\n", vcpu_id(vcpu));
fprintf(stderr, "\treason\t\tVMX\n");
fprintf(stderr, "\trip\t\t0x%016lx\n", vme->rip);
fprintf(stderr, "\tinst_length\t%d\n", vme->inst_length);
@@ -711,7 +700,7 @@ vmexit_vmx(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
fprintf(stderr, "\tinst_error\t\t%d\n", vme->u.vmx.inst_error);
#ifdef DEBUG_EPT_MISCONFIG
if (vme->u.vmx.exit_reason == EXIT_REASON_EPT_MISCONFIG) {
- vm_get_register(ctx, *pvcpu,
+ vm_get_register(vcpu,
VMCS_IDENT(VMCS_GUEST_PHYSICAL_ADDRESS),
&ept_misconfig_gpa);
vm_get_gpa_pmap(ctx, ept_misconfig_gpa, ept_misconfig_pte,
@@ -728,10 +717,10 @@ vmexit_vmx(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
}
static int
-vmexit_svm(struct vmctx *ctx __unused, struct vm_exit *vme, int *pvcpu)
+vmexit_svm(struct vmctx *ctx __unused, struct vcpu *vcpu, struct vm_exit *vme)
{
- fprintf(stderr, "vm exit[%d]\n", *pvcpu);
+ fprintf(stderr, "vm exit[%d]\n", vcpu_id(vcpu));
fprintf(stderr, "\treason\t\tSVM\n");
fprintf(stderr, "\trip\t\t0x%016lx\n", vme->rip);
fprintf(stderr, "\tinst_length\t%d\n", vme->inst_length);
@@ -742,8 +731,8 @@ vmexit_svm(struct vmctx *ctx __unused, struct vm_exit *vme, int *pvcpu)
}
static int
-vmexit_bogus(struct vmctx *ctx __unused, struct vm_exit *vme,
- int *pvcpu __unused)
+vmexit_bogus(struct vmctx *ctx __unused, struct vcpu *vcpu __unused,
+ struct vm_exit *vme)
{
assert(vme->inst_length == 0);
@@ -754,8 +743,8 @@ vmexit_bogus(struct vmctx *ctx __unused, struct vm_exit *vme,
}
static int
-vmexit_reqidle(struct vmctx *ctx __unused, struct vm_exit *vme,
- int *pvcpu __unused)
+vmexit_reqidle(struct vmctx *ctx __unused, struct vcpu *vcpu __unused,
+ struct vm_exit *vme)
{
assert(vme->inst_length == 0);
@@ -766,8 +755,8 @@ vmexit_reqidle(struct vmctx *ctx __unused, struct vm_exit *vme,
}
static int
-vmexit_hlt(struct vmctx *ctx __unused, struct vm_exit *vme __unused,
- int *pvcpu __unused)
+vmexit_hlt(struct vmctx *ctx __unused, struct vcpu *vcpu __unused,
+ struct vm_exit *vme __unused)
{
stats.vmexit_hlt++;
@@ -781,8 +770,8 @@ vmexit_hlt(struct vmctx *ctx __unused, struct vm_exit *vme __unused,
}
static int
-vmexit_pause(struct vmctx *ctx __unused, struct vm_exit *vme __unused,
- int *pvcpu __unused)
+vmexit_pause(struct vmctx *ctx __unused, struct vcpu *vcpu __unused,
+ struct vm_exit *vme __unused)
{
stats.vmexit_pause++;
@@ -791,7 +780,8 @@ vmexit_pause(struct vmctx *ctx __unused, struct vm_exit *vme __unused,
}
static int
-vmexit_mtrap(struct vmctx *ctx __unused, struct vm_exit *vme, int *pvcpu)
+vmexit_mtrap(struct vmctx *ctx __unused, struct vcpu *vcpu,
+ struct vm_exit *vme)
{
assert(vme->inst_length == 0);
@@ -799,18 +789,19 @@ vmexit_mtrap(struct vmctx *ctx __unused, struct vm_exit *vme, int *pvcpu)
stats.vmexit_mtrap++;
#ifdef BHYVE_SNAPSHOT
- checkpoint_cpu_suspend(*pvcpu);
+ checkpoint_cpu_suspend(vcpu_id(vcpu));
#endif
- gdb_cpu_mtrap(*pvcpu);
+ gdb_cpu_mtrap(vcpu);
#ifdef BHYVE_SNAPSHOT
- checkpoint_cpu_resume(*pvcpu);
+ checkpoint_cpu_resume(vcpu_id(vcpu));
#endif
return (VMEXIT_CONTINUE);
}
static int
-vmexit_inst_emul(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
+vmexit_inst_emul(struct vmctx *ctx __unused, struct vcpu *vcpu,
+ struct vm_exit *vme)
{
int err, i, cs_d;
struct vie *vie;
@@ -831,13 +822,13 @@ vmexit_inst_emul(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
cs_d = vme->u.inst_emul.cs_d;
if (vmm_decode_instruction(mode, cs_d, vie) != 0)
goto fail;
- if (vm_set_register(ctx, *pvcpu, VM_REG_GUEST_RIP,
+ if (vm_set_register(vcpu, VM_REG_GUEST_RIP,
vme->rip + vie->num_processed) != 0)
goto fail;
}
- err = emulate_mem(ctx, *pvcpu, vme->u.inst_emul.gpa,
- vie, &vme->u.inst_emul.paging);
+ err = emulate_mem(vcpu, vme->u.inst_emul.gpa, vie,
+ &vme->u.inst_emul.paging);
if (err) {
if (err == ESRCH) {
EPRINTLN("Unhandled memory access to 0x%lx\n",
@@ -860,15 +851,16 @@ static pthread_mutex_t resetcpu_mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t resetcpu_cond = PTHREAD_COND_INITIALIZER;
static int
-vmexit_suspend(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
+vmexit_suspend(struct vmctx *ctx, struct vcpu *vcpu, struct vm_exit *vme)
{
enum vm_suspend_how how;
+ int vcpuid = vcpu_id(vcpu);
how = vme->u.suspended.how;
- fbsdrun_deletecpu(*pvcpu);
+ fbsdrun_deletecpu(vcpuid);
- if (*pvcpu != BSP) {
+ if (vcpuid != BSP) {
pthread_mutex_lock(&resetcpu_mtx);
pthread_cond_signal(&resetcpu_cond);
pthread_mutex_unlock(&resetcpu_mtx);
@@ -900,16 +892,16 @@ vmexit_suspend(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
}
static int
-vmexit_debug(struct vmctx *ctx __unused, struct vm_exit *vme __unused,
- int *pvcpu)
+vmexit_debug(struct vmctx *ctx __unused, struct vcpu *vcpu,
+ struct vm_exit *vme __unused)
{
#ifdef BHYVE_SNAPSHOT
- checkpoint_cpu_suspend(*pvcpu);
+ checkpoint_cpu_suspend(vcpu_id(vcpu));
#endif
- gdb_cpu_suspend(*pvcpu);
+ gdb_cpu_suspend(vcpu);
#ifdef BHYVE_SNAPSHOT
- checkpoint_cpu_resume(*pvcpu);
+ checkpoint_cpu_resume(vcpu_id(vcpu));
#endif
/*
* XXX-MJ sleep for a short period to avoid chewing up the CPU in the
@@ -920,22 +912,24 @@ vmexit_debug(struct vmctx *ctx __unused, struct vm_exit *vme __unused,
}
static int
-vmexit_breakpoint(struct vmctx *ctx __unused, struct vm_exit *vme, int *pvcpu)
+vmexit_breakpoint(struct vmctx *ctx __unused, struct vcpu *vcpu,
+ struct vm_exit *vme)
{
- gdb_cpu_breakpoint(*pvcpu, vme);
+ gdb_cpu_breakpoint(vcpu, vme);
return (VMEXIT_CONTINUE);
}
static int
-vmexit_ipi(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu __unused)
+vmexit_ipi(struct vmctx *ctx __unused, struct vcpu *vcpu __unused,
+ struct vm_exit *vme)
{
int error = -1;
int i;
switch (vme->u.ipi.mode) {
case APIC_DELMODE_INIT:
CPU_FOREACH_ISSET(i, &vme->u.ipi.dmask) {
- error = vm_suspend_cpu(ctx, i);
+ error = vm_suspend_cpu(vcpu_info[i].vcpu);
if (error) {
warnx("%s: failed to suspend cpu %d\n",
__func__, i);
@@ -945,7 +939,8 @@ vmexit_ipi(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu __unused)
break;
case APIC_DELMODE_STARTUP:
CPU_FOREACH_ISSET(i, &vme->u.ipi.dmask) {
- spinup_ap(ctx, i, vme->u.ipi.vector << PAGE_SHIFT);
+ spinup_ap(vcpu_info[i].vcpu,
+ vme->u.ipi.vector << PAGE_SHIFT);
}
error = 0;
break;
@@ -975,7 +970,7 @@ static vmexit_handler_t handler[VM_EXITCODE_MAX] = {
};
static void
-vm_loop(struct vmctx *ctx, int vcpu)
+vm_loop(struct vmctx *ctx, struct vcpu *vcpu)
{
struct vm_exit vme;
int error, rc;
@@ -983,10 +978,10 @@ vm_loop(struct vmctx *ctx, int vcpu)
cpuset_t active_cpus;
error = vm_active_cpus(ctx, &active_cpus);
- assert(CPU_ISSET(vcpu, &active_cpus));
+ assert(CPU_ISSET(vcpu_id(vcpu), &active_cpus));
while (1) {
- error = vm_run(ctx, vcpu, &vme);
+ error = vm_run(vcpu, &vme);
if (error != 0)
break;
@@ -997,7 +992,7 @@ vm_loop(struct vmctx *ctx, int vcpu)
exit(4);
}
- rc = (*handler[exitcode])(ctx, &vme, &vcpu);
+ rc = (*handler[exitcode])(ctx, vcpu, &vme);
switch (rc) {
case VMEXIT_CONTINUE:
@@ -1012,7 +1007,7 @@ vm_loop(struct vmctx *ctx, int vcpu)
}
static int
-num_vcpus_allowed(struct vmctx *ctx)
+num_vcpus_allowed(struct vmctx *ctx, struct vcpu *vcpu)
{
uint16_t sockets, cores, threads, maxcpus;
int tmp, error;
@@ -1021,7 +1016,7 @@ num_vcpus_allowed(struct vmctx *ctx)
* The guest is allowed to spinup more than one processor only if the
* UNRESTRICTED_GUEST capability is available.
*/
- error = vm_get_capability(ctx, BSP, VM_CAP_UNRESTRICTED_GUEST, &tmp);
+ error = vm_get_capability(vcpu, VM_CAP_UNRESTRICTED_GUEST, &tmp);
if (error != 0)
return (1);
@@ -1033,18 +1028,18 @@ num_vcpus_allowed(struct vmctx *ctx)
}
static void
-fbsdrun_set_capabilities(struct vmctx *ctx, int cpu)
+fbsdrun_set_capabilities(struct vcpu *vcpu, bool bsp)
{
int err, tmp;
if (get_config_bool_default("x86.vmexit_on_hlt", false)) {
- err = vm_get_capability(ctx, cpu, VM_CAP_HALT_EXIT, &tmp);
+ err = vm_get_capability(vcpu, VM_CAP_HALT_EXIT, &tmp);
if (err < 0) {
fprintf(stderr, "VM exit on HLT not supported\n");
exit(4);
}
- vm_set_capability(ctx, cpu, VM_CAP_HALT_EXIT, 1);
- if (cpu == BSP)
+ vm_set_capability(vcpu, VM_CAP_HALT_EXIT, 1);
+ if (bsp)
handler[VM_EXITCODE_HLT] = vmexit_hlt;
}
@@ -1052,30 +1047,30 @@ fbsdrun_set_capabilities(struct vmctx *ctx, int cpu)
/*
* pause exit support required for this mode
*/
- err = vm_get_capability(ctx, cpu, VM_CAP_PAUSE_EXIT, &tmp);
+ err = vm_get_capability(vcpu, VM_CAP_PAUSE_EXIT, &tmp);
if (err < 0) {
fprintf(stderr,
"SMP mux requested, no pause support\n");
exit(4);
}
- vm_set_capability(ctx, cpu, VM_CAP_PAUSE_EXIT, 1);
- if (cpu == BSP)
+ vm_set_capability(vcpu, VM_CAP_PAUSE_EXIT, 1);
+ if (bsp)
handler[VM_EXITCODE_PAUSE] = vmexit_pause;
}
if (get_config_bool_default("x86.x2apic", false))
- err = vm_set_x2apic_state(ctx, cpu, X2APIC_ENABLED);
+ err = vm_set_x2apic_state(vcpu, X2APIC_ENABLED);
else
- err = vm_set_x2apic_state(ctx, cpu, X2APIC_DISABLED);
+ err = vm_set_x2apic_state(vcpu, X2APIC_DISABLED);
if (err) {
fprintf(stderr, "Unable to set x2apic state (%d)\n", err);
exit(4);
}
- vm_set_capability(ctx, cpu, VM_CAP_ENABLE_INVPCID, 1);
+ vm_set_capability(vcpu, VM_CAP_ENABLE_INVPCID, 1);
- err = vm_set_capability(ctx, cpu, VM_CAP_IPI_EXIT, 1);
+ err = vm_set_capability(vcpu, VM_CAP_IPI_EXIT, 1);
assert(err == 0);
}
@@ -1143,23 +1138,23 @@ do_open(const char *vmname)
}
static void
-spinup_vcpu(struct vmctx *ctx, int vcpu)
+spinup_vcpu(struct vcpu_info *vi, bool bsp)
{
int error;
- if (vcpu != BSP) {
- fbsdrun_set_capabilities(ctx, vcpu);
+ if (!bsp) {
+ fbsdrun_set_capabilities(vi->vcpu, false);
/*
* Enable the 'unrestricted guest' mode for APs.
*
* APs startup in power-on 16-bit mode.
*/
- error = vm_set_capability(ctx, vcpu, VM_CAP_UNRESTRICTED_GUEST, 1);
+ error = vm_set_capability(vi->vcpu, VM_CAP_UNRESTRICTED_GUEST, 1);
assert(error == 0);
}
- fbsdrun_addcpu(ctx, vcpu);
+ fbsdrun_addcpu(vi);
}
static bool
@@ -1245,6 +1240,7 @@ main(int argc, char *argv[])
{
int c, error;
int max_vcpus, memflags;
+ struct vcpu *bsp;
struct vmctx *ctx;
uint64_t rip;
size_t memsize;
@@ -1429,14 +1425,26 @@ main(int argc, char *argv[])
}
#endif
- max_vcpus = num_vcpus_allowed(ctx);
+ bsp = vm_vcpu_open(ctx, BSP);
+ max_vcpus = num_vcpus_allowed(ctx, bsp);
if (guest_ncpus > max_vcpus) {
fprintf(stderr, "%d vCPUs requested but only %d available\n",
guest_ncpus, max_vcpus);
exit(4);
}
- fbsdrun_set_capabilities(ctx, BSP);
+ fbsdrun_set_capabilities(bsp, true);
+
+ /* Allocate per-VCPU resources. */
+ vcpu_info = calloc(guest_ncpus, sizeof(*vcpu_info));
+ for (int vcpuid = 0; vcpuid < guest_ncpus; vcpuid++) {
+ vcpu_info[vcpuid].ctx = ctx;
+ vcpu_info[vcpuid].vcpuid = vcpuid;
+ if (vcpuid == BSP)
+ vcpu_info[vcpuid].vcpu = bsp;
+ else
+ vcpu_info[vcpuid].vcpu = vm_vcpu_open(ctx, vcpuid);
+ }
memflags = 0;
if (get_config_bool_default("memory.wired", false))
@@ -1496,24 +1504,20 @@ main(int argc, char *argv[])
init_gdb(ctx);
if (lpc_bootrom()) {
- if (vm_set_capability(ctx, BSP, VM_CAP_UNRESTRICTED_GUEST, 1)) {
+ if (vm_set_capability(bsp, VM_CAP_UNRESTRICTED_GUEST, 1)) {
fprintf(stderr, "ROM boot failed: unrestricted guest "
"capability not available\n");
exit(4);
}
- error = vcpu_reset(ctx, BSP);
+ error = vcpu_reset(bsp);
assert(error == 0);
}
- /* Allocate per-VCPU resources. */
- mt_vmm_info = calloc(guest_ncpus, sizeof(*mt_vmm_info));
-
/*
* Add all vCPUs.
*/
- for (int vcpu = 0; vcpu < guest_ncpus; vcpu++) {
- spinup_vcpu(ctx, vcpu);
- }
+ for (int vcpuid = 0; vcpuid < guest_ncpus; vcpuid++)
+ spinup_vcpu(&vcpu_info[vcpuid], vcpuid == BSP);
#ifdef BHYVE_SNAPSHOT
if (restore_file != NULL) {
@@ -1549,7 +1553,7 @@ main(int argc, char *argv[])
}
#endif
- error = vm_get_register(ctx, BSP, VM_REG_GUEST_RIP, &rip);
+ error = vm_get_register(bsp, VM_REG_GUEST_RIP, &rip);
assert(error == 0);
/*
@@ -1608,14 +1612,11 @@ main(int argc, char *argv[])
if (vm_restore_time(ctx) < 0)
err(EX_OSERR, "Unable to restore time");
- for (int i = 0; i < guest_ncpus; i++) {
- if (i == BSP)
- continue;
- vm_resume_cpu(ctx, i);
- }
- }
+ for (int vcpuid = 0; vcpuid < guest_ncpus; vcpuid++)
+ vm_resume_cpu(vcpu_info[vcpuid].vcpu);
+ } else
#endif
- vm_resume_cpu(ctx, BSP);
+ vm_resume_cpu(bsp);
/*
* Head off to the main event dispatch loop
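Taken together, the bhyverun.c changes convert the vm-exit path from passing a
(ctx, vcpuid) pair to passing an opaque struct vcpu handle, with the numeric id
recovered on demand via vcpu_id(). A minimal sketch of a handler written
against the new vmexit_handler_t signature (the handler name and body are
illustrative, not part of this commit):

static int
vmexit_example(struct vmctx *ctx __unused, struct vcpu *vcpu,
    struct vm_exit *vme)
{
	/* Derive the numeric id from the handle only where needed. */
	fprintf(stderr, "example exit on vcpu %d at rip 0x%016lx\n",
	    vcpu_id(vcpu), vme->rip);
	return (VMEXIT_CONTINUE);
}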
diff --git a/usr.sbin/bhyve/bhyverun.h b/usr.sbin/bhyve/bhyverun.h
index d3eb8c8b23da..dfc7d7463519 100644
--- a/usr.sbin/bhyve/bhyverun.h
+++ b/usr.sbin/bhyve/bhyverun.h
@@ -37,6 +37,7 @@
extern int guest_ncpus;
extern uint16_t cpu_cores, cpu_sockets, cpu_threads;
+struct vcpu;
struct vmctx;
struct vm_exit;
@@ -47,6 +48,6 @@ uintptr_t paddr_host2guest(struct vmctx *ctx, void *addr);
int fbsdrun_virtio_msix(void);
-int vmexit_task_switch(struct vmctx *, struct vm_exit *, int *vcpu);
+int vmexit_task_switch(struct vmctx *, struct vcpu *, struct vm_exit *);
#endif
diff --git a/usr.sbin/bhyve/bootrom.c b/usr.sbin/bhyve/bootrom.c
index b851d39b485c..e671b32286db 100644
--- a/usr.sbin/bhyve/bootrom.c
+++ b/usr.sbin/bhyve/bootrom.c
@@ -84,9 +84,8 @@ static struct bootrom_var_state {
* that the Firmware Volume area is writable and persistent.
*/
static int
-bootrom_var_mem_handler(struct vmctx *ctx __unused, int vcpu __unused, int dir,
- uint64_t addr, int size, uint64_t *val, void *arg1 __unused,
- long arg2 __unused)
+bootrom_var_mem_handler(struct vcpu *vcpu __unused, int dir, uint64_t addr,
+ int size, uint64_t *val, void *arg1 __unused, long arg2 __unused)
{
off_t offset;
diff --git a/usr.sbin/bhyve/gdb.c b/usr.sbin/bhyve/gdb.c
index 6368d5cc79a9..9e50602e92be 100644
--- a/usr.sbin/bhyve/gdb.c
+++ b/usr.sbin/bhyve/gdb.c
@@ -132,6 +132,7 @@ static struct vmctx *ctx;
static int cur_fd = -1;
static TAILQ_HEAD(, breakpoint) breakpoints;
static struct vcpu_state *vcpu_state;
+static struct vcpu **vcpus;
static int cur_vcpu, stopped_vcpu;
static bool gdb_active = false;
@@ -223,7 +224,7 @@ debug(const char *fmt, ...)
static void remove_all_sw_breakpoints(void);
static int
-guest_paging_info(int vcpu, struct vm_guest_paging *paging)
+guest_paging_info(struct vcpu *vcpu, struct vm_guest_paging *paging)
{
uint64_t regs[4];
const int regset[4] = {
@@ -233,7 +234,7 @@ guest_paging_info(int vcpu, struct vm_guest_paging *paging)
VM_REG_GUEST_EFER
};
- if (vm_get_register_set(ctx, vcpu, nitems(regset), regset, regs) == -1)
+ if (vm_get_register_set(vcpu, nitems(regset), regset, regs) == -1)
return (-1);
/*
@@ -268,7 +269,7 @@ guest_paging_info(int vcpu, struct vm_guest_paging *paging)
* return -1.
*/
static int
-guest_vaddr2paddr(int vcpu, uint64_t vaddr, uint64_t *paddr)
+guest_vaddr2paddr(struct vcpu *vcpu, uint64_t vaddr, uint64_t *paddr)
{
struct vm_guest_paging paging;
int fault;
@@ -280,7 +281,7 @@ guest_vaddr2paddr(int vcpu, uint64_t vaddr, uint64_t *paddr)
* Always use PROT_READ. We really care if the VA is
* accessible, not if the current vCPU can write.
*/
- if (vm_gla2gpa_nofault(ctx, vcpu, &paging, vaddr, PROT_READ, paddr,
+ if (vm_gla2gpa_nofault(vcpu, &paging, vaddr, PROT_READ, paddr,
&fault) == -1)
return (-1);
if (fault)
@@ -730,17 +731,18 @@ gdb_finish_suspend_vcpus(void)
* as long as the debug server keeps them suspended.
*/
static void
-_gdb_cpu_suspend(int vcpu, bool report_stop)
+_gdb_cpu_suspend(struct vcpu *vcpu, bool report_stop)
{
+ int vcpuid = vcpu_id(vcpu);
- debug("$vCPU %d suspending\n", vcpu);
- CPU_SET(vcpu, &vcpus_waiting);
+ debug("$vCPU %d suspending\n", vcpuid);
+ CPU_SET(vcpuid, &vcpus_waiting);
if (report_stop && CPU_CMP(&vcpus_waiting, &vcpus_suspended) == 0)
gdb_finish_suspend_vcpus();
- while (CPU_ISSET(vcpu, &vcpus_suspended))
+ while (CPU_ISSET(vcpuid, &vcpus_suspended))
pthread_cond_wait(&idle_vcpus, &gdb_lock);
- CPU_CLR(vcpu, &vcpus_waiting);
- debug("$vCPU %d resuming\n", vcpu);
+ CPU_CLR(vcpuid, &vcpus_waiting);
+ debug("$vCPU %d resuming\n", vcpuid);
}
/*
@@ -748,17 +750,21 @@ _gdb_cpu_suspend(int vcpu, bool report_stop)
* debug server about the new thread.
*/
void
-gdb_cpu_add(int vcpu)
+gdb_cpu_add(struct vcpu *vcpu)
{
+ int vcpuid;
if (!gdb_active)
return;
- debug("$vCPU %d starting\n", vcpu);
+ vcpuid = vcpu_id(vcpu);
+ debug("$vCPU %d starting\n", vcpuid);
pthread_mutex_lock(&gdb_lock);
- assert(vcpu < guest_ncpus);
- CPU_SET(vcpu, &vcpus_active);
+ assert(vcpuid < guest_ncpus);
+ assert(vcpus[vcpuid] == NULL);
+ vcpus[vcpuid] = vcpu;
+ CPU_SET(vcpuid, &vcpus_active);
if (!TAILQ_EMPTY(&breakpoints)) {
- vm_set_capability(ctx, vcpu, VM_CAP_BPT_EXIT, 1);
+ vm_set_capability(vcpu, VM_CAP_BPT_EXIT, 1);
debug("$vCPU %d enabled breakpoint exits\n", vcpu);
}
@@ -768,7 +774,7 @@ gdb_cpu_add(int vcpu)
* executing the first instruction.
*/
if (!CPU_EMPTY(&vcpus_suspended)) {
- CPU_SET(vcpu, &vcpus_suspended);
+ CPU_SET(vcpuid, &vcpus_suspended);
_gdb_cpu_suspend(vcpu, false);
}
pthread_mutex_unlock(&gdb_lock);
@@ -779,12 +785,12 @@ gdb_cpu_add(int vcpu)
* if the vCPU is marked as stepping.
*/
static void
-gdb_cpu_resume(int vcpu)
+gdb_cpu_resume(struct vcpu *vcpu)
{
struct vcpu_state *vs;
int error;
- vs = &vcpu_state[vcpu];
+ vs = &vcpu_state[vcpu_id(vcpu)];
/*
* Any pending event should already be reported before
@@ -793,7 +799,7 @@ gdb_cpu_resume(int vcpu)
assert(vs->hit_swbreak == false);
assert(vs->stepped == false);
if (vs->stepping) {
- error = vm_set_capability(ctx, vcpu, VM_CAP_MTRAP_EXIT, 1);
+ error = vm_set_capability(vcpu, VM_CAP_MTRAP_EXIT, 1);
assert(error == 0);
}
}
@@ -804,7 +810,7 @@ gdb_cpu_resume(int vcpu)
* to a guest-wide suspend such as Ctrl-C or the stop on attach.
*/
void
-gdb_cpu_suspend(int vcpu)
+gdb_cpu_suspend(struct vcpu *vcpu)
{
if (!gdb_active)
@@ -822,7 +828,7 @@ gdb_suspend_vcpus(void)
assert(pthread_mutex_isowned_np(&gdb_lock));
debug("suspending all CPUs\n");
vcpus_suspended = vcpus_active;
- vm_suspend_cpu(ctx, -1);
+ vm_suspend_all_cpus(ctx);
if (CPU_CMP(&vcpus_waiting, &vcpus_suspended) == 0)
gdb_finish_suspend_vcpus();
}
@@ -832,23 +838,25 @@ gdb_suspend_vcpus(void)
* the VT-x-specific MTRAP exit.
*/
void
-gdb_cpu_mtrap(int vcpu)
+gdb_cpu_mtrap(struct vcpu *vcpu)
{
struct vcpu_state *vs;
+ int vcpuid;
if (!gdb_active)
return;
- debug("$vCPU %d MTRAP\n", vcpu);
+ vcpuid = vcpu_id(vcpu);
+ debug("$vCPU %d MTRAP\n", vcpuid);
pthread_mutex_lock(&gdb_lock);
- vs = &vcpu_state[vcpu];
+ vs = &vcpu_state[vcpuid];
if (vs->stepping) {
vs->stepping = false;
vs->stepped = true;
- vm_set_capability(ctx, vcpu, VM_CAP_MTRAP_EXIT, 0);
+ vm_set_capability(vcpu, VM_CAP_MTRAP_EXIT, 0);
while (vs->stepped) {
if (stopped_vcpu == -1) {
- debug("$vCPU %d reporting step\n", vcpu);
- stopped_vcpu = vcpu;
+ debug("$vCPU %d reporting step\n", vcpuid);
+ stopped_vcpu = vcpuid;
gdb_suspend_vcpus();
}
_gdb_cpu_suspend(vcpu, true);
@@ -871,33 +879,34 @@ find_breakpoint(uint64_t gpa)
}
void
-gdb_cpu_breakpoint(int vcpu, struct vm_exit *vmexit)
+gdb_cpu_breakpoint(struct vcpu *vcpu, struct vm_exit *vmexit)
{
struct breakpoint *bp;
struct vcpu_state *vs;
uint64_t gpa;
- int error;
+ int error, vcpuid;
if (!gdb_active) {
fprintf(stderr, "vm_loop: unexpected VMEXIT_DEBUG\n");
exit(4);
}
+ vcpuid = vcpu_id(vcpu);
pthread_mutex_lock(&gdb_lock);
error = guest_vaddr2paddr(vcpu, vmexit->rip, &gpa);
assert(error == 1);
bp = find_breakpoint(gpa);
if (bp != NULL) {
- vs = &vcpu_state[vcpu];
+ vs = &vcpu_state[vcpuid];
assert(vs->stepping == false);
assert(vs->stepped == false);
assert(vs->hit_swbreak == false);
vs->hit_swbreak = true;
- vm_set_register(ctx, vcpu, VM_REG_GUEST_RIP, vmexit->rip);
+ vm_set_register(vcpu, VM_REG_GUEST_RIP, vmexit->rip);
for (;;) {
if (stopped_vcpu == -1) {
- debug("$vCPU %d reporting breakpoint at rip %#lx\n", vcpu,
- vmexit->rip);
- stopped_vcpu = vcpu;
+ debug("$vCPU %d reporting breakpoint at rip %#lx\n",
+ vcpuid, vmexit->rip);
+ stopped_vcpu = vcpuid;
gdb_suspend_vcpus();
}
_gdb_cpu_suspend(vcpu, true);
@@ -914,31 +923,32 @@ gdb_cpu_breakpoint(int vcpu, struct vm_exit *vmexit)
}
gdb_cpu_resume(vcpu);
} else {
- debug("$vCPU %d injecting breakpoint at rip %#lx\n", vcpu,
+ debug("$vCPU %d injecting breakpoint at rip %#lx\n", vcpuid,
vmexit->rip);
- error = vm_set_register(ctx, vcpu,
- VM_REG_GUEST_ENTRY_INST_LENGTH, vmexit->u.bpt.inst_length);
+ error = vm_set_register(vcpu, VM_REG_GUEST_ENTRY_INST_LENGTH,
+ vmexit->u.bpt.inst_length);
assert(error == 0);
- error = vm_inject_exception(ctx, vcpu, IDT_BP, 0, 0, 0);
+ error = vm_inject_exception(vcpu, IDT_BP, 0, 0, 0);
assert(error == 0);
}
pthread_mutex_unlock(&gdb_lock);
}
static bool
-gdb_step_vcpu(int vcpu)
+gdb_step_vcpu(struct vcpu *vcpu)
{
- int error, val;
+ int error, val, vcpuid;
- debug("$vCPU %d step\n", vcpu);
- error = vm_get_capability(ctx, vcpu, VM_CAP_MTRAP_EXIT, &val);
+ vcpuid = vcpu_id(vcpu);
+ debug("$vCPU %d step\n", vcpuid);
+ error = vm_get_capability(vcpu, VM_CAP_MTRAP_EXIT, &val);
if (error < 0)
return (false);
discard_stop();
- vcpu_state[vcpu].stepping = true;
- vm_resume_cpu(ctx, vcpu);
- CPU_CLR(vcpu, &vcpus_suspended);
+ vcpu_state[vcpuid].stepping = true;
+ vm_resume_cpu(vcpu);
+ CPU_CLR(vcpuid, &vcpus_suspended);
pthread_cond_broadcast(&idle_vcpus);
return (true);
}
@@ -948,7 +958,7 @@ gdb_resume_vcpus(void)
{
assert(pthread_mutex_isowned_np(&gdb_lock));
- vm_resume_cpu(ctx, -1);
+ vm_resume_all_cpus(ctx);
debug("resuming all CPUs\n");
CPU_ZERO(&vcpus_suspended);
pthread_cond_broadcast(&idle_vcpus);
@@ -959,7 +969,7 @@ gdb_read_regs(void)
{
uint64_t regvals[nitems(gdb_regset)];
- if (vm_get_register_set(ctx, cur_vcpu, nitems(gdb_regset),
+ if (vm_get_register_set(vcpus[cur_vcpu], nitems(gdb_regset),
gdb_regset, regvals) == -1) {
send_error(errno);
return;
@@ -998,7 +1008,7 @@ gdb_read_mem(const uint8_t *data, size_t len)
started = false;
while (resid > 0) {
- error = guest_vaddr2paddr(cur_vcpu, gva, &gpa);
+ error = guest_vaddr2paddr(vcpus[cur_vcpu], gva, &gpa);
if (error == -1) {
if (started)
finish_packet();
@@ -1050,7 +1060,7 @@ gdb_read_mem(const uint8_t *data, size_t len)
bytes = 2;
else
bytes = 4;
- error = read_mem(ctx, cur_vcpu, gpa, &val,
+ error = read_mem(vcpus[cur_vcpu], gpa, &val,
bytes);
if (error == 0) {
if (!started) {
@@ -1121,7 +1131,7 @@ gdb_write_mem(const uint8_t *data, size_t len)
}
while (resid > 0) {
- error = guest_vaddr2paddr(cur_vcpu, gva, &gpa);
+ error = guest_vaddr2paddr(vcpus[cur_vcpu], gva, &gpa);
if (error == -1) {
send_error(errno);
return;
@@ -1170,7 +1180,7 @@ gdb_write_mem(const uint8_t *data, size_t len)
bytes = 4;
val = be32toh(parse_integer(data, 8));
}
- error = write_mem(ctx, cur_vcpu, gpa, val,
+ error = write_mem(vcpus[cur_vcpu], gpa, val,
bytes);
if (error == 0) {
gpa += bytes;
@@ -1201,7 +1211,7 @@ set_breakpoint_caps(bool enable)
while (!CPU_EMPTY(&mask)) {
vcpu = CPU_FFS(&mask) - 1;
CPU_CLR(vcpu, &mask);
- if (vm_set_capability(ctx, vcpu, VM_CAP_BPT_EXIT,
+ if (vm_set_capability(vcpus[vcpu], VM_CAP_BPT_EXIT,
enable ? 1 : 0) < 0)
return (false);
debug("$vCPU %d %sabled breakpoint exits\n", vcpu,
@@ -1243,7 +1253,7 @@ update_sw_breakpoint(uint64_t gva, int kind, bool insert)
return;
}
- error = guest_vaddr2paddr(cur_vcpu, gva, &gpa);
+ error = guest_vaddr2paddr(vcpus[cur_vcpu], gva, &gpa);
if (error == -1) {
send_error(errno);
return;
@@ -1587,7 +1597,7 @@ handle_command(const uint8_t *data, size_t len)
}
/* Don't send a reply until a stop occurs. */
- if (!gdb_step_vcpu(cur_vcpu)) {
+ if (!gdb_step_vcpu(vcpus[cur_vcpu])) {
send_error(EOPNOTSUPP);
break;
}
@@ -1880,6 +1890,7 @@ init_gdb(struct vmctx *_ctx)
stopped_vcpu = -1;
TAILQ_INIT(&breakpoints);
+ vcpus = calloc(guest_ncpus, sizeof(*vcpus));
vcpu_state = calloc(guest_ncpus, sizeof(*vcpu_state));
if (wait) {
/*
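The gdb.c side now keeps its own id-to-handle table: gdb_cpu_add() records
each struct vcpu in the vcpus array so that packet handlers, which track the
current thread as the integer cur_vcpu, can recover the handle. A sketch of
that mapping (the lookup helper is hypothetical; the real code indexes
vcpus[] directly):

static struct vcpu *
lookup_vcpu(int vcpuid)
{
	assert(vcpuid >= 0 && vcpuid < guest_ncpus);
	assert(vcpus[vcpuid] != NULL);
	return (vcpus[vcpuid]);
}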
diff --git a/usr.sbin/bhyve/gdb.h b/usr.sbin/bhyve/gdb.h
index c5fa522c63e0..f132707cce3f 100644
--- a/usr.sbin/bhyve/gdb.h
+++ b/usr.sbin/bhyve/gdb.h
@@ -30,10 +30,10 @@
#ifndef __GDB_H__
#define __GDB_H__
-void gdb_cpu_add(int vcpu);
-void gdb_cpu_breakpoint(int vcpu, struct vm_exit *vmexit);
-void gdb_cpu_mtrap(int vcpu);
-void gdb_cpu_suspend(int vcpu);
+void gdb_cpu_add(struct vcpu *vcpu);
+void gdb_cpu_breakpoint(struct vcpu *vcpu, struct vm_exit *vmexit);
+void gdb_cpu_mtrap(struct vcpu *vcpu);
+void gdb_cpu_suspend(struct vcpu *vcpu);
void init_gdb(struct vmctx *ctx);
#endif /* !__GDB_H__ */
diff --git a/usr.sbin/bhyve/inout.c b/usr.sbin/bhyve/inout.c
index 225697906393..043fd5eef7af 100644
--- a/usr.sbin/bhyve/inout.c
+++ b/usr.sbin/bhyve/inout.c
@@ -104,7 +104,7 @@ register_default_iohandler(int start, int size)
}
int
-emulate_inout(struct vmctx *ctx, int vcpu, struct vm_exit *vmexit)
+emulate_inout(struct vmctx *ctx, struct vcpu *vcpu, struct vm_exit *vmexit)
{
int addrsize, bytes, flags, in, port, prot, rep;
uint32_t eax, val;
@@ -162,11 +162,11 @@ emulate_inout(struct vmctx *ctx, int vcpu, struct vm_exit *vmexit)
if (vie_calculate_gla(vis->paging.cpu_mode,
vis->seg_name, &vis->seg_desc, index, bytes,
addrsize, prot, &gla)) {
- vm_inject_gp(ctx, vcpu);
+ vm_inject_gp(vcpu);
break;
}
- error = vm_copy_setup(ctx, vcpu, &vis->paging, gla,
+ error = vm_copy_setup(vcpu, &vis->paging, gla,
bytes, prot, iov, nitems(iov), &fault);
if (error) {
retval = -1; /* Unrecoverable error */
@@ -178,7 +178,7 @@ emulate_inout(struct vmctx *ctx, int vcpu, struct vm_exit *vmexit)
if (vie_alignment_check(vis->paging.cpl, bytes,
vis->cr0, vis->rflags, gla)) {
- vm_inject_ac(ctx, vcpu, 0);
+ vm_inject_ac(vcpu, 0);
break;
}
@@ -204,7 +204,7 @@ emulate_inout(struct vmctx *ctx, int vcpu, struct vm_exit *vmexit)
}
/* Update index register */
- error = vie_update_register(ctx, vcpu, idxreg, index, addrsize);
+ error = vie_update_register(vcpu, idxreg, index, addrsize);
assert(error == 0);
/*
@@ -212,14 +212,14 @@ emulate_inout(struct vmctx *ctx, int vcpu, struct vm_exit *vmexit)
* prefix.
*/
if (rep) {
- error = vie_update_register(ctx, vcpu, VM_REG_GUEST_RCX,
+ error = vie_update_register(vcpu, VM_REG_GUEST_RCX,
count, addrsize);
assert(error == 0);
}
/* Restart the instruction if more iterations remain */
if (retval == 0 && count != 0) {
- error = vm_restart_instruction(ctx, vcpu);
+ error = vm_restart_instruction(vcpu);
assert(error == 0);
}
} else {
@@ -229,7 +229,7 @@ emulate_inout(struct vmctx *ctx, int vcpu, struct vm_exit *vmexit)
if (retval == 0 && in) {
eax &= ~vie_size2mask(bytes);
eax |= val & vie_size2mask(bytes);
- error = vm_set_register(ctx, vcpu, VM_REG_GUEST_RAX,
+ error = vm_set_register(vcpu, VM_REG_GUEST_RAX,
eax);
assert(error == 0);
}
diff --git a/usr.sbin/bhyve/inout.h b/usr.sbin/bhyve/inout.h
index a3a37e03ff68..e21bfe7fba19 100644
--- a/usr.sbin/bhyve/inout.h
+++ b/usr.sbin/bhyve/inout.h
@@ -33,6 +33,7 @@
#include <sys/linker_set.h>
+struct vcpu;
struct vmctx;
struct vm_exit;
@@ -72,7 +73,7 @@ struct inout_port {
DATA_SET(inout_port_set, __CONCAT(__inout_port, __LINE__))
void init_inout(void);
-int emulate_inout(struct vmctx *, int vcpu, struct vm_exit *vmexit);
+int emulate_inout(struct vmctx *ctx, struct vcpu *vcpu, struct vm_exit *vmexit);
int register_inout(struct inout_port *iop);
int unregister_inout(struct inout_port *iop);
diff --git a/usr.sbin/bhyve/kernemu_dev.c b/usr.sbin/bhyve/kernemu_dev.c
index 2fa0c3dc1f35..84f096a16c3a 100644
--- a/usr.sbin/bhyve/kernemu_dev.c
+++ b/usr.sbin/bhyve/kernemu_dev.c
@@ -46,10 +46,10 @@ struct vm_hpet_cap;
#include "mem.h"
static int
-apic_handler(struct vmctx *ctx, int vcpu, int dir, uint64_t addr, int size,
+apic_handler(struct vcpu *vcpu, int dir, uint64_t addr, int size,
uint64_t *val, void *arg1 __unused, long arg2 __unused)
{
- if (vm_readwrite_kernemu_device(ctx, vcpu, addr, (dir == MEM_F_WRITE),
+ if (vm_readwrite_kernemu_device(vcpu, addr, (dir == MEM_F_WRITE),
size, val) != 0)
return (errno);
return (0);
diff --git a/usr.sbin/bhyve/mem.c b/usr.sbin/bhyve/mem.c
index 10cc7bbaa7fc..ccb0c69c5a8e 100644
--- a/usr.sbin/bhyve/mem.c
+++ b/usr.sbin/bhyve/mem.c
@@ -48,6 +48,7 @@ __FBSDID("$FreeBSD$");
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
+#include <vmmapi.h>
#include "mem.h"
@@ -142,53 +143,53 @@ mmio_rb_dump(struct mmio_rb_tree *rbt)
RB_GENERATE(mmio_rb_tree, mmio_rb_range, mr_link, mmio_rb_range_compare);
-typedef int (mem_cb_t)(struct vmctx *ctx, int vcpu, uint64_t gpa,
- struct mem_range *mr, void *arg);
+typedef int (mem_cb_t)(struct vcpu *vcpu, uint64_t gpa, struct mem_range *mr,
+ void *arg);
static int
-mem_read(void *ctx, int vcpu, uint64_t gpa, uint64_t *rval, int size, void *arg)
+mem_read(struct vcpu *vcpu, uint64_t gpa, uint64_t *rval, int size, void *arg)
{
int error;
struct mem_range *mr = arg;
- error = (*mr->handler)(ctx, vcpu, MEM_F_READ, gpa, size,
- rval, mr->arg1, mr->arg2);
+ error = (*mr->handler)(vcpu, MEM_F_READ, gpa, size, rval, mr->arg1,
+ mr->arg2);
return (error);
}
static int
-mem_write(void *ctx, int vcpu, uint64_t gpa, uint64_t wval, int size, void *arg)
+mem_write(struct vcpu *vcpu, uint64_t gpa, uint64_t wval, int size, void *arg)
{
int error;
struct mem_range *mr = arg;
- error = (*mr->handler)(ctx, vcpu, MEM_F_WRITE, gpa, size,
- &wval, mr->arg1, mr->arg2);
+ error = (*mr->handler)(vcpu, MEM_F_WRITE, gpa, size, &wval, mr->arg1,
+ mr->arg2);
return (error);
}
static int
-access_memory(struct vmctx *ctx, int vcpu, uint64_t paddr, mem_cb_t *cb,
- void *arg)
+access_memory(struct vcpu *vcpu, uint64_t paddr, mem_cb_t *cb, void *arg)
{
struct mmio_rb_range *entry;
- int err, perror, immutable;
+ int err, perror, immutable, vcpuid;
+ vcpuid = vcpu_id(vcpu);
pthread_rwlock_rdlock(&mmio_rwlock);
/*
* First check the per-vCPU cache
*/
- if (mmio_hint[vcpu] &&
- paddr >= mmio_hint[vcpu]->mr_base &&
- paddr <= mmio_hint[vcpu]->mr_end) {
- entry = mmio_hint[vcpu];
+ if (mmio_hint[vcpuid] &&
+ paddr >= mmio_hint[vcpuid]->mr_base &&
+ paddr <= mmio_hint[vcpuid]->mr_end) {
+ entry = mmio_hint[vcpuid];
} else
entry = NULL;
if (entry == NULL) {
if (mmio_rb_lookup(&mmio_rb_root, paddr, &entry) == 0) {
/* Update the per-vCPU cache */
- mmio_hint[vcpu] = entry;
+ mmio_hint[vcpuid] = entry;
} else if (mmio_rb_lookup(&mmio_rb_fallback, paddr, &entry)) {
perror = pthread_rwlock_unlock(&mmio_rwlock);
assert(perror == 0);
@@ -215,14 +216,13 @@ access_memory(struct vmctx *ctx, int vcpu, uint64_t paddr, mem_cb_t *cb,
assert(perror == 0);
}
- err = cb(ctx, vcpu, paddr, &entry->mr_param, arg);
+ err = cb(vcpu, paddr, &entry->mr_param, arg);
if (!immutable) {
perror = pthread_rwlock_unlock(&mmio_rwlock);
assert(perror == 0);
}
-
return (err);
}
@@ -232,26 +232,25 @@ struct emulate_mem_args {
};
static int
-emulate_mem_cb(struct vmctx *ctx, int vcpu, uint64_t paddr, struct mem_range *mr,
+emulate_mem_cb(struct vcpu *vcpu, uint64_t paddr, struct mem_range *mr,
void *arg)
{
struct emulate_mem_args *ema;
ema = arg;
- return (vmm_emulate_instruction(ctx, vcpu, paddr, ema->vie, ema->paging,
+ return (vmm_emulate_instruction(vcpu, paddr, ema->vie, ema->paging,
mem_read, mem_write, mr));
}
int
-emulate_mem(struct vmctx *ctx, int vcpu, uint64_t paddr, struct vie *vie,
+emulate_mem(struct vcpu *vcpu, uint64_t paddr, struct vie *vie,
struct vm_guest_paging *paging)
-
{
struct emulate_mem_args ema;
ema.vie = vie;
ema.paging = paging;
- return (access_memory(ctx, vcpu, paddr, emulate_mem_cb, &ema));
+ return (access_memory(vcpu, paddr, emulate_mem_cb, &ema));
}
struct rw_mem_args {
@@ -261,36 +260,35 @@ struct rw_mem_args {
};
static int
-rw_mem_cb(struct vmctx *ctx, int vcpu, uint64_t paddr, struct mem_range *mr,
- void *arg)
+rw_mem_cb(struct vcpu *vcpu, uint64_t paddr, struct mem_range *mr, void *arg)
{
struct rw_mem_args *rma;
rma = arg;
- return (mr->handler(ctx, vcpu, rma->operation, paddr, rma->size,
+ return (mr->handler(vcpu, rma->operation, paddr, rma->size,
rma->val, mr->arg1, mr->arg2));
}
int
-read_mem(struct vmctx *ctx, int vcpu, uint64_t gpa, uint64_t *rval, int size)
+read_mem(struct vcpu *vcpu, uint64_t gpa, uint64_t *rval, int size)
{
struct rw_mem_args rma;
rma.val = rval;
rma.size = size;
rma.operation = MEM_F_READ;
- return (access_memory(ctx, vcpu, gpa, rw_mem_cb, &rma));
+ return (access_memory(vcpu, gpa, rw_mem_cb, &rma));
}
int
-write_mem(struct vmctx *ctx, int vcpu, uint64_t gpa, uint64_t wval, int size)
+write_mem(struct vcpu *vcpu, uint64_t gpa, uint64_t wval, int size)
{
struct rw_mem_args rma;
rma.val = &wval;
rma.size = size;
rma.operation = MEM_F_WRITE;
- return (access_memory(ctx, vcpu, gpa, rw_mem_cb, &rma));
+ return (access_memory(vcpu, gpa, rw_mem_cb, &rma));
}
static int
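After the mem.c conversion, the whole MMIO chain (access_memory(), the
mem_cb_t callbacks, and the mem_func_t handlers) carries the vcpu handle;
only the per-vCPU hint cache still needs an integer index, which
access_memory() derives once with vcpu_id(). A caller-side sketch of the
updated read helper (values illustrative):

	uint64_t val;
	int error;

	/* Read 4 bytes of emulated MMIO at 'gpa' on behalf of this vcpu. */
	error = read_mem(vcpu, gpa, &val, 4);
	if (error != 0)
		warnx("unhandled MMIO read at 0x%lx", gpa);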
diff --git a/usr.sbin/bhyve/mem.h b/usr.sbin/bhyve/mem.h
index 965079107476..c5ed70070fb9 100644
--- a/usr.sbin/bhyve/mem.h
+++ b/usr.sbin/bhyve/mem.h
@@ -33,9 +33,9 @@
#include <sys/linker_set.h>
-struct vmctx;
+struct vcpu;
-typedef int (*mem_func_t)(struct vmctx *ctx, int vcpu, int dir, uint64_t addr,
+typedef int (*mem_func_t)(struct vcpu *vcpu, int dir, uint64_t addr,
int size, uint64_t *val, void *arg1, long arg2);
struct mem_range {
@@ -53,15 +53,13 @@ struct mem_range {
#define MEM_F_IMMUTABLE 0x4 /* mem_range cannot be unregistered */
void init_mem(int ncpu);
-int emulate_mem(struct vmctx *, int vcpu, uint64_t paddr, struct vie *vie,
+int emulate_mem(struct vcpu *vcpu, uint64_t paddr, struct vie *vie,
struct vm_guest_paging *paging);
-int read_mem(struct vmctx *ctx, int vcpu, uint64_t gpa, uint64_t *rval,
- int size);
+int read_mem(struct vcpu *vcpu, uint64_t gpa, uint64_t *rval, int size);
int register_mem(struct mem_range *memp);
int register_mem_fallback(struct mem_range *memp);
int unregister_mem(struct mem_range *memp);
-int write_mem(struct vmctx *ctx, int vcpu, uint64_t gpa, uint64_t wval,
- int size);
+int write_mem(struct vcpu *vcpu, uint64_t gpa, uint64_t wval, int size);
#endif /* _MEM_H_ */
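A registration sketch against the updated mem_func_t, showing the shape
device models now use (the device name, range, and handler below are made up
for illustration):

static int
mydev_mem_handler(struct vcpu *vcpu __unused, int dir, uint64_t addr __unused,
    int size __unused, uint64_t *val, void *arg1 __unused, long arg2 __unused)
{
	if (dir == MEM_F_READ)
		*val = 0;	/* reads as zero in this sketch */
	return (0);		/* writes are silently accepted */
}

static struct mem_range mydev_mr = {
	.name = "mydev",
	.flags = MEM_F_RW,
	.handler = mydev_mem_handler,
	.base = 0xd0000000,
	.size = 0x1000,
};

/* ... during device init: */
error = register_mem(&mydev_mr);
assert(error == 0);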
diff --git a/usr.sbin/bhyve/pci_emul.c b/usr.sbin/bhyve/pci_emul.c
index 93411751d635..48d80a63ea51 100644
--- a/usr.sbin/bhyve/pci_emul.c
+++ b/usr.sbin/bhyve/pci_emul.c
@@ -476,7 +476,7 @@ pci_emul_io_handler(struct vmctx *ctx __unused, int in, int port,
}
static int
-pci_emul_mem_handler(struct vmctx *ctx __unused, int vcpu __unused, int dir,
+pci_emul_mem_handler(struct vcpu *vcpu __unused, int dir,
uint64_t addr, int size, uint64_t *val, void *arg1, long arg2)
{
struct pci_devinst *pdi = arg1;
@@ -1278,8 +1278,8 @@ pci_emul_iscap(struct pci_devinst *pi, int offset)
}
static int
-pci_emul_fallback_handler(struct vmctx *ctx __unused, int vcpu __unused,
- int dir, uint64_t addr __unused, int size __unused, uint64_t *val,
+pci_emul_fallback_handler(struct vcpu *vcpu __unused, int dir,
+ uint64_t addr __unused, int size __unused, uint64_t *val,
void *arg1 __unused, long arg2 __unused)
{
/*
@@ -1294,9 +1294,8 @@ pci_emul_fallback_handler(struct vmctx *ctx __unused, int vcpu __unused,
}
static int
-pci_emul_ecfg_handler(struct vmctx *ctx __unused, int vcpu __unused, int dir,
- uint64_t addr, int bytes, uint64_t *val, void *arg1 __unused,
- long arg2 __unused)
+pci_emul_ecfg_handler(struct vcpu *vcpu __unused, int dir, uint64_t addr,
+ int bytes, uint64_t *val, void *arg1 __unused, long arg2 __unused)
{
int bus, slot, func, coff, in;
diff --git a/usr.sbin/bhyve/pci_passthru.c b/usr.sbin/bhyve/pci_passthru.c
index f42bbbda655f..d4382f6cc20e 100644
--- a/usr.sbin/bhyve/pci_passthru.c
+++ b/usr.sbin/bhyve/pci_passthru.c
@@ -445,7 +445,7 @@ msix_table_write(struct passthru_softc *sc, uint64_t offset, int size,
/* If the entry is masked, don't set it up */
if ((entry->vector_control & PCIM_MSIX_VCTRL_MASK) == 0 ||
(vector_control & PCIM_MSIX_VCTRL_MASK) == 0) {
- (void)vm_setup_pptdev_msix(sc->psc_pi->pi_vmctx, 0,
+ (void)vm_setup_pptdev_msix(sc->psc_pi->pi_vmctx,
sc->psc_sel.pc_bus, sc->psc_sel.pc_dev,
sc->psc_sel.pc_func, index, entry->addr,
entry->msg_data, entry->vector_control);
@@ -966,7 +966,7 @@ passthru_cfgwrite(struct pci_devinst *pi, int coff, int bytes, uint32_t val)
if (msicap_access(sc, coff)) {
pci_emul_capwrite(pi, coff, bytes, val, sc->psc_msi.capoff,
PCIY_MSI);
- error = vm_setup_pptdev_msi(pi->pi_vmctx, 0, sc->psc_sel.pc_bus,
+ error = vm_setup_pptdev_msi(pi->pi_vmctx, sc->psc_sel.pc_bus,
sc->psc_sel.pc_dev, sc->psc_sel.pc_func,
pi->pi_msi.addr, pi->pi_msi.msg_data,
pi->pi_msi.maxmsgnum);
@@ -981,7 +981,7 @@ passthru_cfgwrite(struct pci_devinst *pi, int coff, int bytes, uint32_t val)
if (pi->pi_msix.enabled) {
msix_table_entries = pi->pi_msix.table_count;
for (i = 0; i < msix_table_entries; i++) {
- error = vm_setup_pptdev_msix(pi->pi_vmctx, 0,
+ error = vm_setup_pptdev_msix(pi->pi_vmctx,
sc->psc_sel.pc_bus, sc->psc_sel.pc_dev,
sc->psc_sel.pc_func, i,
pi->pi_msix.table[i].addr,
diff --git a/usr.sbin/bhyve/pctestdev.c b/usr.sbin/bhyve/pctestdev.c
index 8b810e641bbc..bd62c2fdb003 100644
--- a/usr.sbin/bhyve/pctestdev.c
+++ b/usr.sbin/bhyve/pctestdev.c
@@ -74,7 +74,7 @@ static uint32_t pctestdev_ioport_data;
static int pctestdev_debugexit_io(struct vmctx *ctx, int in,
int port, int bytes, uint32_t *eax, void *arg);
-static int pctestdev_iomem_io(struct vmctx *ctx, int vcpu, int dir,
+static int pctestdev_iomem_io(struct vcpu *vcpu, int dir,
uint64_t addr, int size, uint64_t *val, void *arg1,
long arg2);
static int pctestdev_ioport_io(struct vmctx *ctx, int in,
@@ -190,7 +190,7 @@ pctestdev_debugexit_io(struct vmctx *ctx __unused, int in,
}
static int
-pctestdev_iomem_io(struct vmctx *ctx __unused, int vcpu __unused, int dir,
+pctestdev_iomem_io(struct vcpu *vcpu __unused, int dir,
uint64_t addr, int size, uint64_t *val, void *arg1 __unused,
long arg2 __unused)
{
diff --git a/usr.sbin/bhyve/snapshot.c b/usr.sbin/bhyve/snapshot.c
index 37aba32a1929..72b63c506c41 100644
--- a/usr.sbin/bhyve/snapshot.c
+++ b/usr.sbin/bhyve/snapshot.c
@@ -1296,7 +1296,7 @@ vm_vcpu_pause(struct vmctx *ctx)
pthread_mutex_lock(&vcpu_lock);
checkpoint_active = true;
- vm_suspend_cpu(ctx, -1);
+ vm_suspend_all_cpus(ctx);
while (CPU_CMP(&vcpus_active, &vcpus_suspended) != 0)
pthread_cond_wait(&vcpus_idle, &vcpu_lock);
pthread_mutex_unlock(&vcpu_lock);
@@ -1309,7 +1309,7 @@ vm_vcpu_resume(struct vmctx *ctx)
pthread_mutex_lock(&vcpu_lock);
checkpoint_active = false;
pthread_mutex_unlock(&vcpu_lock);
- vm_resume_cpu(ctx, -1);
+ vm_resume_all_cpus(ctx);
pthread_cond_broadcast(&vcpus_can_run);
}
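Since a struct vcpu handle cannot encode the old "all vCPUs" sentinel of -1,
snapshot.c (like gdb.c above) moves to the explicit vm_suspend_all_cpus() and
vm_resume_all_cpus() helpers. The pause/resume pairing, as used here:

	vm_suspend_all_cpus(ctx);	/* park every vCPU in the kernel */
	/* ... quiesce devices and write the checkpoint ... */
	vm_resume_all_cpus(ctx);	/* let the guest run again */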
diff --git a/usr.sbin/bhyve/spinup_ap.c b/usr.sbin/bhyve/spinup_ap.c
index 0498ee91232d..61cbef96ed6a 100644
--- a/usr.sbin/bhyve/spinup_ap.c
+++ b/usr.sbin/bhyve/spinup_ap.c
@@ -45,7 +45,7 @@ __FBSDID("$FreeBSD$");
#include "spinup_ap.h"
static void
-spinup_ap_realmode(struct vmctx *ctx, int newcpu, uint64_t rip)
+spinup_ap_realmode(struct vcpu *newcpu, uint64_t rip)
{
int vector, error;
uint16_t cs;
@@ -58,35 +58,32 @@ spinup_ap_realmode(struct vmctx *ctx, int newcpu, uint64_t rip)
* Update the %cs and %rip of the guest so that it starts
* executing real mode code at 'vector << 12'.
*/
- error = vm_set_register(ctx, newcpu, VM_REG_GUEST_RIP, 0);
+ error = vm_set_register(newcpu, VM_REG_GUEST_RIP, 0);
assert(error == 0);
- error = vm_get_desc(ctx, newcpu, VM_REG_GUEST_CS, &desc_base,
+ error = vm_get_desc(newcpu, VM_REG_GUEST_CS, &desc_base,
&desc_limit, &desc_access);
assert(error == 0);
desc_base = vector << PAGE_SHIFT;
- error = vm_set_desc(ctx, newcpu, VM_REG_GUEST_CS,
+ error = vm_set_desc(newcpu, VM_REG_GUEST_CS,
desc_base, desc_limit, desc_access);
assert(error == 0);
cs = (vector << PAGE_SHIFT) >> 4;
- error = vm_set_register(ctx, newcpu, VM_REG_GUEST_CS, cs);
+ error = vm_set_register(newcpu, VM_REG_GUEST_CS, cs);
assert(error == 0);
}
void
-spinup_ap(struct vmctx *ctx, int newcpu, uint64_t rip)
+spinup_ap(struct vcpu *newcpu, uint64_t rip)
{
int error;
- assert(newcpu != 0);
- assert(newcpu < guest_ncpus);
-
- error = vcpu_reset(ctx, newcpu);
+ error = vcpu_reset(newcpu);
assert(error == 0);
- spinup_ap_realmode(ctx, newcpu, rip);
+ spinup_ap_realmode(newcpu, rip);
- vm_resume_cpu(ctx, newcpu);
+ vm_resume_cpu(newcpu);
}
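The real-mode startup arithmetic is unchanged by this commit, only its
plumbing. As a worked example of the code above (assuming PAGE_SHIFT == 12):

	int vector = 0x09;			/* from the SIPI */
	uint64_t start = vector << PAGE_SHIFT;	/* 0x9000: AP entry point */
	uint16_t cs = start >> 4;		/* 0x0900: real-mode %cs */
	/* With %rip = 0, %cs:%rip = 0900:0000 = linear 0x9000. */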
diff --git a/usr.sbin/bhyve/spinup_ap.h b/usr.sbin/bhyve/spinup_ap.h
index ee201427c585..db98edccbe82 100644
--- a/usr.sbin/bhyve/spinup_ap.h
+++ b/usr.sbin/bhyve/spinup_ap.h
@@ -31,6 +31,6 @@
#ifndef _SPINUP_AP_H_
#define _SPINUP_AP_H_
-void spinup_ap(struct vmctx *ctx, int newcpu, uint64_t rip);
+void spinup_ap(struct vcpu *newcpu, uint64_t rip);
#endif
diff --git a/usr.sbin/bhyve/task_switch.c b/usr.sbin/bhyve/task_switch.c
index 0dfb536f09f8..b29478ea5b01 100644
--- a/usr.sbin/bhyve/task_switch.c
+++ b/usr.sbin/bhyve/task_switch.c
@@ -101,22 +101,22 @@ static_assert(sizeof(struct tss32) == 104, "compile-time assertion failed");
#define TSS_BUSY(type) (((type) & 0x2) != 0)
static uint64_t
-GETREG(struct vmctx *ctx, int vcpu, int reg)
+GETREG(struct vcpu *vcpu, int reg)
{
uint64_t val;
int error;
- error = vm_get_register(ctx, vcpu, reg, &val);
+ error = vm_get_register(vcpu, reg, &val);
assert(error == 0);
return (val);
}
static void
-SETREG(struct vmctx *ctx, int vcpu, int reg, uint64_t val)
+SETREG(struct vcpu *vcpu, int reg, uint64_t val)
{
int error;
- error = vm_set_register(ctx, vcpu, reg, val);
+ error = vm_set_register(vcpu, reg, val);
assert(error == 0);
}
@@ -152,7 +152,7 @@ usd_to_seg_desc(struct user_segment_descriptor *usd)
* Bit 2(GDT/LDT) has the usual interpretation of Table Indicator (TI).
*/
static void
-sel_exception(struct vmctx *ctx, int vcpu, int vector, uint16_t sel, int ext)
+sel_exception(struct vcpu *vcpu, int vector, uint16_t sel, int ext)
{
/*
* Bit 2 from the selector is retained as-is in the error code.
@@ -166,7 +166,7 @@ sel_exception(struct vmctx *ctx, int vcpu, int vector, uint16_t sel, int ext)
sel &= ~0x3;
if (ext)
sel |= 0x1;
- vm_inject_fault(ctx, vcpu, vector, 1, sel);
+ vm_inject_fault(vcpu, vector, 1, sel);
}
/*
@@ -174,14 +174,14 @@ sel_exception(struct vmctx *ctx, int vcpu, int vector, uint16_t sel, int ext)
* and non-zero otherwise.
*/
static int
-desc_table_limit_check(struct vmctx *ctx, int vcpu, uint16_t sel)
+desc_table_limit_check(struct vcpu *vcpu, uint16_t sel)
{
uint64_t base;
uint32_t limit, access;
int error, reg;
reg = ISLDT(sel) ? VM_REG_GUEST_LDTR : VM_REG_GUEST_GDTR;
- error = vm_get_desc(ctx, vcpu, reg, &base, &limit, &access);
+ error = vm_get_desc(vcpu, reg, &base, &limit, &access);
assert(error == 0);
if (reg == VM_REG_GUEST_LDTR) {
@@ -204,7 +204,7 @@ desc_table_limit_check(struct vmctx *ctx, int vcpu, uint16_t sel)
* Returns -1 otherwise.
*/
static int
-desc_table_rw(struct vmctx *ctx, int vcpu, struct vm_guest_paging *paging,
+desc_table_rw(struct vcpu *vcpu, struct vm_guest_paging *paging,
uint16_t sel, struct user_segment_descriptor *desc, bool doread,
int *faultptr)
{
@@ -214,11 +214,11 @@ desc_table_rw(struct vmctx *ctx, int vcpu, struct vm_guest_paging *paging,
int error, reg;
reg = ISLDT(sel) ? VM_REG_GUEST_LDTR : VM_REG_GUEST_GDTR;
- error = vm_get_desc(ctx, vcpu, reg, &base, &limit, &access);
+ error = vm_get_desc(vcpu, reg, &base, &limit, &access);
assert(error == 0);
assert(limit >= SEL_LIMIT(sel));
- error = vm_copy_setup(ctx, vcpu, paging, base + SEL_START(sel),
+ error = vm_copy_setup(vcpu, paging, base + SEL_START(sel),
sizeof(*desc), doread ? PROT_READ : PROT_WRITE, iov, nitems(iov),
faultptr);
if (error || *faultptr)
@@ -232,17 +232,17 @@ desc_table_rw(struct vmctx *ctx, int vcpu, struct vm_guest_paging *paging,
}
static int
-desc_table_read(struct vmctx *ctx, int vcpu, struct vm_guest_paging *paging,
+desc_table_read(struct vcpu *vcpu, struct vm_guest_paging *paging,
uint16_t sel, struct user_segment_descriptor *desc, int *faultptr)
{
- return (desc_table_rw(ctx, vcpu, paging, sel, desc, true, faultptr));
+ return (desc_table_rw(vcpu, paging, sel, desc, true, faultptr));
}
static int
-desc_table_write(struct vmctx *ctx, int vcpu, struct vm_guest_paging *paging,
+desc_table_write(struct vcpu *vcpu, struct vm_guest_paging *paging,
uint16_t sel, struct user_segment_descriptor *desc, int *faultptr)
{
- return (desc_table_rw(ctx, vcpu, paging, sel, desc, false, faultptr));
+ return (desc_table_rw(vcpu, paging, sel, desc, false, faultptr));
}
/*
@@ -253,7 +253,7 @@ desc_table_write(struct vmctx *ctx, int vcpu, struct vm_guest_paging *paging,
* Returns -1 otherwise.
*/
static int
-read_tss_descriptor(struct vmctx *ctx, int vcpu, struct vm_task_switch *ts,
+read_tss_descriptor(struct vcpu *vcpu, struct vm_task_switch *ts,
uint16_t sel, struct user_segment_descriptor *desc, int *faultptr)
{
struct vm_guest_paging sup_paging;
@@ -263,17 +263,17 @@ read_tss_descriptor(struct vmctx *ctx, int vcpu, struct vm_task_switch *ts,
assert(IDXSEL(sel) != 0);
/* Fetch the new TSS descriptor */
- if (desc_table_limit_check(ctx, vcpu, sel)) {
+ if (desc_table_limit_check(vcpu, sel)) {
if (ts->reason == TSR_IRET)
- sel_exception(ctx, vcpu, IDT_TS, sel, ts->ext);
+ sel_exception(vcpu, IDT_TS, sel, ts->ext);
else
- sel_exception(ctx, vcpu, IDT_GP, sel, ts->ext);
+ sel_exception(vcpu, IDT_GP, sel, ts->ext);
return (1);
}
sup_paging = ts->paging;
sup_paging.cpl = 0; /* implicit supervisor mode */
- error = desc_table_read(ctx, vcpu, &sup_paging, sel, desc, faultptr);
+ error = desc_table_read(vcpu, &sup_paging, sel, desc, faultptr);
return (error);
}
@@ -309,7 +309,7 @@ ldt_desc(int sd_type)
* Validate the descriptor 'seg_desc' associated with 'segment'.
*/
static int
-validate_seg_desc(struct vmctx *ctx, int vcpu, struct vm_task_switch *ts,
+validate_seg_desc(struct vcpu *vcpu, struct vm_task_switch *ts,
int segment, struct seg_desc *seg_desc, int *faultptr)
{
struct vm_guest_paging sup_paging;
@@ -341,17 +341,17 @@ validate_seg_desc(struct vmctx *ctx, int vcpu, struct vm_task_switch *ts,
}
/* Get the segment selector */
- sel = GETREG(ctx, vcpu, segment);
+ sel = GETREG(vcpu, segment);
/* LDT selector must point into the GDT */
if (ldtseg && ISLDT(sel)) {
- sel_exception(ctx, vcpu, IDT_TS, sel, ts->ext);
+ sel_exception(vcpu, IDT_TS, sel, ts->ext);
return (1);
}
/* Descriptor table limit check */
- if (desc_table_limit_check(ctx, vcpu, sel)) {
- sel_exception(ctx, vcpu, IDT_TS, sel, ts->ext);
+ if (desc_table_limit_check(vcpu, sel)) {
+ sel_exception(vcpu, IDT_TS, sel, ts->ext);
return (1);
}
@@ -359,7 +359,7 @@ validate_seg_desc(struct vmctx *ctx, int vcpu, struct vm_task_switch *ts,
if (IDXSEL(sel) == 0) {
/* Code and stack segment selectors cannot be NULL */
if (codeseg || stackseg) {
- sel_exception(ctx, vcpu, IDT_TS, sel, ts->ext);
+ sel_exception(vcpu, IDT_TS, sel, ts->ext);
return (1);
}
seg_desc->base = 0;
@@ -371,7 +371,7 @@ validate_seg_desc(struct vmctx *ctx, int vcpu, struct vm_task_switch *ts,
/* Read the descriptor from the GDT/LDT */
sup_paging = ts->paging;
sup_paging.cpl = 0; /* implicit supervisor mode */
- error = desc_table_read(ctx, vcpu, &sup_paging, sel, &usd, faultptr);
+ error = desc_table_read(vcpu, &sup_paging, sel, &usd, faultptr);
if (error || *faultptr)
return (error);
@@ -380,7 +380,7 @@ validate_seg_desc(struct vmctx *ctx, int vcpu, struct vm_task_switch *ts,
(codeseg && !code_desc(usd.sd_type)) ||
(dataseg && !data_desc(usd.sd_type)) ||
(stackseg && !stack_desc(usd.sd_type))) {
- sel_exception(ctx, vcpu, IDT_TS, sel, ts->ext);
+ sel_exception(vcpu, IDT_TS, sel, ts->ext);
return (1);
}
@@ -392,17 +392,17 @@ validate_seg_desc(struct vmctx *ctx, int vcpu, struct vm_task_switch *ts,
idtvec = IDT_SS;
else
idtvec = IDT_NP;
- sel_exception(ctx, vcpu, idtvec, sel, ts->ext);
+ sel_exception(vcpu, idtvec, sel, ts->ext);
return (1);
}
- cs = GETREG(ctx, vcpu, VM_REG_GUEST_CS);
+ cs = GETREG(vcpu, VM_REG_GUEST_CS);
cpl = cs & SEL_RPL_MASK;
rpl = sel & SEL_RPL_MASK;
dpl = usd.sd_dpl;
if (stackseg && (rpl != cpl || dpl != cpl)) {
- sel_exception(ctx, vcpu, IDT_TS, sel, ts->ext);
+ sel_exception(vcpu, IDT_TS, sel, ts->ext);
return (1);
}
@@ -410,7 +410,7 @@ validate_seg_desc(struct vmctx *ctx, int vcpu, struct vm_task_switch *ts,
conforming = (usd.sd_type & 0x4) ? true : false;
if ((conforming && (cpl < dpl)) ||
(!conforming && (cpl != dpl))) {
- sel_exception(ctx, vcpu, IDT_TS, sel, ts->ext);
+ sel_exception(vcpu, IDT_TS, sel, ts->ext);
return (1);
}
}
@@ -426,7 +426,7 @@ validate_seg_desc(struct vmctx *ctx, int vcpu, struct vm_task_switch *ts,
conforming = false;
if (!conforming && (rpl > dpl || cpl > dpl)) {
- sel_exception(ctx, vcpu, IDT_TS, sel, ts->ext);
+ sel_exception(vcpu, IDT_TS, sel, ts->ext);
return (1);
}
}
@@ -435,30 +435,30 @@ validate_seg_desc(struct vmctx *ctx, int vcpu, struct vm_task_switch *ts,
}
static void
-tss32_save(struct vmctx *ctx, int vcpu, struct vm_task_switch *task_switch,
+tss32_save(struct vcpu *vcpu, struct vm_task_switch *task_switch,
uint32_t eip, struct tss32 *tss, struct iovec *iov)
{
/* General purpose registers */
- tss->tss_eax = GETREG(ctx, vcpu, VM_REG_GUEST_RAX);
- tss->tss_ecx = GETREG(ctx, vcpu, VM_REG_GUEST_RCX);
- tss->tss_edx = GETREG(ctx, vcpu, VM_REG_GUEST_RDX);
- tss->tss_ebx = GETREG(ctx, vcpu, VM_REG_GUEST_RBX);
- tss->tss_esp = GETREG(ctx, vcpu, VM_REG_GUEST_RSP);
- tss->tss_ebp = GETREG(ctx, vcpu, VM_REG_GUEST_RBP);
- tss->tss_esi = GETREG(ctx, vcpu, VM_REG_GUEST_RSI);
- tss->tss_edi = GETREG(ctx, vcpu, VM_REG_GUEST_RDI);
+ tss->tss_eax = GETREG(vcpu, VM_REG_GUEST_RAX);
+ tss->tss_ecx = GETREG(vcpu, VM_REG_GUEST_RCX);
+ tss->tss_edx = GETREG(vcpu, VM_REG_GUEST_RDX);
+ tss->tss_ebx = GETREG(vcpu, VM_REG_GUEST_RBX);
+ tss->tss_esp = GETREG(vcpu, VM_REG_GUEST_RSP);
+ tss->tss_ebp = GETREG(vcpu, VM_REG_GUEST_RBP);
+ tss->tss_esi = GETREG(vcpu, VM_REG_GUEST_RSI);
+ tss->tss_edi = GETREG(vcpu, VM_REG_GUEST_RDI);
/* Segment selectors */
- tss->tss_es = GETREG(ctx, vcpu, VM_REG_GUEST_ES);
- tss->tss_cs = GETREG(ctx, vcpu, VM_REG_GUEST_CS);
- tss->tss_ss = GETREG(ctx, vcpu, VM_REG_GUEST_SS);
- tss->tss_ds = GETREG(ctx, vcpu, VM_REG_GUEST_DS);
- tss->tss_fs = GETREG(ctx, vcpu, VM_REG_GUEST_FS);
- tss->tss_gs = GETREG(ctx, vcpu, VM_REG_GUEST_GS);
+ tss->tss_es = GETREG(vcpu, VM_REG_GUEST_ES);
+ tss->tss_cs = GETREG(vcpu, VM_REG_GUEST_CS);
+ tss->tss_ss = GETREG(vcpu, VM_REG_GUEST_SS);
+ tss->tss_ds = GETREG(vcpu, VM_REG_GUEST_DS);
+ tss->tss_fs = GETREG(vcpu, VM_REG_GUEST_FS);
+ tss->tss_gs = GETREG(vcpu, VM_REG_GUEST_GS);
/* eflags and eip */
- tss->tss_eflags = GETREG(ctx, vcpu, VM_REG_GUEST_RFLAGS);
+ tss->tss_eflags = GETREG(vcpu, VM_REG_GUEST_RFLAGS);
if (task_switch->reason == TSR_IRET)
tss->tss_eflags &= ~PSL_NT;
tss->tss_eip = eip;
@@ -468,11 +468,11 @@ tss32_save(struct vmctx *ctx, int vcpu, struct vm_task_switch *task_switch,
}
static void
-update_seg_desc(struct vmctx *ctx, int vcpu, int reg, struct seg_desc *sd)
+update_seg_desc(struct vcpu *vcpu, int reg, struct seg_desc *sd)
{
int error;
- error = vm_set_desc(ctx, vcpu, reg, sd->base, sd->limit, sd->access);
+ error = vm_set_desc(vcpu, reg, sd->base, sd->limit, sd->access);
assert(error == 0);
}
@@ -480,7 +480,7 @@ update_seg_desc(struct vmctx *ctx, int vcpu, int reg, struct seg_desc *sd)
* Update the vcpu registers to reflect the state of the new task.
*/
static int
-tss32_restore(struct vmctx *ctx, int vcpu, struct vm_task_switch *ts,
+tss32_restore(struct vmctx *ctx, struct vcpu *vcpu, struct vm_task_switch *ts,
uint16_t ot_sel, struct tss32 *tss, struct iovec *iov, int *faultptr)
{
struct seg_desc seg_desc, seg_desc2;
@@ -500,7 +500,7 @@ tss32_restore(struct vmctx *ctx, int vcpu, struct vm_task_switch *ts,
eflags |= PSL_NT;
/* LDTR */
- SETREG(ctx, vcpu, VM_REG_GUEST_LDTR, tss->tss_ldt);
+ SETREG(vcpu, VM_REG_GUEST_LDTR, tss->tss_ldt);
/* PDBR */
if (ts->paging.paging_mode != PAGING_MODE_FLAT) {
@@ -520,40 +520,40 @@ tss32_restore(struct vmctx *ctx, int vcpu, struct vm_task_switch *ts,
*/
reserved = ~maxphyaddr | 0x1E6;
if (pdpte[i] & reserved) {
- vm_inject_gp(ctx, vcpu);
+ vm_inject_gp(vcpu);
return (1);
}
}
- SETREG(ctx, vcpu, VM_REG_GUEST_PDPTE0, pdpte[0]);
- SETREG(ctx, vcpu, VM_REG_GUEST_PDPTE1, pdpte[1]);
- SETREG(ctx, vcpu, VM_REG_GUEST_PDPTE2, pdpte[2]);
- SETREG(ctx, vcpu, VM_REG_GUEST_PDPTE3, pdpte[3]);
+ SETREG(vcpu, VM_REG_GUEST_PDPTE0, pdpte[0]);
+ SETREG(vcpu, VM_REG_GUEST_PDPTE1, pdpte[1]);
+ SETREG(vcpu, VM_REG_GUEST_PDPTE2, pdpte[2]);
+ SETREG(vcpu, VM_REG_GUEST_PDPTE3, pdpte[3]);
}
- SETREG(ctx, vcpu, VM_REG_GUEST_CR3, tss->tss_cr3);
+ SETREG(vcpu, VM_REG_GUEST_CR3, tss->tss_cr3);
ts->paging.cr3 = tss->tss_cr3;
}
/* eflags and eip */
- SETREG(ctx, vcpu, VM_REG_GUEST_RFLAGS, eflags);
- SETREG(ctx, vcpu, VM_REG_GUEST_RIP, tss->tss_eip);
+ SETREG(vcpu, VM_REG_GUEST_RFLAGS, eflags);
+ SETREG(vcpu, VM_REG_GUEST_RIP, tss->tss_eip);
/* General purpose registers */
- SETREG(ctx, vcpu, VM_REG_GUEST_RAX, tss->tss_eax);
- SETREG(ctx, vcpu, VM_REG_GUEST_RCX, tss->tss_ecx);
- SETREG(ctx, vcpu, VM_REG_GUEST_RDX, tss->tss_edx);
- SETREG(ctx, vcpu, VM_REG_GUEST_RBX, tss->tss_ebx);
- SETREG(ctx, vcpu, VM_REG_GUEST_RSP, tss->tss_esp);
- SETREG(ctx, vcpu, VM_REG_GUEST_RBP, tss->tss_ebp);
- SETREG(ctx, vcpu, VM_REG_GUEST_RSI, tss->tss_esi);
- SETREG(ctx, vcpu, VM_REG_GUEST_RDI, tss->tss_edi);
+ SETREG(vcpu, VM_REG_GUEST_RAX, tss->tss_eax);
+ SETREG(vcpu, VM_REG_GUEST_RCX, tss->tss_ecx);
+ SETREG(vcpu, VM_REG_GUEST_RDX, tss->tss_edx);
+ SETREG(vcpu, VM_REG_GUEST_RBX, tss->tss_ebx);
+ SETREG(vcpu, VM_REG_GUEST_RSP, tss->tss_esp);
+ SETREG(vcpu, VM_REG_GUEST_RBP, tss->tss_ebp);
+ SETREG(vcpu, VM_REG_GUEST_RSI, tss->tss_esi);
+ SETREG(vcpu, VM_REG_GUEST_RDI, tss->tss_edi);
/* Segment selectors */
- SETREG(ctx, vcpu, VM_REG_GUEST_ES, tss->tss_es);
- SETREG(ctx, vcpu, VM_REG_GUEST_CS, tss->tss_cs);
- SETREG(ctx, vcpu, VM_REG_GUEST_SS, tss->tss_ss);
- SETREG(ctx, vcpu, VM_REG_GUEST_DS, tss->tss_ds);
- SETREG(ctx, vcpu, VM_REG_GUEST_FS, tss->tss_fs);
- SETREG(ctx, vcpu, VM_REG_GUEST_GS, tss->tss_gs);
+ SETREG(vcpu, VM_REG_GUEST_ES, tss->tss_es);
+ SETREG(vcpu, VM_REG_GUEST_CS, tss->tss_cs);
+ SETREG(vcpu, VM_REG_GUEST_SS, tss->tss_ss);
+ SETREG(vcpu, VM_REG_GUEST_DS, tss->tss_ds);
+ SETREG(vcpu, VM_REG_GUEST_FS, tss->tss_fs);
+ SETREG(vcpu, VM_REG_GUEST_GS, tss->tss_gs);
/*
* If this is a nested task then write out the new TSS to update
@@ -563,11 +563,11 @@ tss32_restore(struct vmctx *ctx, int vcpu, struct vm_task_switch *ts,
vm_copyout(tss, iov, sizeof(*tss));
/* Validate segment descriptors */
- error = validate_seg_desc(ctx, vcpu, ts, VM_REG_GUEST_LDTR, &seg_desc,
+ error = validate_seg_desc(vcpu, ts, VM_REG_GUEST_LDTR, &seg_desc,
faultptr);
if (error || *faultptr)
return (error);
- update_seg_desc(ctx, vcpu, VM_REG_GUEST_LDTR, &seg_desc);
+ update_seg_desc(vcpu, VM_REG_GUEST_LDTR, &seg_desc);
/*
* Section "Checks on Guest Segment Registers", Intel SDM, Vol 3.
@@ -578,42 +578,42 @@ tss32_restore(struct vmctx *ctx, int vcpu, struct vm_task_switch *ts,
* VM-entry checks so the guest can handle any exception injected
* during task switch emulation.
*/
- error = validate_seg_desc(ctx, vcpu, ts, VM_REG_GUEST_CS, &seg_desc,
+ error = validate_seg_desc(vcpu, ts, VM_REG_GUEST_CS, &seg_desc,
faultptr);
if (error || *faultptr)
return (error);
- error = validate_seg_desc(ctx, vcpu, ts, VM_REG_GUEST_SS, &seg_desc2,
+ error = validate_seg_desc(vcpu, ts, VM_REG_GUEST_SS, &seg_desc2,
faultptr);
if (error || *faultptr)
return (error);
- update_seg_desc(ctx, vcpu, VM_REG_GUEST_CS, &seg_desc);
- update_seg_desc(ctx, vcpu, VM_REG_GUEST_SS, &seg_desc2);
+ update_seg_desc(vcpu, VM_REG_GUEST_CS, &seg_desc);
+ update_seg_desc(vcpu, VM_REG_GUEST_SS, &seg_desc2);
ts->paging.cpl = tss->tss_cs & SEL_RPL_MASK;
- error = validate_seg_desc(ctx, vcpu, ts, VM_REG_GUEST_DS, &seg_desc,
+ error = validate_seg_desc(vcpu, ts, VM_REG_GUEST_DS, &seg_desc,
faultptr);
if (error || *faultptr)
return (error);
- update_seg_desc(ctx, vcpu, VM_REG_GUEST_DS, &seg_desc);
+ update_seg_desc(vcpu, VM_REG_GUEST_DS, &seg_desc);
- error = validate_seg_desc(ctx, vcpu, ts, VM_REG_GUEST_ES, &seg_desc,
+ error = validate_seg_desc(vcpu, ts, VM_REG_GUEST_ES, &seg_desc,
faultptr);
if (error || *faultptr)
return (error);
- update_seg_desc(ctx, vcpu, VM_REG_GUEST_ES, &seg_desc);
+ update_seg_desc(vcpu, VM_REG_GUEST_ES, &seg_desc);
- error = validate_seg_desc(ctx, vcpu, ts, VM_REG_GUEST_FS, &seg_desc,
+ error = validate_seg_desc(vcpu, ts, VM_REG_GUEST_FS, &seg_desc,
faultptr);
if (error || *faultptr)
return (error);
- update_seg_desc(ctx, vcpu, VM_REG_GUEST_FS, &seg_desc);
+ update_seg_desc(vcpu, VM_REG_GUEST_FS, &seg_desc);
- error = validate_seg_desc(ctx, vcpu, ts, VM_REG_GUEST_GS, &seg_desc,
+ error = validate_seg_desc(vcpu, ts, VM_REG_GUEST_GS, &seg_desc,
faultptr);
if (error || *faultptr)
return (error);
- update_seg_desc(ctx, vcpu, VM_REG_GUEST_GS, &seg_desc);
+ update_seg_desc(vcpu, VM_REG_GUEST_GS, &seg_desc);
return (0);
}
@@ -624,7 +624,7 @@ tss32_restore(struct vmctx *ctx, int vcpu, struct vm_task_switch *ts,
* code to be saved (e.g. #PF).
*/
static int
-push_errcode(struct vmctx *ctx, int vcpu, struct vm_guest_paging *paging,
+push_errcode(struct vcpu *vcpu, struct vm_guest_paging *paging,
int task_type, uint32_t errcode, int *faultptr)
{
struct iovec iov[2];
@@ -636,11 +636,11 @@ push_errcode(struct vmctx *ctx, int vcpu, struct vm_guest_paging *paging,
*faultptr = 0;
- cr0 = GETREG(ctx, vcpu, VM_REG_GUEST_CR0);
- rflags = GETREG(ctx, vcpu, VM_REG_GUEST_RFLAGS);
- stacksel = GETREG(ctx, vcpu, VM_REG_GUEST_SS);
+ cr0 = GETREG(vcpu, VM_REG_GUEST_CR0);
+ rflags = GETREG(vcpu, VM_REG_GUEST_RFLAGS);
+ stacksel = GETREG(vcpu, VM_REG_GUEST_SS);
- error = vm_get_desc(ctx, vcpu, VM_REG_GUEST_SS, &seg_desc.base,
+ error = vm_get_desc(vcpu, VM_REG_GUEST_SS, &seg_desc.base,
&seg_desc.limit, &seg_desc.access);
assert(error == 0);
@@ -664,29 +664,29 @@ push_errcode(struct vmctx *ctx, int vcpu, struct vm_guest_paging *paging,
else
stacksize = 2;
- esp = GETREG(ctx, vcpu, VM_REG_GUEST_RSP);
+ esp = GETREG(vcpu, VM_REG_GUEST_RSP);
esp -= bytes;
if (vie_calculate_gla(paging->cpu_mode, VM_REG_GUEST_SS,
&seg_desc, esp, bytes, stacksize, PROT_WRITE, &gla)) {
- sel_exception(ctx, vcpu, IDT_SS, stacksel, 1);
+ sel_exception(vcpu, IDT_SS, stacksel, 1);
*faultptr = 1;
return (0);
}
if (vie_alignment_check(paging->cpl, bytes, cr0, rflags, gla)) {
- vm_inject_ac(ctx, vcpu, 1);
+ vm_inject_ac(vcpu, 1);
*faultptr = 1;
return (0);
}
- error = vm_copy_setup(ctx, vcpu, paging, gla, bytes, PROT_WRITE,
+ error = vm_copy_setup(vcpu, paging, gla, bytes, PROT_WRITE,
iov, nitems(iov), faultptr);
if (error || *faultptr)
return (error);
vm_copyout(&errcode, iov, bytes);
- SETREG(ctx, vcpu, VM_REG_GUEST_RSP, esp);
+ SETREG(vcpu, VM_REG_GUEST_RSP, esp);
return (0);
}
@@ -704,7 +704,7 @@ push_errcode(struct vmctx *ctx, int vcpu, struct vm_guest_paging *paging,
} while (0)
int
-vmexit_task_switch(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
+vmexit_task_switch(struct vmctx *ctx, struct vcpu *vcpu, struct vm_exit *vmexit)
{
struct seg_desc nt;
struct tss32 oldtss, newtss;
@@ -714,7 +714,7 @@ vmexit_task_switch(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
struct iovec nt_iov[2], ot_iov[2];
uint64_t cr0, ot_base;
uint32_t eip, ot_lim, access;
- int error, ext, fault, minlimit, nt_type, ot_type, vcpu;
+ int error, ext, fault, minlimit, nt_type, ot_type;
enum task_switch_reason reason;
uint16_t nt_sel, ot_sel;
@@ -723,7 +723,6 @@ vmexit_task_switch(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
ext = vmexit->u.task_switch.ext;
reason = vmexit->u.task_switch.reason;
paging = &vmexit->u.task_switch.paging;
- vcpu = *pvcpu;
assert(paging->cpu_mode == CPU_MODE_PROTECTED);
@@ -742,7 +741,7 @@ vmexit_task_switch(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
sup_paging.cpl = 0; /* implicit supervisor mode */
/* Fetch the new TSS descriptor */
- error = read_tss_descriptor(ctx, vcpu, task_switch, nt_sel, &nt_desc,
+ error = read_tss_descriptor(vcpu, task_switch, nt_sel, &nt_desc,
&fault);
CHKERR(error, fault);
@@ -752,13 +751,13 @@ vmexit_task_switch(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
nt_type = SEG_DESC_TYPE(nt.access);
if (nt_type != SDT_SYS386BSY && nt_type != SDT_SYS386TSS &&
nt_type != SDT_SYS286BSY && nt_type != SDT_SYS286TSS) {
- sel_exception(ctx, vcpu, IDT_TS, nt_sel, ext);
+ sel_exception(vcpu, IDT_TS, nt_sel, ext);
goto done;
}
/* TSS descriptor must have present bit set */
if (!SEG_DESC_PRESENT(nt.access)) {
- sel_exception(ctx, vcpu, IDT_NP, nt_sel, ext);
+ sel_exception(vcpu, IDT_NP, nt_sel, ext);
goto done;
}
@@ -775,13 +774,13 @@ vmexit_task_switch(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
assert(minlimit > 0);
if (nt.limit < (unsigned int)minlimit) {
- sel_exception(ctx, vcpu, IDT_TS, nt_sel, ext);
+ sel_exception(vcpu, IDT_TS, nt_sel, ext);
goto done;
}
/* TSS must be busy if task switch is due to IRET */
if (reason == TSR_IRET && !TSS_BUSY(nt_type)) {
- sel_exception(ctx, vcpu, IDT_TS, nt_sel, ext);
+ sel_exception(vcpu, IDT_TS, nt_sel, ext);
goto done;
}
@@ -790,18 +789,18 @@ vmexit_task_switch(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
* CALL, JMP, exception or interrupt.
*/
if (reason != TSR_IRET && TSS_BUSY(nt_type)) {
- sel_exception(ctx, vcpu, IDT_GP, nt_sel, ext);
+ sel_exception(vcpu, IDT_GP, nt_sel, ext);
goto done;
}
/* Fetch the new TSS */
- error = vm_copy_setup(ctx, vcpu, &sup_paging, nt.base, minlimit + 1,
+ error = vm_copy_setup(vcpu, &sup_paging, nt.base, minlimit + 1,
PROT_READ | PROT_WRITE, nt_iov, nitems(nt_iov), &fault);
CHKERR(error, fault);
vm_copyin(nt_iov, &newtss, minlimit + 1);
/* Get the old TSS selector from the guest's task register */
- ot_sel = GETREG(ctx, vcpu, VM_REG_GUEST_TR);
+ ot_sel = GETREG(vcpu, VM_REG_GUEST_TR);
if (ISLDT(ot_sel) || IDXSEL(ot_sel) == 0) {
/*
* This might happen if a task switch was attempted without
@@ -809,12 +808,12 @@ vmexit_task_switch(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
* TR would contain the values from power-on:
* (sel = 0, base = 0, limit = 0xffff).
*/
- sel_exception(ctx, vcpu, IDT_TS, ot_sel, task_switch->ext);
+ sel_exception(vcpu, IDT_TS, ot_sel, task_switch->ext);
goto done;
}
/* Get the old TSS base and limit from the guest's task register */
- error = vm_get_desc(ctx, vcpu, VM_REG_GUEST_TR, &ot_base, &ot_lim,
+ error = vm_get_desc(vcpu, VM_REG_GUEST_TR, &ot_base, &ot_lim,
&access);
assert(error == 0);
assert(!SEG_DESC_UNUSABLE(access) && SEG_DESC_PRESENT(access));
@@ -822,12 +821,12 @@ vmexit_task_switch(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
assert(ot_type == SDT_SYS386BSY || ot_type == SDT_SYS286BSY);
/* Fetch the old TSS descriptor */
- error = read_tss_descriptor(ctx, vcpu, task_switch, ot_sel, &ot_desc,
+ error = read_tss_descriptor(vcpu, task_switch, ot_sel, &ot_desc,
&fault);
CHKERR(error, fault);
/* Get the old TSS */
- error = vm_copy_setup(ctx, vcpu, &sup_paging, ot_base, minlimit + 1,
+ error = vm_copy_setup(vcpu, &sup_paging, ot_base, minlimit + 1,
PROT_READ | PROT_WRITE, ot_iov, nitems(ot_iov), &fault);
CHKERR(error, fault);
vm_copyin(ot_iov, &oldtss, minlimit + 1);
@@ -838,7 +837,7 @@ vmexit_task_switch(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
*/
if (reason == TSR_IRET || reason == TSR_JMP) {
ot_desc.sd_type &= ~0x2;
- error = desc_table_write(ctx, vcpu, &sup_paging, ot_sel,
+ error = desc_table_write(vcpu, &sup_paging, ot_sel,
&ot_desc, &fault);
CHKERR(error, fault);
}
@@ -849,7 +848,7 @@ vmexit_task_switch(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
}
/* Save processor state in old TSS */
- tss32_save(ctx, vcpu, task_switch, eip, &oldtss, ot_iov);
+ tss32_save(vcpu, task_switch, eip, &oldtss, ot_iov);
/*
* If the task switch was triggered for any reason other than IRET
@@ -857,28 +856,28 @@ vmexit_task_switch(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
*/
if (reason != TSR_IRET) {
nt_desc.sd_type |= 0x2;
- error = desc_table_write(ctx, vcpu, &sup_paging, nt_sel,
+ error = desc_table_write(vcpu, &sup_paging, nt_sel,
&nt_desc, &fault);
CHKERR(error, fault);
}
/* Update task register to point at the new TSS */
- SETREG(ctx, vcpu, VM_REG_GUEST_TR, nt_sel);
+ SETREG(vcpu, VM_REG_GUEST_TR, nt_sel);
/* Update the hidden descriptor state of the task register */
nt = usd_to_seg_desc(&nt_desc);
- update_seg_desc(ctx, vcpu, VM_REG_GUEST_TR, &nt);
+ update_seg_desc(vcpu, VM_REG_GUEST_TR, &nt);
/* Set CR0.TS */
- cr0 = GETREG(ctx, vcpu, VM_REG_GUEST_CR0);
- SETREG(ctx, vcpu, VM_REG_GUEST_CR0, cr0 | CR0_TS);
+ cr0 = GETREG(vcpu, VM_REG_GUEST_CR0);
+ SETREG(vcpu, VM_REG_GUEST_CR0, cr0 | CR0_TS);
/*
* We are now committed to the task switch. Any exceptions encountered
* after this point will be handled in the context of the new task and
* the saved instruction pointer will belong to the new task.
*/
- error = vm_set_register(ctx, vcpu, VM_REG_GUEST_RIP, newtss.tss_eip);
+ error = vm_set_register(vcpu, VM_REG_GUEST_RIP, newtss.tss_eip);
assert(error == 0);
/* Load processor state from new TSS */
@@ -894,7 +893,7 @@ vmexit_task_switch(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
if (task_switch->errcode_valid) {
assert(task_switch->ext);
assert(task_switch->reason == TSR_IDT_GATE);
- error = push_errcode(ctx, vcpu, &task_switch->paging, nt_type,
+ error = push_errcode(vcpu, &task_switch->paging, nt_type,
task_switch->errcode, &fault);
CHKERR(error, fault);
}
@@ -930,7 +929,7 @@ vmexit_task_switch(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
* exitintinfo.
*/
if (task_switch->reason == TSR_IDT_GATE) {
- error = vm_set_intinfo(ctx, vcpu, 0);
+ error = vm_set_intinfo(vcpu, 0);
assert(error == 0);
}
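
A note on the GETREG()/SETREG() calls rewritten throughout the hunks above: both are local helpers defined near the top of task_switch.c, so the diff shows only their call sites. A sketch of their likely post-conversion form, assuming the file keeps its assert-on-error idiom; the (ctx, vcpuid) pair collapses into a single vcpu handle and the libvmmapi accessors drop their first argument accordingly:

/*
 * Sketch, not part of this diff: local register accessors after the
 * conversion.
 */
static uint64_t
GETREG(struct vcpu *vcpu, int reg)
{
	uint64_t val;
	int error;

	error = vm_get_register(vcpu, reg, &val);
	assert(error == 0);
	return (val);
}

static void
SETREG(struct vcpu *vcpu, int reg, uint64_t val)
{
	int error;

	error = vm_set_register(vcpu, reg, val);
	assert(error == 0);
}
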
diff --git a/usr.sbin/bhyve/vga.c b/usr.sbin/bhyve/vga.c
index f139dc38937b..a63943efe486 100644
--- a/usr.sbin/bhyve/vga.c
+++ b/usr.sbin/bhyve/vga.c
@@ -339,7 +339,7 @@ vga_render(struct bhyvegc *gc, void *arg)
}
static uint64_t
-vga_mem_rd_handler(struct vmctx *ctx __unused, uint64_t addr, void *arg1)
+vga_mem_rd_handler(uint64_t addr, void *arg1)
{
struct vga_softc *sc = arg1;
uint8_t map_sel;
@@ -399,8 +399,7 @@ vga_mem_rd_handler(struct vmctx *ctx __unused, uint64_t addr, void *arg1)
}
static void
-vga_mem_wr_handler(struct vmctx *ctx __unused, uint64_t addr, uint8_t val,
- void *arg1)
+vga_mem_wr_handler(uint64_t addr, uint8_t val, void *arg1)
{
struct vga_softc *sc = arg1;
uint8_t c0, c1, c2, c3;
@@ -654,59 +653,59 @@ vga_mem_wr_handler(struct vmctx *ctx __unused, uint64_t addr, uint8_t val,
}
static int
-vga_mem_handler(struct vmctx *ctx, int vcpu __unused, int dir, uint64_t addr,
- int size, uint64_t *val, void *arg1, long arg2 __unused)
+vga_mem_handler(struct vcpu *vcpu __unused, int dir, uint64_t addr, int size,
+ uint64_t *val, void *arg1, long arg2 __unused)
{
if (dir == MEM_F_WRITE) {
switch (size) {
case 1:
- vga_mem_wr_handler(ctx, addr, *val, arg1);
+ vga_mem_wr_handler(addr, *val, arg1);
break;
case 2:
- vga_mem_wr_handler(ctx, addr, *val, arg1);
- vga_mem_wr_handler(ctx, addr + 1, *val >> 8, arg1);
+ vga_mem_wr_handler(addr, *val, arg1);
+ vga_mem_wr_handler(addr + 1, *val >> 8, arg1);
break;
case 4:
- vga_mem_wr_handler(ctx, addr, *val, arg1);
- vga_mem_wr_handler(ctx, addr + 1, *val >> 8, arg1);
- vga_mem_wr_handler(ctx, addr + 2, *val >> 16, arg1);
- vga_mem_wr_handler(ctx, addr + 3, *val >> 24, arg1);
+ vga_mem_wr_handler(addr, *val, arg1);
+ vga_mem_wr_handler(addr + 1, *val >> 8, arg1);
+ vga_mem_wr_handler(addr + 2, *val >> 16, arg1);
+ vga_mem_wr_handler(addr + 3, *val >> 24, arg1);
break;
case 8:
- vga_mem_wr_handler(ctx, addr, *val, arg1);
- vga_mem_wr_handler(ctx, addr + 1, *val >> 8, arg1);
- vga_mem_wr_handler(ctx, addr + 2, *val >> 16, arg1);
- vga_mem_wr_handler(ctx, addr + 3, *val >> 24, arg1);
- vga_mem_wr_handler(ctx, addr + 4, *val >> 32, arg1);
- vga_mem_wr_handler(ctx, addr + 5, *val >> 40, arg1);
- vga_mem_wr_handler(ctx, addr + 6, *val >> 48, arg1);
- vga_mem_wr_handler(ctx, addr + 7, *val >> 56, arg1);
+ vga_mem_wr_handler(addr, *val, arg1);
+ vga_mem_wr_handler(addr + 1, *val >> 8, arg1);
+ vga_mem_wr_handler(addr + 2, *val >> 16, arg1);
+ vga_mem_wr_handler(addr + 3, *val >> 24, arg1);
+ vga_mem_wr_handler(addr + 4, *val >> 32, arg1);
+ vga_mem_wr_handler(addr + 5, *val >> 40, arg1);
+ vga_mem_wr_handler(addr + 6, *val >> 48, arg1);
+ vga_mem_wr_handler(addr + 7, *val >> 56, arg1);
break;
}
} else {
switch (size) {
case 1:
- *val = vga_mem_rd_handler(ctx, addr, arg1);
+ *val = vga_mem_rd_handler(addr, arg1);
break;
case 2:
- *val = vga_mem_rd_handler(ctx, addr, arg1);
- *val |= vga_mem_rd_handler(ctx, addr + 1, arg1) << 8;
+ *val = vga_mem_rd_handler(addr, arg1);
+ *val |= vga_mem_rd_handler(addr + 1, arg1) << 8;
break;
case 4:
- *val = vga_mem_rd_handler(ctx, addr, arg1);
- *val |= vga_mem_rd_handler(ctx, addr + 1, arg1) << 8;
- *val |= vga_mem_rd_handler(ctx, addr + 2, arg1) << 16;
- *val |= vga_mem_rd_handler(ctx, addr + 3, arg1) << 24;
+ *val = vga_mem_rd_handler(addr, arg1);
+ *val |= vga_mem_rd_handler(addr + 1, arg1) << 8;
+ *val |= vga_mem_rd_handler(addr + 2, arg1) << 16;
+ *val |= vga_mem_rd_handler(addr + 3, arg1) << 24;
break;
case 8:
- *val = vga_mem_rd_handler(ctx, addr, arg1);
- *val |= vga_mem_rd_handler(ctx, addr + 1, arg1) << 8;
- *val |= vga_mem_rd_handler(ctx, addr + 2, arg1) << 16;
- *val |= vga_mem_rd_handler(ctx, addr + 3, arg1) << 24;
- *val |= vga_mem_rd_handler(ctx, addr + 4, arg1) << 32;
- *val |= vga_mem_rd_handler(ctx, addr + 5, arg1) << 40;
- *val |= vga_mem_rd_handler(ctx, addr + 6, arg1) << 48;
- *val |= vga_mem_rd_handler(ctx, addr + 7, arg1) << 56;
+ *val = vga_mem_rd_handler(addr, arg1);
+ *val |= vga_mem_rd_handler(addr + 1, arg1) << 8;
+ *val |= vga_mem_rd_handler(addr + 2, arg1) << 16;
+ *val |= vga_mem_rd_handler(addr + 3, arg1) << 24;
+ *val |= vga_mem_rd_handler(addr + 4, arg1) << 32;
+ *val |= vga_mem_rd_handler(addr + 5, arg1) << 40;
+ *val |= vga_mem_rd_handler(addr + 6, arg1) << 48;
+ *val |= vga_mem_rd_handler(addr + 7, arg1) << 56;
break;
}
}
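
The vga_mem_handler() change above tracks the new mem_func_t callback type, which now receives a struct vcpu pointer directly (unused here, since VGA emulation does not depend on which vcpu touched the window). As a hedged illustration of how such a handler is attached, here is a sketch of a mem_range registration in the style of bhyve's mem.c; the softc pointer 'sc' is a placeholder and field usage follows struct mem_range as declared in mem.h:

/*
 * Sketch: attaching a handler with the new mem_func_t signature.
 */
struct mem_range mr;
int error;

memset(&mr, 0, sizeof(mr));
mr.name = "VGA memory";
mr.flags = MEM_F_RW;		/* reads and writes both trap here */
mr.base = 640 * 1024;		/* legacy window at 0xA0000 */
mr.size = 128 * 1024;
mr.handler = vga_mem_handler;	/* struct vcpu * now replaces (ctx, vcpu) */
mr.arg1 = sc;
error = register_mem(&mr);
assert(error == 0);
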
diff --git a/usr.sbin/bhyve/xmsr.c b/usr.sbin/bhyve/xmsr.c
index 5f735152e92c..8672752a0cde 100644
--- a/usr.sbin/bhyve/xmsr.c
+++ b/usr.sbin/bhyve/xmsr.c
@@ -49,8 +49,7 @@ __FBSDID("$FreeBSD$");
static int cpu_vendor_intel, cpu_vendor_amd, cpu_vendor_hygon;
int
-emulate_wrmsr(struct vmctx *ctx __unused, int vcpu __unused, uint32_t num,
- uint64_t val __unused)
+emulate_wrmsr(struct vcpu *vcpu __unused, uint32_t num, uint64_t val __unused)
{
if (cpu_vendor_intel) {
@@ -104,8 +103,7 @@ emulate_wrmsr(struct vmctx *ctx __unused, int vcpu __unused, uint32_t num,
}
int
-emulate_rdmsr(struct vmctx *ctx __unused, int vcpu __unused, uint32_t num,
- uint64_t *val)
+emulate_rdmsr(struct vcpu *vcpu __unused, uint32_t num, uint64_t *val)
{
int error = 0;
diff --git a/usr.sbin/bhyve/xmsr.h b/usr.sbin/bhyve/xmsr.h
index 1fb47c3ae2f0..31349c4621d7 100644
--- a/usr.sbin/bhyve/xmsr.h
+++ b/usr.sbin/bhyve/xmsr.h
@@ -32,7 +32,7 @@
#define _XMSR_H_
int init_msr(void);
-int emulate_wrmsr(struct vmctx *ctx, int vcpu, uint32_t code, uint64_t val);
-int emulate_rdmsr(struct vmctx *ctx, int vcpu, uint32_t code, uint64_t *val);
+int emulate_wrmsr(struct vcpu *vcpu, uint32_t code, uint64_t val);
+int emulate_rdmsr(struct vcpu *vcpu, uint32_t code, uint64_t *val);
#endif
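
With the (ctx, vcpu-id) pair gone from these prototypes, callers can hand the vcpu pointer from a vmexit straight to the MSR emulation. A hedged sketch of an RDMSR exit path under the new API; the handler shape matches the three-argument signature used by vmexit_task_switch() above, while the #GP-on-unknown-MSR policy is an assumption mirroring hardware behavior:

/*
 * Sketch: consuming emulate_rdmsr() from an RDMSR vmexit handler.
 */
static int
vmexit_rdmsr(struct vmctx *ctx __unused, struct vcpu *vcpu,
    struct vm_exit *vme)
{
	uint64_t val;
	int error;

	val = 0;
	error = emulate_rdmsr(vcpu, vme->u.msr.code, &val);
	if (error != 0) {
		/* Unknown MSR: reflect a #GP into the guest. */
		vm_inject_gp(vcpu);
	} else {
		/* RDMSR returns its result split across EDX:EAX. */
		error = vm_set_register(vcpu, VM_REG_GUEST_RAX,
		    val & 0xffffffff);
		assert(error == 0);
		error = vm_set_register(vcpu, VM_REG_GUEST_RDX, val >> 32);
		assert(error == 0);
	}
	return (VMEXIT_CONTINUE);
}
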