author	Scott Long <scottl@FreeBSD.org>	2019-11-16 00:26:42 +0000
committer	Scott Long <scottl@FreeBSD.org>	2019-11-16 00:26:42 +0000
commit	e3721601771396c5e731920d2a716b8a7cbbe176 (patch)
tree	4c3be410b6e4f63b4807d65d64b717abaff0be35 /sys/x86
parent	808c432f62665c5cdb51ded306ab79949d6f8678 (diff)
TSX Asynchronous Abort mitigation for Intel CVE-2019-11135.

This CVE has already been announced in FreeBSD SA-19:26.mcu.

Mitigation for TAA involves either turning off TSX or turning on the
VERW mitigation used for MDS.  Some CPUs will also be self-mitigating
for TAA and require no software workaround.

Control knobs are:

machdep.mitigations.taa.enable:
	0 - no software mitigation is enabled
	1 - attempt to disable TSX
	2 - use the VERW mitigation
	3 - automatically select the mitigation based on processor features

machdep.mitigations.taa.state:
	inactive	- no mitigation is active/enabled
	TSX disabled	- TSX is disabled in the bare metal CPU as well as
			  any virtualized CPUs
	VERW		- VERW instruction clears CPU buffers
	not vulnerable	- The CPU has identified itself as not being vulnerable

Nothing in the base FreeBSD system uses TSX.  However, the instructions
are straight-forward to add to custom applications and require no kernel
support, so the mitigation is provided for users with untrusted
applications and tenants.

Reviewed by:	emaste, imp, kib, scottph
Sponsored by:	Intel
Differential Revision:	22374
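
As a hedged illustration only (not part of this commit), the knobs described
above can be read from userland with sysctlbyname(3); the sysctl names come
from the patch below, while the program itself is an assumed sketch:

#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	char state[64];
	size_t len;
	int enable;

	/* machdep.mitigations.taa.enable: 0 = off, 1 = disable TSX, 2 = VERW, 3 = auto */
	len = sizeof(enable);
	if (sysctlbyname("machdep.mitigations.taa.enable", &enable, &len,
	    NULL, 0) == 0)
		printf("taa.enable = %d\n", enable);

	/*
	 * machdep.mitigations.taa.state is a read-only string; the handler
	 * in the diff emits it without a terminating NUL, so clear the
	 * buffer before reading.
	 */
	memset(state, 0, sizeof(state));
	len = sizeof(state) - 1;
	if (sysctlbyname("machdep.mitigations.taa.state", state, &len,
	    NULL, 0) == 0)
		printf("taa.state  = %s\n", state);

	return (0);
}

Writing machdep.mitigations.taa.enable (for example via sysctl(8)) goes
through sysctl_taa_handler() in the diff, which rejects values outside 0..3
and then calls x86_taa_recalculate() to apply the change.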
Notes:
	svn path=/head/; revision=354759
Diffstat (limited to 'sys/x86')
-rw-r--r--	sys/x86/include/x86_var.h	2
-rw-r--r--	sys/x86/x86/cpu_machdep.c	196
2 files changed, 198 insertions, 0 deletions
diff --git a/sys/x86/include/x86_var.h b/sys/x86/include/x86_var.h
index 678366dead26..82498bfa3fea 100644
--- a/sys/x86/include/x86_var.h
+++ b/sys/x86/include/x86_var.h
@@ -93,6 +93,7 @@ extern int pti;
extern int hw_ibrs_active;
extern int hw_mds_disable;
extern int hw_ssb_active;
+extern int x86_taa_enable;
struct pcb;
struct thread;
@@ -136,6 +137,7 @@ void handle_ibrs_exit(void);
void hw_ibrs_recalculate(void);
void hw_mds_recalculate(void);
void hw_ssb_recalculate(bool all_cpus);
+void x86_taa_recalculate(void);
void nmi_call_kdb(u_int cpu, u_int type, struct trapframe *frame);
void nmi_call_kdb_smp(u_int type, struct trapframe *frame);
void nmi_handle_intr(u_int type, struct trapframe *frame);
diff --git a/sys/x86/x86/cpu_machdep.c b/sys/x86/x86/cpu_machdep.c
index 273bd461f053..0cf80181a488 100644
--- a/sys/x86/x86/cpu_machdep.c
+++ b/sys/x86/x86/cpu_machdep.c
@@ -1135,6 +1135,202 @@ SYSCTL_PROC(_hw, OID_AUTO, mds_disable, CTLTYPE_INT |
"Microarchitectural Data Sampling Mitigation "
"(0 - off, 1 - on VERW, 2 - on SW, 3 - on AUTO");
+
+/*
+ * Intel Transactional Memory Asynchronous Abort Mitigation
+ * CVE-2019-11135
+ */
+int x86_taa_enable;
+int x86_taa_state;
+enum {
+ TAA_NONE = 0,
+ TAA_TSX_DISABLE = 1,
+ TAA_VERW = 2,
+ TAA_AUTO = 3,
+ TAA_TAA_NO = 4
+};
+
+static void
+taa_set_one(bool enable)
+{
+ uint64_t v;
+
+ v = rdmsr(MSR_IA32_TSX_CTRL);
+ if (enable)
+ v |= (uint64_t)(IA32_TSX_CTRL_RTM_DISABLE |
+ IA32_TSX_CTRL_TSX_CPUID_CLEAR);
+ else
+ v &= ~(uint64_t)(IA32_TSX_CTRL_RTM_DISABLE |
+ IA32_TSX_CTRL_TSX_CPUID_CLEAR);
+
+ wrmsr(MSR_IA32_TSX_CTRL, v);
+}
+
+static void
+taa_set(bool enable, bool all)
+{
+ struct thread *td;
+ int bound_cpu, i, is_bound;
+
+ if (all) {
+ td = curthread;
+ thread_lock(td);
+ is_bound = sched_is_bound(td);
+ bound_cpu = td->td_oncpu;
+ CPU_FOREACH(i) {
+ sched_bind(td, i);
+ taa_set_one(enable);
+ }
+ if (is_bound)
+ sched_bind(td, bound_cpu);
+ else
+ sched_unbind(td);
+ thread_unlock(td);
+ } else
+ taa_set_one(enable);
+}
+
+void
+x86_taa_recalculate(void)
+{
+ static int taa_saved_mds_disable = 0;
+ int taa_need = 0, taa_state = 0;
+ int mds_disable = 0, need_mds_recalc = 0;
+
+ /* Check CPUID.07h.EBX.HLE and RTM for the presence of TSX */
+ if ((cpu_stdext_feature & CPUID_STDEXT_HLE) == 0 ||
+ (cpu_stdext_feature & CPUID_STDEXT_RTM) == 0) {
+ /* TSX is not present */
+ x86_taa_state = 0;
+ return;
+ }
+
+ /* Check to see what mitigation options the CPU gives us */
+ if (cpu_ia32_arch_caps & IA32_ARCH_CAP_TAA_NO) {
+ /* CPU is not susceptible to TAA */
+ taa_need = TAA_NONE;
+ taa_state = TAA_TAA_NO;
+ } else if (cpu_ia32_arch_caps & IA32_ARCH_CAP_TSX_CTRL) {
+ /*
+ * CPU can turn off TSX. This is the next best option
+ * if TAA_NO hardware mitigation isn't present
+ */
+ taa_need = TAA_TSX_DISABLE;
+ } else {
+ /* No TSX/TAA specific remedies are available. */
+ if (x86_taa_enable == TAA_TSX_DISABLE) {
+ if (bootverbose)
+ printf("TSX control not available\n");
+ return;
+ } else
+ taa_need = TAA_VERW;
+ }
+
+ /* Can we automatically take action, or are we being forced? */
+ if (x86_taa_enable == TAA_AUTO)
+ taa_state = taa_need;
+ else
+ taa_state = x86_taa_enable;
+
+ /* No state change, nothing to do */
+ if (taa_state == x86_taa_state) {
+ if (bootverbose)
+ printf("No TSX change made\n");
+ return;
+ }
+
+ /* Does the MSR need to be turned on or off? */
+ if (taa_state == TAA_TSX_DISABLE)
+ taa_set(true, true);
+ else if (x86_taa_state == TAA_TSX_DISABLE)
+ taa_set(false, true);
+
+ /* Does MDS need to be set to turn on VERW? */
+ if (taa_state == TAA_VERW) {
+ taa_saved_mds_disable = hw_mds_disable;
+ mds_disable = hw_mds_disable = 1;
+ need_mds_recalc = 1;
+ } else if (x86_taa_state == TAA_VERW) {
+ mds_disable = hw_mds_disable = taa_saved_mds_disable;
+ need_mds_recalc = 1;
+ }
+ if (need_mds_recalc) {
+ hw_mds_recalculate();
+ if (mds_disable != hw_mds_disable) {
+ if (bootverbose)
+ printf("Cannot change MDS state for TAA\n");
+ /* Don't update our state */
+ return;
+ }
+ }
+
+ x86_taa_state = taa_state;
+ return;
+}
+
+static void
+taa_recalculate_boot(void * arg __unused)
+{
+
+ x86_taa_recalculate();
+}
+SYSINIT(taa_recalc, SI_SUB_SMP, SI_ORDER_ANY, taa_recalculate_boot, NULL);
+
+SYSCTL_NODE(_machdep_mitigations, OID_AUTO, taa, CTLFLAG_RW, 0,
+ "TSX Asynchronous Abort Mitigation");
+
+static int
+sysctl_taa_handler(SYSCTL_HANDLER_ARGS)
+{
+ int error, val;
+
+ val = x86_taa_enable;
+ error = sysctl_handle_int(oidp, &val, 0, req);
+ if (error != 0 || req->newptr == NULL)
+ return (error);
+ if (val < TAA_NONE || val > TAA_AUTO)
+ return (EINVAL);
+ x86_taa_enable = val;
+ x86_taa_recalculate();
+ return (0);
+}
+
+SYSCTL_PROC(_machdep_mitigations_taa, OID_AUTO, enable, CTLTYPE_INT |
+ CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0,
+ sysctl_taa_handler, "I",
+ "TAA Mitigation enablement control "
+ "(0 - off, 1 - disable TSX, 2 - VERW, 3 - on AUTO");
+
+static int
+sysctl_taa_state_handler(SYSCTL_HANDLER_ARGS)
+{
+ const char *state;
+
+ switch (x86_taa_state) {
+ case TAA_NONE:
+ state = "inactive";
+ break;
+ case TAA_TSX_DISABLE:
+ state = "TSX disabled";
+ break;
+ case TAA_VERW:
+ state = "VERW";
+ break;
+ case TAA_TAA_NO:
+ state = "Not vulnerable";
+ break;
+ default:
+ state = "unknown";
+ }
+
+ return (SYSCTL_OUT(req, state, strlen(state)));
+}
+
+SYSCTL_PROC(_machdep_mitigations_taa, OID_AUTO, state,
+ CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
+ sysctl_taa_state_handler, "A",
+ "TAA Mitigation state");
+
/*
* Enable and restore kernel text write permissions.
* Callers must ensure that disable_wp()/restore_wp() are executed