author     Jessica Clarke <jrtc27@FreeBSD.org>    2022-01-10 14:30:05 +0000
committer  Jessica Clarke <jrtc27@FreeBSD.org>    2022-01-10 14:30:05 +0000
commit     e74c7ffcb11b6ac879167249adc23a1f9ee5aab6
tree       4a5f2b2c93f256522c9c3516962aca3f8b165323
parent     78c347d9f33bf3d69986578dbdbfdab399381675
hwpmc: Fix amd/arm64/armv7/uncore sampling overflow race
If a counter more than overflows just as we read it on switch out then, if using sampling mode, we will negate this small value to give a huge reload count, and if we later switch back in that context we will validate that value against pm_reloadcount and panic an INVARIANTS kernel with:

    panic: [pmc,1470] pmcval outside of expected range cpu=2 ri=16 pmcval=fffff292 pm_reloadcount=10000

or similar. Presumably in a non-INVARIANTS kernel we will instead just use the provided value as the reload count, which would lead to the overflow not happening for a very long time (e.g. 78 hours for a 48-bit counter incrementing at an average rate of 1GHz).

Instead, clamp the reload count to 0 (which corresponds precisely to the value we would compute if it had just overflowed and no more), which will result in hwpmc using the full original reload count again. This is the approach used by core for Intel (for both fixed and programmable counters).

As part of this, armv7 and arm64 are made conceptually simpler; rather than skipping modifying the overflow count for sampling mode counters so it's always kept as ~0, those special cases are removed so it's always applicable and the concatenation of it and the hardware counter can always be viewed as a 64-bit counter, which also makes them look more like other architectures.

Whilst here, fix an instance of UB (shifting a 1 into the sign bit) for amd in its sign-extension code.

Reviewed by:	andrew, mhorne, kib
MFC after:	1 week
Differential Revision:	https://reviews.freebsd.org/D33654
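To make the failure mode concrete, here is a minimal user-space sketch of the negate-and-clamp logic the commit describes. value_to_reload_count() is a hypothetical stand-in for the driver's ARMV8_PERFCTR_VALUE_TO_RELOAD_COUNT() macro, on the assumption that sampling counters start at the negated reload count and count upwards; only the bit-63 clamp test is taken verbatim from the diff below.

/*
 * Hedged sketch of the overflow race and the clamp fix; not driver
 * code. value_to_reload_count() is a hypothetical stand-in for
 * ARMV8_PERFCTR_VALUE_TO_RELOAD_COUNT(), assuming sampling counters
 * start at -(reload count) and count up towards zero.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t
value_to_reload_count(uint64_t value)
{
	return (-value);	/* remaining count until the next overflow */
}

static uint64_t
clamped_reload_count(uint64_t value)
{
	/*
	 * A clear bit 63 means the counter has crossed zero (it just
	 * overflowed), so clamp to 0 rather than negating a small
	 * positive value into a huge reload count.
	 */
	if ((value & (1ull << 63)) == 0)
		return (0);
	return (value_to_reload_count(value));
}

int
main(void)
{
	uint64_t raced = 0xfffff292;		/* just past an overflow */
	uint64_t midway = -(uint64_t)0x8000;	/* halfway to a 0x10000 reload */

	/* Unclamped: the raced value negates to an enormous count. */
	printf("unclamped: %#jx\n", (uintmax_t)value_to_reload_count(raced));
	/* Clamped: 0, so hwpmc reuses the full original reload count. */
	printf("clamped:   %#jx\n", (uintmax_t)clamped_reload_count(raced));
	printf("midway:    %#jx\n", (uintmax_t)clamped_reload_count(midway));
	return (0);
}

Note the 1ull suffix in the diff's bit test: computing 1 << 63 with a plain int would shift a 1 into the sign bit, the same class of undefined behaviour the commit fixes in the amd sign-extension code.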
Diffstat (limited to 'sys/dev/hwpmc/hwpmc_arm64.c')
-rw-r--r--  sys/dev/hwpmc/hwpmc_arm64.c  25
1 file changed, 16 insertions(+), 9 deletions(-)
diff --git a/sys/dev/hwpmc/hwpmc_arm64.c b/sys/dev/hwpmc/hwpmc_arm64.c
index 14c176894c6a..988cd1744a07 100644
--- a/sys/dev/hwpmc/hwpmc_arm64.c
+++ b/sys/dev/hwpmc/hwpmc_arm64.c
@@ -218,8 +218,7 @@ arm64_read_pmc(int cpu, int ri, pmc_value_t *v)
 	if ((READ_SPECIALREG(pmovsclr_el0) & reg) != 0) {
 		/* Clear Overflow Flag */
 		WRITE_SPECIALREG(pmovsclr_el0, reg);
-		if (!PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
-			pm->pm_pcpu_state[cpu].pps_overflowcnt++;
+		pm->pm_pcpu_state[cpu].pps_overflowcnt++;
 
 		/* Reread counter in case we raced. */
 		tmp = arm64_pmcn_read(ri);
@@ -228,10 +227,18 @@ arm64_read_pmc(int cpu, int ri, pmc_value_t *v)
 	intr_restore(s);
 
 	PMCDBG2(MDP, REA, 2, "arm64-read id=%d -> %jd", ri, tmp);
-	if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
-		*v = ARMV8_PERFCTR_VALUE_TO_RELOAD_COUNT(tmp);
-	else
-		*v = tmp;
+	if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) {
+		/*
+		 * Clamp value to 0 if the counter just overflowed,
+		 * otherwise the returned reload count would wrap to a
+		 * huge value.
+		 */
+		if ((tmp & (1ull << 63)) == 0)
+			tmp = 0;
+		else
+			tmp = ARMV8_PERFCTR_VALUE_TO_RELOAD_COUNT(tmp);
+	}
+	*v = tmp;
 
 	return (0);
 }
@@ -379,10 +386,10 @@ arm64_intr(struct trapframe *tf)
 
 		retval = 1;	/* Found an interrupting PMC. */
 
-		if (!PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) {
-			pm->pm_pcpu_state[cpu].pps_overflowcnt += 1;
+		pm->pm_pcpu_state[cpu].pps_overflowcnt += 1;
+
+		if (!PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
 			continue;
-		}
 
 		if (pm->pm_state != PMC_STATE_RUNNING)
 			continue;
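For readers unfamiliar with the pps_overflowcnt bookkeeping these hunks touch: with the sampling-mode special cases gone, the software overflow count and the hardware counter concatenate into one logical 64-bit value, which is what the sign-bit test in arm64_read_pmc() inspects. The sketch below illustrates that view under stated assumptions; it is not the driver's code, the 32-bit hardware counter width is an assumption (pre-FEAT_PMUv3p5 PMU counters), and read_hw_counter() is a hypothetical stand-in for arm64_pmcn_read().

/*
 * Illustrative sketch only: an incremented-on-overflow software count
 * concatenated with an assumed 32-bit hardware counter to form one
 * logical 64-bit counter.
 */
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for arm64_pmcn_read(); low 32 bits only. */
static uint32_t
read_hw_counter(void)
{
	return (0x1234);	/* simulated hardware counter value */
}

static uint32_t overflowcnt = 2;	/* bumped once per hardware overflow */

static uint64_t
read_logical_counter(void)
{
	uint64_t tmp;

	tmp = read_hw_counter();
	/* The software overflow count supplies the high 32 bits. */
	tmp += (uint64_t)overflowcnt << 32;
	return (tmp);
}

int
main(void)
{
	printf("logical counter: %#jx\n", (uintmax_t)read_logical_counter());
	return (0);
}

Because this 64-bit view now holds for sampling counters too, a counter written as -(reload count) climbs towards zero, and a clear top bit in the concatenated value cleanly signals "just overflowed" to the clamp added in arm64_read_pmc().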