aboutsummaryrefslogtreecommitdiff
path: root/sys/amd64
diff options
context:
space:
mode:
authorJung-uk Kim <jkim@FreeBSD.org>2011-04-07 23:28:28 +0000
committerJung-uk Kim <jkim@FreeBSD.org>2011-04-07 23:28:28 +0000
commit3453537fa54255e670f22aaf57e45ab40a14f55e (patch)
tree664d6632a866948517ff612d5ccfb37cd5bf91db /sys/amd64
parentcb379161b1e700f2ce5d6985085983b77410475c (diff)
downloadsrc-3453537fa54255e670f22aaf57e45ab40a14f55e.tar.gz
src-3453537fa54255e670f22aaf57e45ab40a14f55e.zip
Use atomic load & store for TSC frequency. It may be overkill for amd64 but
safer for i386 because it can easily exceed 4 GHz now. Worse still, it can easily be changed by the user with the 'machdep.tsc_freq' tunable (directly) or by cpufreq(4) (indirectly). Note it is intentionally not used in performance-critical paths to avoid performance regression (but we should, in theory). Alternatively, we may add a "virtual TSC" with a lower frequency if the maximum frequency overflows 32 bits (and ignore possible incoherency as we do now).
Notes
Notes: svn path=/head/; revision=220433
Diffstat (limited to 'sys/amd64')
-rw-r--r--sys/amd64/amd64/legacy.c3
-rw-r--r--sys/amd64/amd64/machdep.c11
-rw-r--r--sys/amd64/amd64/prof_machdep.c16
3 files changed, 18 insertions, 12 deletions
diff --git a/sys/amd64/amd64/legacy.c b/sys/amd64/amd64/legacy.c
index 0e7bac1143f0..100ce7c2c2dc 100644
--- a/sys/amd64/amd64/legacy.c
+++ b/sys/amd64/amd64/legacy.c
@@ -321,7 +321,8 @@ cpu_read_ivar(device_t dev, device_t child, int index, uintptr_t *result)
break;
case CPU_IVAR_NOMINAL_MHZ:
if (tsc_is_invariant) {
- *result = (uintptr_t)(tsc_freq / 1000000);
+ *result = (uintptr_t)(atomic_load_acq_64(&tsc_freq) /
+ 1000000);
break;
}
/* FALLTHROUGH */
diff --git a/sys/amd64/amd64/machdep.c b/sys/amd64/amd64/machdep.c
index 59a6ca8839f6..07a42e6f0f71 100644
--- a/sys/amd64/amd64/machdep.c
+++ b/sys/amd64/amd64/machdep.c
@@ -546,18 +546,19 @@ int
cpu_est_clockrate(int cpu_id, uint64_t *rate)
{
register_t reg;
- uint64_t tsc1, tsc2;
+ uint64_t freq, tsc1, tsc2;
if (pcpu_find(cpu_id) == NULL || rate == NULL)
return (EINVAL);
+ freq = atomic_load_acq_64(&tsc_freq);
/* If TSC is P-state invariant, DELAY(9) based logic fails. */
- if (tsc_is_invariant && tsc_freq != 0)
+ if (tsc_is_invariant && freq != 0)
return (EOPNOTSUPP);
/* If we're booting, trust the rate calibrated moments ago. */
- if (cold && tsc_freq != 0) {
- *rate = tsc_freq;
+ if (cold && freq != 0) {
+ *rate = freq;
return (0);
}
@@ -586,7 +587,7 @@ cpu_est_clockrate(int cpu_id, uint64_t *rate)
#endif
tsc2 -= tsc1;
- if (tsc_freq != 0) {
+ if (freq != 0) {
*rate = tsc2 * 1000;
return (0);
}
diff --git a/sys/amd64/amd64/prof_machdep.c b/sys/amd64/amd64/prof_machdep.c
index 6ebc0380b873..273c8336fca3 100644
--- a/sys/amd64/amd64/prof_machdep.c
+++ b/sys/amd64/amd64/prof_machdep.c
@@ -311,18 +311,22 @@ void
startguprof(gp)
struct gmonparam *gp;
{
+ uint64_t freq;
+
+ freq = atomic_load_acq_64(&tsc_freq);
if (cputime_clock == CPUTIME_CLOCK_UNINITIALIZED) {
- cputime_clock = CPUTIME_CLOCK_I8254;
- if (tsc_freq != 0 && mp_ncpus == 1)
+ if (freq != 0 && mp_ncpus == 1)
cputime_clock = CPUTIME_CLOCK_TSC;
+ else
+ cputime_clock = CPUTIME_CLOCK_I8254;
}
- gp->profrate = i8254_freq << CPUTIME_CLOCK_I8254_SHIFT;
if (cputime_clock == CPUTIME_CLOCK_TSC) {
- gp->profrate = tsc_freq >> 1;
+ gp->profrate = freq >> 1;
cputime_prof_active = 1;
- }
+ } else
+ gp->profrate = i8254_freq << CPUTIME_CLOCK_I8254_SHIFT;
#if defined(PERFMON) && defined(I586_PMC_GUPROF)
- else if (cputime_clock == CPUTIME_CLOCK_I586_PMC) {
+ if (cputime_clock == CPUTIME_CLOCK_I586_PMC) {
if (perfmon_avail() &&
perfmon_setup(0, cputime_clock_pmc_conf) == 0) {
if (perfmon_start(0) != 0)