path: root/sys/kern/sched_ule.c
author    Warner Losh <imp@FreeBSD.org>	2015-02-27 21:15:12 +0000
committer Warner Losh <imp@FreeBSD.org>	2015-02-27 21:15:12 +0000
commit b250ad349918e0d1ae95ee9ce5ff3c87295fdd66 (patch)
tree   667de0aedd57f1a1cf27a65767a4399907937094 /sys/kern/sched_ule.c
parent e52a2dc83cbc674c3f009634913453fe1d0408d0 (diff)
Make sched_random() return an unsigned number, and use uint32_t
consistently. This also matches the per-cpu pointer declaration.

This changes the tweak we give to the load from -32..31 to 0..31, which
seems more in line with the rest of the code (the "- rnd" and the "-= 64").
It should still provide the randomness we need, and it may fix a
signedness bug in the old code (it isn't clear whether the effect was
intentional or just sloppy, and the right shift of a negative signed
value is implementation-defined to boot). This restores sched_balance()
behavior to what it was when it used random().

Differential Revision: https://reviews.freebsd.org/D1981
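As a before/after illustration of the extraction change, here is a
minimal userland sketch; the local state variable and the step_lcg()
helper are hypothetical stand-ins for the per-CPU randomval, not the
kernel code itself:

#include <stdint.h>
#include <stdio.h>

static uint32_t state = 1;	/* stand-in for the DPCPU randomval */

/* Hypothetical helper: one LCG step, X_{n+1} = (69069 * X_n + 5) mod 2^32. */
static uint32_t
step_lcg(void)
{
	state = state * 69069 + 5;
	return (state);
}

int
main(void)
{
	for (int i = 0; i < 8; i++) {
		uint32_t x = step_lcg();

		/*
		 * Old tweak: arithmetic right shift of a signed value.
		 * On common two's-complement ABIs this yields -32..31,
		 * but right-shifting a negative signed int is
		 * implementation-defined in C.
		 */
		int old_tweak = (int)x >> 26;

		/* New tweak: unsigned upper bits reduced mod 32; always 0..31. */
		uint32_t new_tweak = (x >> 16) % 32;

		printf("old %3d  new %2u\n", old_tweak, (unsigned)new_tweak);
	}
	return (0);
}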
Notes:
    svn path=/head/; revision=279373
Diffstat (limited to 'sys/kern/sched_ule.c')
 -rw-r--r--  sys/kern/sched_ule.c | 24 +++++++++++++-----------
 1 file changed, 13 insertions(+), 11 deletions(-)
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index 04b4ec6c4d87..ea96934a6b55 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -360,17 +360,19 @@ SDT_PROBE_DEFINE2(sched, , , surrender, "struct thread *",
#ifdef SMP
/*
* We need some randomness. Implement the classic Linear Congruential
* generator X_{n+1}=(aX_n+c) mod m. These values are optimized for
- * m = 2^32, a = 69069 and c = 5. This is signed so that we can get
- * both positive and negative values from it by shifting the value
- * right.
+ * m = 2^32, a = 69069 and c = 5. We only return the upper 16 bits
+ * of the random state (in the low bits of our answer) to return
+ * the maximum randomness.
*/
-static int sched_random(void)
+static uint32_t
+sched_random(void)
{
- int rnd, *rndptr;
- rndptr = DPCPU_PTR(randomval);
- rnd = *rndptr * 69069 + 5;
- *rndptr = rnd;
- return(rnd);
+ uint32_t *rndptr;
+
+ rndptr = DPCPU_PTR(randomval);
+ *rndptr = *rndptr * 69069 + 5;
+
+ return (*rndptr >> 16);
}
#endif
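A quick userland check of the "upper 16 bits" rationale in the comment
above: with a power-of-two modulus, bit k of an LCG cycles with period
2^(k+1), so the lowest bit of the raw state merely alternates. A local
variable stands in for the per-CPU randomval here:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t x = 1;

	for (int i = 0; i < 8; i++) {
		x = x * 69069 + 5;
		/* The low bit flips every step; the high bits look random. */
		printf("low bit %u  high 16 bits 0x%04x\n",
		    (unsigned)(x & 1), (unsigned)(x >> 16));
	}
	return (0);
}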
@@ -718,7 +720,7 @@ cpu_search(const struct cpu_group *cg, struct cpu_search *low,
CPU_CLR(cpu, &cpumask);
tdq = TDQ_CPU(cpu);
load = tdq->tdq_load * 256;
- rnd = sched_random() >> 26; /* -32 to +31 */
+ rnd = sched_random() % 32;	/* 0 to 31 */
if (match & CPU_SEARCH_LOWEST) {
if (cpu == low->cs_prefer)
load -= 64;
@@ -882,7 +884,7 @@
if (smp_started == 0 || rebalance == 0)
return;
balance_ticks = max(balance_interval / 2, 1) +
- ((sched_random() >> 16) % balance_interval);
+ (sched_random() % balance_interval);
tdq = TDQ_SELF();
TDQ_UNLOCK(tdq);
sched_balance_group(cpu_top);
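Worked example of the new rebalance jitter: the next period is a fixed
base of balance_interval / 2 (at least 1) plus an unsigned jitter of
0..balance_interval-1. The interval value 128 and the rnd value below
are illustrative stand-ins, and the max() macro is defined locally for
this userland sketch:

#include <stdint.h>
#include <stdio.h>

#define	max(a, b)	(((a) > (b)) ? (a) : (b))

int
main(void)
{
	uint32_t balance_interval = 128;	/* illustrative interval */
	uint32_t rnd = 0x2f37;			/* stand-in for sched_random() */
	uint32_t balance_ticks;

	/* Fixed base of interval/2 (at least 1) plus 0..interval-1 jitter. */
	balance_ticks = max(balance_interval / 2, 1) +
	    rnd % balance_interval;

	printf("next balance in %u ticks (possible range %u..%u)\n",
	    (unsigned)balance_ticks,
	    (unsigned)max(balance_interval / 2, 1),
	    (unsigned)(max(balance_interval / 2, 1) + balance_interval - 1));
	return (0);
}

Since random() never returned negative values, an unsigned
sched_random() keeps the modulus result non-negative, which is the
old random()-based sched_balance() behavior the log message says this
restores.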