path: root/sys/kern/sched_ule.c
author	Warner Losh <imp@FreeBSD.org>	2015-02-27 02:56:58 +0000
committer	Warner Losh <imp@FreeBSD.org>	2015-02-27 02:56:58 +0000
commit	0567b6cc16673727a15578ef6e69e163e55a11eb (patch)
tree	9bdc396b242b01138048b2c28e0fadf7869a5162 /sys/kern/sched_ule.c
parent	999efd901b5130085620dc57dbaaa35647c782e1 (diff)
Create sched_random() and move the LCG code into that. Call this when
we need randomness in ULE. This removes the random() call from the
rebalance interval code.

Submitted by:	Harrison Grundy
Differential Revision:	https://reviews.freebsd.org/D1968
Notes:
	svn path=/head/; revision=279349
Diffstat (limited to 'sys/kern/sched_ule.c')
-rw-r--r--	sys/kern/sched_ule.c	31
1 file changed, 22 insertions(+), 9 deletions(-)
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index 0e98fc81414f..76cd3b30f9c5 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -302,6 +302,7 @@ static int sched_interact_score(struct thread *);
static void sched_interact_update(struct thread *);
static void sched_interact_fork(struct thread *);
static void sched_pctcpu_update(struct td_sched *, int);
+static int sched_random(void);
/* Operations on per processor queues */
static struct thread *tdq_choose(struct tdq *);
@@ -357,6 +358,22 @@ SDT_PROBE_DEFINE2(sched, , , surrender, "struct thread *",
"struct proc *");
/*
+ * We need some randomness. Implement the classic Linear Congruential
+ * Generator: X_{n+1} = (a * X_n + c) mod m. The constants a = 69069
+ * and c = 5 are a well-tested choice for m = 2^32, which we get for
+ * free from 32-bit int overflow. The result is signed so that both
+ * positive and negative values can be obtained by shifting it right.
+ */
+static int
+sched_random(void)
+{
+	int rnd, *rndptr;
+
+	/* Update this CPU's private LCG state; per-CPU, so no locking. */
+	rndptr = DPCPU_PTR(randomval);
+	rnd = *rndptr * 69069 + 5;
+	*rndptr = rnd;
+	return (rnd);
+}
+
+/*
* Print the threads waiting on a run-queue.
*/
static void
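
[Editor's aside, not part of the commit: a minimal userland sketch of the same
recurrence, handy for poking at the generator outside the kernel. A plain
static stands in for the per-CPU DPCPU(randomval) state, the name lcg_demo is
made up for illustration, and 32-bit int is assumed, as on all platforms
FreeBSD supports.]

#include <stdio.h>

static int seed = 1;		/* stand-in for the per-CPU randomval */

static int
lcg_demo(void)
{
	/*
	 * Same recurrence as sched_random(): X_{n+1} = (69069 * X_n + 5)
	 * mod 2^32. Compute in unsigned arithmetic so the mod-2^32
	 * wraparound is well defined, then store back as signed.
	 */
	seed = (int)(69069U * (unsigned int)seed + 5U);
	return (seed);
}

int
main(void)
{
	for (int i = 0; i < 4; i++)
		printf("%d\n", lcg_demo());
	return (0);
}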
@@ -651,7 +668,7 @@ cpu_search(const struct cpu_group *cg, struct cpu_search *low,
cpuset_t cpumask;
struct cpu_group *child;
struct tdq *tdq;
- int cpu, i, hload, lload, load, total, rnd, *rndptr;
+ int cpu, i, hload, lload, load, total, rnd;
total = 0;
cpumask = cg->cg_mask;
@@ -700,8 +717,7 @@ cpu_search(const struct cpu_group *cg, struct cpu_search *low,
CPU_CLR(cpu, &cpumask);
tdq = TDQ_CPU(cpu);
load = tdq->tdq_load * 256;
- rndptr = DPCPU_PTR(randomval);
- rnd = (*rndptr = *rndptr * 69069 + 5) >> 26;
+ rnd = sched_random() >> 26; /* -32 to +31 */
if (match & CPU_SEARCH_LOWEST) {
if (cpu == low->cs_prefer)
load -= 64;
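
[Editor's aside: the /* -32 to +31 */ note holds because the shift keeps only
the top six bits of the 32-bit result, sign bit included. Right-shifting a
negative int is implementation-defined in C, but the compilers FreeBSD
supports sign-extend, so the boundary cases come out as below; this is a
standalone check, not code from the commit.]

#include <limits.h>
#include <stdio.h>

int
main(void)
{
	/* Sign-extending shift: INT_MIN >> 26 == -32, INT_MAX >> 26 == 31. */
	printf("%d %d %d\n", INT_MIN >> 26, 0 >> 26, INT_MAX >> 26);
	return (0);
}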
@@ -861,14 +877,11 @@ sched_balance(void)
{
struct tdq *tdq;
- /*
- * Select a random time between .5 * balance_interval and
- * 1.5 * balance_interval.
- */
- balance_ticks = max(balance_interval / 2, 1);
- balance_ticks += random() % balance_interval;
if (smp_started == 0 || rebalance == 0)
return;
+
+ balance_ticks = max(balance_interval / 2, 1) +
+ ((sched_random() >> 16) % balance_interval);
tdq = TDQ_SELF();
TDQ_UNLOCK(tdq);
sched_balance_group(cpu_top);
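
[Editor's aside: one C detail in the new balance_ticks expression is that
sched_random() >> 16 can be negative, and % keeps the dividend's sign, so the
offset term lies in (-balance_interval, +balance_interval) rather than the
[0, balance_interval) that random() % balance_interval produced. The sketch
below walks the arithmetic with fixed sample values in place of the generator;
balance_interval = 128 is just an example value here.]

#include <limits.h>
#include <stdio.h>

int
main(void)
{
	int balance_interval = 128;	/* example value */
	int samples[] = { INT_MAX, 1, -1, INT_MIN };
	int i, off, ticks;

	for (i = 0; i < 4; i++) {
		/* Offset keeps the sign of the shifted sample. */
		off = (samples[i] >> 16) % balance_interval;
		/* max(balance_interval / 2, 1) + offset, as in the diff. */
		ticks = (balance_interval / 2 > 1 ?
		    balance_interval / 2 : 1) + off;
		printf("rnd=%11d off=%6d ticks=%d\n", samples[i], off, ticks);
	}
	return (0);
}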