aboutsummaryrefslogtreecommitdiff
path: root/sys/kern/sched_ule.c
diff options
context:
space:
mode:
authorAlexander Motin <mav@FreeBSD.org>2021-08-02 14:50:34 +0000
committerAlexander Motin <mav@FreeBSD.org>2021-08-02 14:55:28 +0000
commitca34553b6f631ec4ec5ae9f3825e3196e172c35d (patch)
tree1b27d5f3ad19f9c66b65af9ffba96098994f38cb /sys/kern/sched_ule.c
parent5c9cb96a239cd9793a74ff91baa7296b814948aa (diff)
downloadsrc-ca34553b6f631ec4ec5ae9f3825e3196e172c35d.tar.gz
src-ca34553b6f631ec4ec5ae9f3825e3196e172c35d.zip
sched_ule(4): Pre-seed sched_random().
I don't think it changes anything, but why not. While there, make cpu_search_highest() use all 8 lower load bits for noise, since it does not use cs_prefer and the code is no longer shared with cpu_search_lowest(). MFC after: 1 month
Diffstat (limited to 'sys/kern/sched_ule.c')
-rw-r--r--sys/kern/sched_ule.c3
1 file changed, 2 insertions, 1 deletion
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index 1bdcfb1f793d..9bcc2a64e2b8 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -739,7 +739,7 @@ cpu_search_highest(const struct cpu_group *cg, const struct cpu_search *s,
if (l < s->cs_limit || !tdq->tdq_transferable ||
!CPU_ISSET(c, s->cs_mask))
continue;
- load -= sched_random() % 128;
+ load -= sched_random() % 256;
if (load > bload) {
bload = load;
r->cs_cpu = c;
@@ -1416,6 +1416,7 @@ sched_setup_smp(void)
tdq->tdq_cg = smp_topo_find(cpu_top, i);
if (tdq->tdq_cg == NULL)
panic("Can't find cpu group for %d\n", i);
+ DPCPU_ID_SET(i, randomval, i * 69069 + 5);
}
PCPU_SET(sched, DPCPU_PTR(tdq));
balance_tdq = TDQ_SELF();