/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Only give sleepers 50% of their service deficit. This allows
 * them to run sooner, but does not allow tons of sleepers to
 * rip the spread apart.
 */
SCHED_FEAT(GENTLE_FAIR_SLEEPERS, true)

/*
 * Place new tasks ahead so that they do not starve already running
 * tasks.
 */
SCHED_FEAT(START_DEBIT, true)

/*
 * Prefer to schedule the task we woke last (assuming it failed
 * wakeup-preemption), since it's likely going to consume data we
 * touched; increases cache locality.
 */
SCHED_FEAT(NEXT_BUDDY, false)

/*
 * Prefer to schedule the task that ran last (when we did
 * wake-preempt), as it will likely touch the same data; increases
 * cache locality.
 */
SCHED_FEAT(LAST_BUDDY, true)

/*
 * Consider buddies to be cache hot; decreases the likelihood of a
 * cache buddy being migrated away, increases cache locality.
 */
SCHED_FEAT(CACHE_HOT_BUDDY, true)

/*
 * Allow wakeup-time preemption of the current task:
 */
SCHED_FEAT(WAKEUP_PREEMPTION, true)

SCHED_FEAT(HRTICK, false)
SCHED_FEAT(HRTICK_DL, false)
SCHED_FEAT(DOUBLE_TICK, false)

/*
 * Decrement CPU capacity based on time not spent running tasks.
 */
SCHED_FEAT(NONTASK_CAPACITY, true)

#ifdef CONFIG_PREEMPT_RT
SCHED_FEAT(TTWU_QUEUE, false)
#else

/*
 * Queue remote wakeups on the target CPU and process them
 * using the scheduler IPI. Reduces rq->lock contention/bounces.
 */
SCHED_FEAT(TTWU_QUEUE, true)
#endif

/*
 * When doing wakeups, attempt to limit superfluous scans of the LLC domain.
 */
SCHED_FEAT(SIS_PROP, false)
SCHED_FEAT(SIS_UTIL, true)

/*
 * Issue a WARN when we do multiple update_rq_clock() calls
 * in a single rq->lock section. Default disabled because the
 * annotations are not complete.
 */
SCHED_FEAT(WARN_DOUBLE_CLOCK, false)

#ifdef HAVE_RT_PUSH_IPI
/*
 * Avoid a thundering herd of CPUs lowering their priorities at the
 * same time while a single CPU has a migratable RT task waiting to
 * run: rather than have every such CPU try to take that CPU's rq lock
 * (possibly creating heavy contention), send an IPI to that CPU and
 * let it push the RT task to where it should go.
 */
SCHED_FEAT(RT_PUSH_IPI, true)
#endif

SCHED_FEAT(RT_RUNTIME_SHARE, false)
SCHED_FEAT(LB_MIN, false)
SCHED_FEAT(ATTACH_AGE_LOAD, true)

SCHED_FEAT(WA_IDLE, true)
SCHED_FEAT(WA_WEIGHT, true)
SCHED_FEAT(WA_BIAS, true)

/*
 * Utilization estimation: use estimated CPU utilization.
 */
SCHED_FEAT(UTIL_EST, true)
SCHED_FEAT(UTIL_EST_FASTUP, true)

SCHED_FEAT(LATENCY_WARN, false)

SCHED_FEAT(ALT_PERIOD, true)
SCHED_FEAT(BASE_SLICE, true)
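
For context on how a SCHED_FEAT list like the one above gets consumed: features.h is an X-macro file, and each consumer redefines SCHED_FEAT before expanding the list (kernel/sched/sched.h does this by wrapping an #include of this file). Below is a minimal, self-contained sketch of that pattern, assuming the simple non-debug bitmask form of sched_feat(); FEATURE_LIST and sched_features are illustrative stand-ins for the re-included file and the kernel's sysctl_sched_features, not kernel identifiers.

#include <stdbool.h>
#include <stdio.h>

/*
 * Stand-in for re-including features.h; the kernel re-expands the
 * real file once per consumer instead of using a wrapper macro.
 */
#define FEATURE_LIST                            \
	SCHED_FEAT(GENTLE_FAIR_SLEEPERS, true)  \
	SCHED_FEAT(NEXT_BUDDY, false)           \
	SCHED_FEAT(LAST_BUDDY, true)

/* Expansion 1: one enum index per feature. */
#define SCHED_FEAT(name, enabled) __SCHED_FEAT_##name,
enum { FEATURE_LIST __SCHED_FEAT_NR };
#undef SCHED_FEAT

/* Expansion 2: a bitmask holding the compile-time defaults. */
#define SCHED_FEAT(name, enabled) \
	((enabled) ? 1UL << __SCHED_FEAT_##name : 0) |
static const unsigned long sched_features = FEATURE_LIST 0;
#undef SCHED_FEAT

/* Test a feature by name, in the spirit of the kernel's sched_feat(). */
#define sched_feat(x) \
	(!!(sched_features & (1UL << __SCHED_FEAT_##x)))

int main(void)
{
	printf("NEXT_BUDDY: %d\n", sched_feat(NEXT_BUDDY)); /* prints 0 */
	printf("LAST_BUDDY: %d\n", sched_feat(LAST_BUDDY)); /* prints 1 */
	return 0;
}

On kernels built with CONFIG_SCHED_DEBUG, these defaults can also be flipped at runtime by writing the feature name, or NO_<name>, to the debugfs features file (/sys/kernel/debug/sched/features on recent kernels; older kernels expose /sys/kernel/debug/sched_features instead).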