// SPDX-License-Identifier: GPL-2.0
/*
 * stop-task scheduling class.
 *
 * The stop task is the highest priority task in the system, it preempts
 * everything and will be preempted by nothing.
 *
 * See kernel/stop_machine.c
 */

#ifdef CONFIG_SMP
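/*
 * The per-CPU stop task is pinned to its CPU, so task placement is
 * trivial: it always stays on the CPU it is already on.
 */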
static int
select_task_rq_stop(struct task_struct *p, int cpu, int flags)
{
        return task_cpu(p);
}

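/*
 * Report whether this runqueue has a runnable stop task; a nonzero
 * return tells the core scheduler that this class will supply the
 * next task, so lower-priority classes need not be balanced first.
 */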
static int
balance_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{
        return sched_stop_runnable(rq);
}
#endif

static void
check_preempt_curr_stop(struct rq *rq, struct task_struct *p, int flags)
{
        /* we're never preempted */
}

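/*
 * Mark the moment the stop task starts running so that
 * put_prev_task_stop() can account the CPU time it consumed.
 */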
static void set_next_task_stop(struct rq *rq, struct task_struct *stop, bool first)
{
        stop->se.exec_start = rq_clock_task(rq);
}

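/*
 * There is at most one stop task per runqueue (rq->stop); return it
 * if it is currently runnable.
 */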
static struct task_struct *pick_task_stop(struct rq *rq)
{
        if (!sched_stop_runnable(rq))
                return NULL;

        return rq->stop;
}

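/*
 * Pick the stop task and mark it as the task about to run.
 */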
static struct task_struct *pick_next_task_stop(struct rq *rq)
{
        struct task_struct *p = pick_task_stop(rq);

        if (p)
                set_next_task_stop(rq, p, true);

        return p;
}

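/*
 * The stop task is never kept on a per-class run list; enqueue and
 * dequeue only need to keep rq->nr_running in sync.
 */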
static void
enqueue_task_stop(struct rq *rq, struct task_struct *p, int flags)
{
        add_nr_running(rq, 1);
}

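/* Mirror of enqueue_task_stop(): drop the nr_running contribution. */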
static void
dequeue_task_stop(struct rq *rq, struct task_struct *p, int flags)
{
        sub_nr_running(rq, 1);
}

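/* The stop task must never yield the CPU. */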
static void yield_task_stop(struct rq *rq)
{
        BUG();
}

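/*
 * Account the CPU time the stop task used while it was running and
 * reset its exec_start for the next accounting period.
 */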
static void put_prev_task_stop(struct rq *rq, struct task_struct *prev)
{
        struct task_struct *curr = rq->curr;
        u64 delta_exec;

        delta_exec = rq_clock_task(rq) - curr->se.exec_start;
        if (unlikely((s64)delta_exec < 0))
                delta_exec = 0;

        schedstat_set(curr->stats.exec_max,
                      max(curr->stats.exec_max, delta_exec));

        curr->se.sum_exec_runtime += delta_exec;
        account_group_exec_runtime(curr, delta_exec);

        curr->se.exec_start = rq_clock_task(rq);
        cgroup_account_cputime(curr, delta_exec);
}

/*
 * scheduler tick hitting a task of our scheduling class.
 *
 * NOTE: This function can be called remotely by the tick offload that
 * runs along the remote tick. The tick can be offloaded to any CPU so
 * make sure NOT to play with the state of the task here!
 */
static void task_tick_stop(struct rq *rq, struct task_struct *curr, int queued)
{
}

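/* Tasks are never switched into the stop class via the normal class-change path. */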
static void switched_to_stop(struct rq *rq, struct task_struct *p)
{
        BUG();
}

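/* The stop task has no priority to change. */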
static void
prio_changed_stop(struct rq *rq, struct task_struct *p, int oldprio)
{
        BUG();
}

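/* Runtime accounting for the stop task is done in put_prev_task_stop(). */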
static void update_curr_stop(struct rq *rq)
{
}

/*
 * Simple, special scheduling class for the per-CPU stop tasks:
 */
DEFINE_SCHED_CLASS(stop) = {

        .enqueue_task           = enqueue_task_stop,
        .dequeue_task           = dequeue_task_stop,
        .yield_task             = yield_task_stop,

        .check_preempt_curr     = check_preempt_curr_stop,

        .pick_next_task         = pick_next_task_stop,
        .put_prev_task          = put_prev_task_stop,
        .set_next_task          = set_next_task_stop,

#ifdef CONFIG_SMP
        .balance                = balance_stop,
        .pick_task              = pick_task_stop,
        .select_task_rq         = select_task_rq_stop,
        .set_cpus_allowed       = set_cpus_allowed_common,
#endif

        .task_tick              = task_tick_stop,

        .prio_changed           = prio_changed_stop,
        .switched_to            = switched_to_stop,
        .update_curr            = update_curr_stop,
};