// SPDX-License-Identifier: GPL-2.0-only
/*
 *  kernel/sched/cpudeadline.c
 *
 *  Global CPU deadline management
 *
 *  Author: Juri Lelli <j.lelli@sssup.it>
 */

static inline int parent(int i)
{
	return (i - 1) >> 1;
}

static inline int left_child(int i)
{
	return (i << 1) + 1;
}

static inline int right_child(int i)
{
	return (i << 1) + 2;
}
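
/*
 * The heap lives in the first cp->size slots of cp->elements[] in the
 * usual 0-based array layout, so the index helpers above are plain
 * shift/offset arithmetic. A worked example on a four-node heap:
 *
 *                 [0]
 *                /   \
 *              [1]   [2]
 *              /
 *            [3]
 *
 *	left_child(1)  == (1 << 1) + 1 == 3
 *	right_child(0) == (0 << 1) + 2 == 2
 *	parent(3)      == (3 - 1) >> 1 == 1
 */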

static void cpudl_heapify_down(struct cpudl *cp, int idx)
{
	int l, r, largest;

	int orig_cpu = cp->elements[idx].cpu;
	u64 orig_dl = cp->elements[idx].dl;

	if (left_child(idx) >= cp->size)
		return;

	/* adapted from lib/prio_heap.c */
	while (1) {
		u64 largest_dl;

		l = left_child(idx);
		r = right_child(idx);
		largest = idx;
		largest_dl = orig_dl;

		if ((l < cp->size) && dl_time_before(orig_dl,
						cp->elements[l].dl)) {
			largest = l;
			largest_dl = cp->elements[l].dl;
		}
		if ((r < cp->size) && dl_time_before(largest_dl,
						cp->elements[r].dl))
			largest = r;

		if (largest == idx)
			break;

		/* pull largest child onto idx */
		cp->elements[idx].cpu = cp->elements[largest].cpu;
		cp->elements[idx].dl = cp->elements[largest].dl;
		cp->elements[cp->elements[idx].cpu].idx = idx;
		idx = largest;
	}
	/* actual push down of saved original values orig_* */
	cp->elements[idx].cpu = orig_cpu;
	cp->elements[idx].dl = orig_dl;
	cp->elements[cp->elements[idx].cpu].idx = idx;
}
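
/*
 * Note the "hole" technique above: rather than swapping (cpu, dl)
 * pairs at every level, cpudl_heapify_down() saves the original pair
 * once, pulls each later-deadline child up into the hole, and writes
 * the saved pair back exactly once at its final position, updating the
 * cpu->idx back-pointers as it goes. That is roughly one store per
 * level instead of three for a swap-based sift.
 */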

static void cpudl_heapify_up(struct cpudl *cp, int idx)
{
	int p;

	int orig_cpu = cp->elements[idx].cpu;
	u64 orig_dl = cp->elements[idx].dl;

	if (idx == 0)
		return;

	do {
		p = parent(idx);
		if (dl_time_before(orig_dl, cp->elements[p].dl))
			break;
		/* pull parent onto idx */
		cp->elements[idx].cpu = cp->elements[p].cpu;
		cp->elements[idx].dl = cp->elements[p].dl;
		cp->elements[cp->elements[idx].cpu].idx = idx;
		idx = p;
	} while (idx != 0);
	/* actual push up of saved original values orig_* */
	cp->elements[idx].cpu = orig_cpu;
	cp->elements[idx].dl = orig_dl;
	cp->elements[cp->elements[idx].cpu].idx = idx;
}

static void cpudl_heapify(struct cpudl *cp, int idx)
{
	if (idx > 0 && dl_time_before(cp->elements[parent(idx)].dl,
				cp->elements[idx].dl))
		cpudl_heapify_up(cp, idx);
	else
		cpudl_heapify_down(cp, idx);
}

static inline int cpudl_maximum(struct cpudl *cp)
{
	return cp->elements[0].cpu;
}

/*
 * cpudl_find - find the best (later-dl) CPU in the system
 * @cp: the cpudl max-heap context
 * @p: the task
 * @later_mask: a mask to fill in with the selected CPUs (or NULL)
 *
 * Returns: int - CPUs were found
 */
int cpudl_find(struct cpudl *cp, struct task_struct *p,
	       struct cpumask *later_mask)
{
	const struct sched_dl_entity *dl_se = &p->dl;

	if (later_mask &&
	    cpumask_and(later_mask, cp->free_cpus, &p->cpus_mask)) {
		unsigned long cap, max_cap = 0;
		int cpu, max_cpu = -1;

		if (!static_branch_unlikely(&sched_asym_cpucapacity))
			return 1;

		/* Ensure the capacity of the CPUs fits the task. */
		for_each_cpu(cpu, later_mask) {
			if (!dl_task_fits_capacity(p, cpu)) {
				cpumask_clear_cpu(cpu, later_mask);

				cap = capacity_orig_of(cpu);

				if (cap > max_cap ||
				    (cpu == task_cpu(p) && cap == max_cap)) {
					max_cap = cap;
					max_cpu = cpu;
				}
			}
		}

		if (cpumask_empty(later_mask))
			cpumask_set_cpu(max_cpu, later_mask);

		return 1;
	} else {
		int best_cpu = cpudl_maximum(cp);

		WARN_ON(best_cpu != -1 && !cpu_present(best_cpu));

		if (cpumask_test_cpu(best_cpu, &p->cpus_mask) &&
		    dl_time_before(dl_se->deadline, cp->elements[0].dl)) {
			if (later_mask)
				cpumask_set_cpu(best_cpu, later_mask);

			return 1;
		}
	}
	return 0;
}
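
/*
 * Usage sketch (illustrative, not part of this file): the deadline.c
 * push path calls cpudl_find() when hunting for a runqueue that @task
 * could preempt; a return of 0 means no CPU holds a later deadline.
 * The shape below is a simplified sketch of find_later_rq(), assuming
 * a per-CPU scratch mask named local_cpu_mask_dl:
 *
 *	static int find_later_rq(struct task_struct *task)
 *	{
 *		struct cpumask *later_mask =
 *			this_cpu_cpumask_var_ptr(local_cpu_mask_dl);
 *
 *		if (!cpudl_find(&task_rq(task)->rd->cpudl, task, later_mask))
 *			return -1;
 *
 *		return cpumask_any(later_mask);
 *	}
 */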

/*
 * cpudl_clear - remove a CPU from the cpudl max-heap
 * @cp: the cpudl max-heap context
 * @cpu: the target CPU
 *
 * Notes: assumes cpu_rq(cpu)->lock is locked
 *
 * Returns: (void)
 */
void cpudl_clear(struct cpudl *cp, int cpu)
{
	int old_idx, new_cpu;
	unsigned long flags;

	WARN_ON(!cpu_present(cpu));

	raw_spin_lock_irqsave(&cp->lock, flags);

	old_idx = cp->elements[cpu].idx;
	if (old_idx == IDX_INVALID) {
		/*
		 * Nothing to remove if old_idx was invalid.
		 * This could happen during a dl class switch
		 * or a CPU hotplug.
		 */
	} else {
		new_cpu = cp->elements[cp->size - 1].cpu;
		cp->elements[old_idx].dl = cp->elements[cp->size - 1].dl;
		cp->elements[old_idx].cpu = new_cpu;
		cp->size--;
		cp->elements[new_cpu].idx = old_idx;
		cp->elements[cpu].idx = IDX_INVALID;
		cpudl_heapify(cp, old_idx);

		cpumask_set_cpu(cpu, cp->free_cpus);
	}
	raw_spin_unlock_irqrestore(&cp->lock, flags);
}
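
/*
 * The removal above is the classic array-heap delete: the victim's slot
 * is overwritten with the last element, the heap shrinks by one, and
 * cpudl_heapify() sifts the moved element up or down as its deadline
 * requires. E.g. clearing the root of a three-element heap copies
 * element 2 over element 0, drops cp->size to 2, and then sifts the
 * moved entry down if its deadline is earlier than its child's.
 */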

/*
 * cpudl_set - update the cpudl max-heap
 * @cp: the cpudl max-heap context
 * @cpu: the target CPU
 * @dl: the new earliest deadline for this CPU
 *
 * Notes: assumes cpu_rq(cpu)->lock is locked
 *
 * Returns: (void)
 */
void cpudl_set(struct cpudl *cp, int cpu, u64 dl)
{
	int old_idx;
	unsigned long flags;

	WARN_ON(!cpu_present(cpu));

	raw_spin_lock_irqsave(&cp->lock, flags);

	old_idx = cp->elements[cpu].idx;
	if (old_idx == IDX_INVALID) {
		int new_idx = cp->size++;

		cp->elements[new_idx].dl = dl;
		cp->elements[new_idx].cpu = cpu;
		cp->elements[cpu].idx = new_idx;
		cpudl_heapify_up(cp, new_idx);
		cpumask_clear_cpu(cpu, cp->free_cpus);
	} else {
		cp->elements[old_idx].dl = dl;
		cpudl_heapify(cp, old_idx);
	}

	raw_spin_unlock_irqrestore(&cp->lock, flags);
}
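
/*
 * Usage sketch (illustrative): deadline.c keeps this heap in sync with
 * each runqueue's earliest deadline, roughly as its inc_dl_deadline()
 * and dec_dl_deadline() helpers do:
 *
 *	// this CPU's earliest deadline changed: publish the new value
 *	cpudl_set(&rq->rd->cpudl, rq->cpu, dl_rq->earliest_dl.curr);
 *
 *	// the last deadline task left this CPU: drop it from the heap
 *	cpudl_clear(&rq->rd->cpudl, rq->cpu);
 *
 * Both calls run under the runqueue lock, per the Notes above.
 */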

/*
 * cpudl_set_freecpu - Add CPU to cpudl.free_cpus
 * @cp: the cpudl max-heap context
 * @cpu: rd attached CPU
 */
void cpudl_set_freecpu(struct cpudl *cp, int cpu)
{
	cpumask_set_cpu(cpu, cp->free_cpus);
}

/*
 * cpudl_clear_freecpu - Remove CPU from cpudl.free_cpus
 * @cp: the cpudl max-heap context
 * @cpu: rd attached CPU
 */
void cpudl_clear_freecpu(struct cpudl *cp, int cpu)
{
	cpumask_clear_cpu(cpu, cp->free_cpus);
}

/*
 * cpudl_init - initialize the cpudl structure
 * @cp: the cpudl max-heap context
 */
int cpudl_init(struct cpudl *cp)
{
	int i;

	raw_spin_lock_init(&cp->lock);
	cp->size = 0;

	cp->elements = kcalloc(nr_cpu_ids,
			       sizeof(struct cpudl_item),
			       GFP_KERNEL);
	if (!cp->elements)
		return -ENOMEM;

	if (!zalloc_cpumask_var(&cp->free_cpus, GFP_KERNEL)) {
		kfree(cp->elements);
		return -ENOMEM;
	}

	for_each_possible_cpu(i)
		cp->elements[i].idx = IDX_INVALID;

	return 0;
}

/*
 * cpudl_cleanup - clean up the cpudl structure
 * @cp: the cpudl max-heap context
 */
void cpudl_cleanup(struct cpudl *cp)
{
	free_cpumask_var(cp->free_cpus);
	kfree(cp->elements);
}
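
/*
 * Lifecycle sketch (illustrative): a struct cpudl is embedded in each
 * root domain and set up/torn down with it, roughly as
 * init_rootdomain()/free_rootdomain() in topology.c do:
 *
 *	if (cpudl_init(&rd->cpudl) != 0)	// -ENOMEM: unwind earlier allocs
 *		goto free_rto_mask;
 *	...
 *	cpudl_cleanup(&rd->cpudl);	// frees elements[] and free_cpus
 */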