/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Anhua Xu
 *    Kevin Tian <kevin.tian@intel.com>
 *
 * Contributors:
 *    Min He <min.he@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 */

#include "i915_drv.h"
#include "gvt.h"

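/* Return true if any engine's workload queue of this vGPU is non-empty. */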
static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu)
{
    enum intel_engine_id i;
    struct intel_engine_cs *engine;

    for_each_engine(engine, vgpu->gvt->gt, i) {
        if (!list_empty(workload_q_head(vgpu, engine)))
            return true;
    }

    return false;
}

/* Give a vGPU 2 seconds of higher priority when it starts */
#define GVT_SCHED_VGPU_PRI_TIME  2

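/*
 * Per-vGPU scheduling state: position in the scheduler's LRU run queue,
 * the time actually consumed (sched_time), the timeslice left in the
 * current balance period (left_ts), the slice allocated per period
 * (allocated_ts), and the temporary priority window used right after the
 * vGPU starts (pri_sched/pri_time).
 */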
struct vgpu_sched_data {
    struct list_head lru_list;
    struct intel_vgpu *vgpu;
    bool active;
    bool pri_sched;
    ktime_t pri_time;
    ktime_t sched_in_time;
    ktime_t sched_time;
    ktime_t left_ts;
    ktime_t allocated_ts;

    struct vgpu_sched_ctl sched_ctl;
};

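/*
 * Global state of the time-based scheduler: the hrtimer that drives
 * scheduling ticks, its period, the LRU run queue of active vGPUs and the
 * expiry time of the current timeslice balance period.
 */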
struct gvt_sched_data {
    struct intel_gvt *gvt;
    struct hrtimer timer;
    unsigned long period;
    struct list_head lru_runq_head;
    ktime_t expire_time;
};

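/*
 * Charge the time elapsed since the vGPU was scheduled in: add it to the
 * accumulated sched_time and subtract it from the remaining timeslice.
 * The idle vGPU is never charged.
 */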
static void vgpu_update_timeslice(struct intel_vgpu *vgpu, ktime_t cur_time)
{
    ktime_t delta_ts;
    struct vgpu_sched_data *vgpu_data;

    if (!vgpu || vgpu == vgpu->gvt->idle_vgpu)
        return;

    vgpu_data = vgpu->sched_data;
    delta_ts = ktime_sub(cur_time, vgpu_data->sched_in_time);
    vgpu_data->sched_time = ktime_add(vgpu_data->sched_time, delta_ts);
    vgpu_data->left_ts = ktime_sub(vgpu_data->left_ts, delta_ts);
    vgpu_data->sched_in_time = cur_time;
}

#define GVT_TS_BALANCE_PERIOD_MS 100
#define GVT_TS_BALANCE_STAGE_NUM 10

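/*
 * Rebalance timeslices across the run queue. At stage 0 (once every
 * GVT_TS_BALANCE_STAGE_NUM invocations) the GVT_TS_BALANCE_PERIOD_MS
 * period is split among the vGPUs in proportion to their weight and any
 * previous debt is dropped; in the other stages each vGPU's per-period
 * allocation is simply added to whatever it has left (or owes).
 */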
static void gvt_balance_timeslice(struct gvt_sched_data *sched_data)
{
    struct vgpu_sched_data *vgpu_data;
    struct list_head *pos;
    static u64 stage_check;
    int stage = stage_check++ % GVT_TS_BALANCE_STAGE_NUM;

    /* The timeslice accumulation is reset at stage 0: the slice is
     * allocated afresh, without carrying over any previous debt.
     */
    if (stage == 0) {
        int total_weight = 0;
        ktime_t fair_timeslice;

        list_for_each(pos, &sched_data->lru_runq_head) {
            vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list);
            total_weight += vgpu_data->sched_ctl.weight;
        }

        list_for_each(pos, &sched_data->lru_runq_head) {
            vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list);
            fair_timeslice = ktime_divns(ms_to_ktime(GVT_TS_BALANCE_PERIOD_MS),
                             total_weight) * vgpu_data->sched_ctl.weight;

            vgpu_data->allocated_ts = fair_timeslice;
            vgpu_data->left_ts = vgpu_data->allocated_ts;
        }
    } else {
        list_for_each(pos, &sched_data->lru_runq_head) {
            vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list);

            /* The timeslice for the next 100ms should include the
             * leftover/debt slice from previous stages.
             */
            vgpu_data->left_ts += vgpu_data->allocated_ts;
        }
    }
}

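/*
 * Switch the scheduler from current_vgpu to next_vgpu. The switch only
 * happens once every engine has finished the workloads of the current
 * vGPU; until then need_reschedule stays set so no new workloads are
 * dispatched for it.
 */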
static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
{
    struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
    enum intel_engine_id i;
    struct intel_engine_cs *engine;
    struct vgpu_sched_data *vgpu_data;
    ktime_t cur_time;

    /* No need to schedule if next_vgpu is the same as current_vgpu;
     * let the scheduler choose next_vgpu again by setting it to NULL.
     */
    if (scheduler->next_vgpu == scheduler->current_vgpu) {
        scheduler->next_vgpu = NULL;
        return;
    }

    /*
     * After this flag is set, the workload dispatch thread stops
     * dispatching workloads for the current vGPU.
     */
    scheduler->need_reschedule = true;

    /* still have uncompleted workloads? */
    for_each_engine(engine, gvt->gt, i) {
        if (scheduler->current_workload[engine->id])
            return;
    }

    cur_time = ktime_get();
    vgpu_update_timeslice(scheduler->current_vgpu, cur_time);
    vgpu_data = scheduler->next_vgpu->sched_data;
    vgpu_data->sched_in_time = cur_time;

    /* switch current vgpu */
    scheduler->current_vgpu = scheduler->next_vgpu;
    scheduler->next_vgpu = NULL;

    scheduler->need_reschedule = false;

    /* wake up workload dispatch thread */
    for_each_engine(engine, gvt->gt, i)
        wake_up(&scheduler->waitq[engine->id]);
}

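/*
 * Scan the LRU run queue for a vGPU that has pending workloads and is
 * either still inside its start-up priority window or has timeslice left.
 * Returns NULL if no such vGPU exists.
 */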
static struct intel_vgpu *find_busy_vgpu(struct gvt_sched_data *sched_data)
{
    struct vgpu_sched_data *vgpu_data;
    struct intel_vgpu *vgpu = NULL;
    struct list_head *head = &sched_data->lru_runq_head;
    struct list_head *pos;

    /* search for a vGPU with pending workloads */
    list_for_each(pos, head) {
        vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list);
        if (!vgpu_has_pending_workload(vgpu_data->vgpu))
            continue;

        if (vgpu_data->pri_sched) {
            if (ktime_before(ktime_get(), vgpu_data->pri_time)) {
                vgpu = vgpu_data->vgpu;
                break;
            } else
                vgpu_data->pri_sched = false;
        }

        /* Return the vGPU only if it has time slice left */
        if (vgpu_data->left_ts > 0) {
            vgpu = vgpu_data->vgpu;
            break;
        }
    }

    return vgpu;
}

/* in nanoseconds */
#define GVT_DEFAULT_TIME_SLICE 1000000

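/*
 * Core of the time-based scheduler (TBS): pick the next vGPU to run.
 * A busy vGPU becomes next_vgpu and is moved to the tail of the LRU run
 * queue unless it is in its priority window; if no vGPU is runnable the
 * idle vGPU is chosen. Finally, try to switch to the chosen vGPU.
 */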
static void tbs_sched_func(struct gvt_sched_data *sched_data)
{
    struct intel_gvt *gvt = sched_data->gvt;
    struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
    struct vgpu_sched_data *vgpu_data;
    struct intel_vgpu *vgpu = NULL;

    /* no active vGPU, or a target has already been chosen */
    if (list_empty(&sched_data->lru_runq_head) || scheduler->next_vgpu)
        goto out;

    vgpu = find_busy_vgpu(sched_data);
    if (vgpu) {
        scheduler->next_vgpu = vgpu;
        vgpu_data = vgpu->sched_data;
        if (!vgpu_data->pri_sched) {
            /* Move the last used vGPU to the tail of lru_list */
            list_del_init(&vgpu_data->lru_list);
            list_add_tail(&vgpu_data->lru_list,
                      &sched_data->lru_runq_head);
        }
    } else {
        scheduler->next_vgpu = gvt->idle_vgpu;
    }
out:
    if (scheduler->next_vgpu)
        try_to_schedule_next_vgpu(gvt);
}

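/*
 * Scheduler tick: rebalance timeslices when a scheduling request is
 * pending and the current balance period has expired, charge the running
 * vGPU for the time it has consumed, then run the TBS scheduler to pick
 * the next vGPU.
 */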
void intel_gvt_schedule(struct intel_gvt *gvt)
{
    struct gvt_sched_data *sched_data = gvt->scheduler.sched_data;
    ktime_t cur_time;

    mutex_lock(&gvt->sched_lock);
    cur_time = ktime_get();

    if (test_and_clear_bit(INTEL_GVT_REQUEST_SCHED,
                (void *)&gvt->service_request)) {
        if (cur_time >= sched_data->expire_time) {
            gvt_balance_timeslice(sched_data);
            sched_data->expire_time = ktime_add_ms(
                cur_time, GVT_TS_BALANCE_PERIOD_MS);
        }
    }
    clear_bit(INTEL_GVT_REQUEST_EVENT_SCHED, (void *)&gvt->service_request);

    vgpu_update_timeslice(gvt->scheduler.current_vgpu, cur_time);
    tbs_sched_func(sched_data);

    mutex_unlock(&gvt->sched_lock);
}

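/*
 * hrtimer callback: raise an INTEL_GVT_REQUEST_SCHED service request and
 * re-arm the timer one period into the future.
 */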
static enum hrtimer_restart tbs_timer_fn(struct hrtimer *timer_data)
{
    struct gvt_sched_data *data;

    data = container_of(timer_data, struct gvt_sched_data, timer);

    intel_gvt_request_service(data->gvt, INTEL_GVT_REQUEST_SCHED);

    hrtimer_add_expires_ns(&data->timer, data->period);

    return HRTIMER_RESTART;
}

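/*
 * Allocate the global TBS scheduler data, initialize the run queue and
 * set up (but do not start) the scheduling hrtimer.
 */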
static int tbs_sched_init(struct intel_gvt *gvt)
{
    struct intel_gvt_workload_scheduler *scheduler =
        &gvt->scheduler;

    struct gvt_sched_data *data;

    data = kzalloc(sizeof(*data), GFP_KERNEL);
    if (!data)
        return -ENOMEM;

    INIT_LIST_HEAD(&data->lru_runq_head);
    hrtimer_init(&data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
    data->timer.function = tbs_timer_fn;
    data->period = GVT_DEFAULT_TIME_SLICE;
    data->gvt = gvt;

    scheduler->sched_data = data;

    return 0;
}

static void tbs_sched_clean(struct intel_gvt *gvt)
{
    struct intel_gvt_workload_scheduler *scheduler =
        &gvt->scheduler;
    struct gvt_sched_data *data = scheduler->sched_data;

    hrtimer_cancel(&data->timer);

    kfree(data);
    scheduler->sched_data = NULL;
}

static int tbs_sched_init_vgpu(struct intel_vgpu *vgpu)
{
    struct vgpu_sched_data *data;

    data = kzalloc(sizeof(*data), GFP_KERNEL);
    if (!data)
        return -ENOMEM;

    data->sched_ctl.weight = vgpu->sched_ctl.weight;
    data->vgpu = vgpu;
    INIT_LIST_HEAD(&data->lru_list);

    vgpu->sched_data = data;

    return 0;
}

static void tbs_sched_clean_vgpu(struct intel_vgpu *vgpu)
{
    struct intel_gvt *gvt = vgpu->gvt;
    struct gvt_sched_data *sched_data = gvt->scheduler.sched_data;

    kfree(vgpu->sched_data);
    vgpu->sched_data = NULL;

    /* this vgpu id has already been removed; stop the timer if none are left */
    if (idr_is_empty(&gvt->vgpu_idr))
        hrtimer_cancel(&sched_data->timer);
}

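/*
 * Put the vGPU on the run queue with a GVT_SCHED_VGPU_PRI_TIME-second
 * priority window and start the scheduling timer if it is not already
 * running.
 */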
static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
{
    struct gvt_sched_data *sched_data = vgpu->gvt->scheduler.sched_data;
    struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
    ktime_t now;

    if (!list_empty(&vgpu_data->lru_list))
        return;

    now = ktime_get();
    vgpu_data->pri_time = ktime_add(now,
                    ktime_set(GVT_SCHED_VGPU_PRI_TIME, 0));
    vgpu_data->pri_sched = true;

    list_add(&vgpu_data->lru_list, &sched_data->lru_runq_head);

    if (!hrtimer_active(&sched_data->timer))
        hrtimer_start(&sched_data->timer, ktime_add_ns(ktime_get(),
            sched_data->period), HRTIMER_MODE_ABS);
    vgpu_data->active = true;
}

static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu)
{
    struct vgpu_sched_data *vgpu_data = vgpu->sched_data;

    list_del_init(&vgpu_data->lru_list);
    vgpu_data->active = false;
}

static const struct intel_gvt_sched_policy_ops tbs_schedule_ops = {
    .init = tbs_sched_init,
    .clean = tbs_sched_clean,
    .init_vgpu = tbs_sched_init_vgpu,
    .clean_vgpu = tbs_sched_clean_vgpu,
    .start_schedule = tbs_sched_start_schedule,
    .stop_schedule = tbs_sched_stop_schedule,
};

int intel_gvt_init_sched_policy(struct intel_gvt *gvt)
{
    int ret;

    mutex_lock(&gvt->sched_lock);
    gvt->scheduler.sched_ops = &tbs_schedule_ops;
    ret = gvt->scheduler.sched_ops->init(gvt);
    mutex_unlock(&gvt->sched_lock);

    return ret;
}

void intel_gvt_clean_sched_policy(struct intel_gvt *gvt)
{
    mutex_lock(&gvt->sched_lock);
    gvt->scheduler.sched_ops->clean(gvt);
    mutex_unlock(&gvt->sched_lock);
}

/* For the per-vGPU scheduler policy there are two pieces of per-vGPU data:
 * sched_data and sched_ctl. Both are treated as part of the global
 * scheduler and are protected by gvt->sched_lock. Callers must decide for
 * themselves whether vgpu_lock also needs to be held outside.
 */

int intel_vgpu_init_sched_policy(struct intel_vgpu *vgpu)
{
    int ret;

    mutex_lock(&vgpu->gvt->sched_lock);
    ret = vgpu->gvt->scheduler.sched_ops->init_vgpu(vgpu);
    mutex_unlock(&vgpu->gvt->sched_lock);

    return ret;
}

void intel_vgpu_clean_sched_policy(struct intel_vgpu *vgpu)
{
    mutex_lock(&vgpu->gvt->sched_lock);
    vgpu->gvt->scheduler.sched_ops->clean_vgpu(vgpu);
    mutex_unlock(&vgpu->gvt->sched_lock);
}

void intel_vgpu_start_schedule(struct intel_vgpu *vgpu)
{
    struct vgpu_sched_data *vgpu_data = vgpu->sched_data;

    mutex_lock(&vgpu->gvt->sched_lock);
    if (!vgpu_data->active) {
        gvt_dbg_core("vgpu%d: start schedule\n", vgpu->id);
        vgpu->gvt->scheduler.sched_ops->start_schedule(vgpu);
    }
    mutex_unlock(&vgpu->gvt->sched_lock);
}

void intel_gvt_kick_schedule(struct intel_gvt *gvt)
{
    mutex_lock(&gvt->sched_lock);
    intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED);
    mutex_unlock(&gvt->sched_lock);
}

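/*
 * Take the vGPU out of scheduling: remove it from the run queue, clear it
 * as next/current vGPU, and switch the MMIO context away from it on any
 * engine it still owns.
 */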
void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
{
    struct intel_gvt_workload_scheduler *scheduler =
        &vgpu->gvt->scheduler;
    struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
    struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
    struct intel_engine_cs *engine;
    enum intel_engine_id id;

    if (!vgpu_data->active)
        return;

    gvt_dbg_core("vgpu%d: stop schedule\n", vgpu->id);

    mutex_lock(&vgpu->gvt->sched_lock);
    scheduler->sched_ops->stop_schedule(vgpu);

    if (scheduler->next_vgpu == vgpu)
        scheduler->next_vgpu = NULL;

    if (scheduler->current_vgpu == vgpu) {
        /* stop workload dispatching */
        scheduler->need_reschedule = true;
        scheduler->current_vgpu = NULL;
    }

    intel_runtime_pm_get(&dev_priv->runtime_pm);
    spin_lock_bh(&scheduler->mmio_context_lock);
    for_each_engine(engine, vgpu->gvt->gt, id) {
        if (scheduler->engine_owner[engine->id] == vgpu) {
            intel_gvt_switch_mmio(vgpu, NULL, engine);
            scheduler->engine_owner[engine->id] = NULL;
        }
    }
    spin_unlock_bh(&scheduler->mmio_context_lock);
    intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
    mutex_unlock(&vgpu->gvt->sched_lock);
}