/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Anhua Xu
 *    Kevin Tian <kevin.tian@intel.com>
 *
 * Contributors:
 *    Min He <min.he@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 */

#include "i915_drv.h"
#include "gvt.h"

static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu)
{
	enum intel_engine_id i;
	struct intel_engine_cs *engine;

	for_each_engine(engine, vgpu->gvt->gt, i) {
		if (!list_empty(workload_q_head(vgpu, engine)))
			return true;
	}

	return false;
}

/* A vGPU is given higher priority for 2 seconds after it starts */
#define GVT_SCHED_VGPU_PRI_TIME 2
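/* Per-vGPU scheduling state. sched_time accumulates the total time the
 * vGPU has been scheduled in, while left_ts tracks the remaining part of
 * the timeslice allocated for the current balance window (it may go
 * negative and is then treated as debt by gvt_balance_timeslice()).
 */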
struct vgpu_sched_data {
	struct list_head lru_list;
	struct intel_vgpu *vgpu;
	bool active;
	bool pri_sched;
	ktime_t pri_time;
	ktime_t sched_in_time;
	ktime_t sched_time;
	ktime_t left_ts;
	ktime_t allocated_ts;

	struct vgpu_sched_ctl sched_ctl;
};

struct gvt_sched_data {
	struct intel_gvt *gvt;
	struct hrtimer timer;
	unsigned long period;
	struct list_head lru_runq_head;
	ktime_t expire_time;
};

static void vgpu_update_timeslice(struct intel_vgpu *vgpu, ktime_t cur_time)
{
	ktime_t delta_ts;
	struct vgpu_sched_data *vgpu_data;

	if (!vgpu || vgpu == vgpu->gvt->idle_vgpu)
		return;

	vgpu_data = vgpu->sched_data;
	delta_ts = ktime_sub(cur_time, vgpu_data->sched_in_time);
	vgpu_data->sched_time = ktime_add(vgpu_data->sched_time, delta_ts);
	vgpu_data->left_ts = ktime_sub(vgpu_data->left_ts, delta_ts);
	vgpu_data->sched_in_time = cur_time;
}

#define GVT_TS_BALANCE_PERIOD_MS 100
#define GVT_TS_BALANCE_STAGE_NUM 10
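/* Distribute the GVT_TS_BALANCE_PERIOD_MS window among all vGPUs on the
 * run queue in proportion to their weights. Stage 0 of every
 * GVT_TS_BALANCE_STAGE_NUM stages starts from a fresh allocation; the
 * other stages add the allocation on top of left_ts so that unused time
 * or debt from earlier stages is carried forward.
 */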
static void gvt_balance_timeslice(struct gvt_sched_data *sched_data)
{
	struct vgpu_sched_data *vgpu_data;
	struct list_head *pos;
	static u64 stage_check;
	int stage = stage_check++ % GVT_TS_BALANCE_STAGE_NUM;

	/* The timeslice accumulation is reset at stage 0, where it is
	 * allocated again without carrying over any previous debt.
	 */
	if (stage == 0) {
		int total_weight = 0;
		ktime_t fair_timeslice;

		list_for_each(pos, &sched_data->lru_runq_head) {
			vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list);
			total_weight += vgpu_data->sched_ctl.weight;
		}

		list_for_each(pos, &sched_data->lru_runq_head) {
			vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list);
			fair_timeslice = ktime_divns(ms_to_ktime(GVT_TS_BALANCE_PERIOD_MS),
						     total_weight) * vgpu_data->sched_ctl.weight;

			vgpu_data->allocated_ts = fair_timeslice;
			vgpu_data->left_ts = vgpu_data->allocated_ts;
		}
	} else {
		list_for_each(pos, &sched_data->lru_runq_head) {
			vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list);

			/* The timeslice for the next 100ms should include the
			 * leftover/debt slice from previous stages.
			 */
			vgpu_data->left_ts += vgpu_data->allocated_ts;
		}
	}
}
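/* Switch the scheduler from current_vgpu to next_vgpu. Dispatching is
 * stopped first via need_reschedule; the switch only happens once no
 * workload is in flight on any engine, after which the elapsed time is
 * charged to the outgoing vGPU and the dispatch threads are woken up
 * for the new one.
 */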
static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	enum intel_engine_id i;
	struct intel_engine_cs *engine;
	struct vgpu_sched_data *vgpu_data;
	ktime_t cur_time;

	/* no need to schedule if next_vgpu is the same as current_vgpu;
	 * let the scheduler choose next_vgpu again by setting it to NULL.
	 */
	if (scheduler->next_vgpu == scheduler->current_vgpu) {
		scheduler->next_vgpu = NULL;
		return;
	}

	/*
	 * after this flag is set, the workload dispatch thread will
	 * stop dispatching workloads for the current vgpu
	 */
	scheduler->need_reschedule = true;

	/* still have uncompleted workloads? */
	for_each_engine(engine, gvt->gt, i) {
		if (scheduler->current_workload[engine->id])
			return;
	}

	cur_time = ktime_get();
	vgpu_update_timeslice(scheduler->current_vgpu, cur_time);
	vgpu_data = scheduler->next_vgpu->sched_data;
	vgpu_data->sched_in_time = cur_time;

	/* switch current vgpu */
	scheduler->current_vgpu = scheduler->next_vgpu;
	scheduler->next_vgpu = NULL;

	scheduler->need_reschedule = false;

	/* wake up the workload dispatch threads */
	for_each_engine(engine, gvt->gt, i)
		wake_up(&scheduler->waitq[engine->id]);
}
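/* Scan the LRU run queue for a vGPU that has pending workloads. A vGPU
 * still inside its start-up priority window is chosen regardless of its
 * timeslice; otherwise a vGPU is only picked while it has timeslice left.
 */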
static struct intel_vgpu *find_busy_vgpu(struct gvt_sched_data *sched_data)
{
	struct vgpu_sched_data *vgpu_data;
	struct intel_vgpu *vgpu = NULL;
	struct list_head *head = &sched_data->lru_runq_head;
	struct list_head *pos;

	/* search for a vgpu with a pending workload */
	list_for_each(pos, head) {

		vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list);
		if (!vgpu_has_pending_workload(vgpu_data->vgpu))
			continue;

		if (vgpu_data->pri_sched) {
			if (ktime_before(ktime_get(), vgpu_data->pri_time)) {
				vgpu = vgpu_data->vgpu;
				break;
			} else
				vgpu_data->pri_sched = false;
		}

		/* Return the vGPU only if it has time slice left */
		if (vgpu_data->left_ts > 0) {
			vgpu = vgpu_data->vgpu;
			break;
		}
	}

	return vgpu;
}

/* in nanoseconds */
#define GVT_DEFAULT_TIME_SLICE 1000000
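/* One tick of the time-based scheduling (TBS) policy: pick the next busy
 * vGPU from the run queue, falling back to the idle vGPU if none is
 * found, and then try to switch to it.
 */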
static void tbs_sched_func(struct gvt_sched_data *sched_data)
{
	struct intel_gvt *gvt = sched_data->gvt;
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct vgpu_sched_data *vgpu_data;
	struct intel_vgpu *vgpu = NULL;

	/* no active vgpu, or a target has already been chosen */
	if (list_empty(&sched_data->lru_runq_head) || scheduler->next_vgpu)
		goto out;

	vgpu = find_busy_vgpu(sched_data);
	if (vgpu) {
		scheduler->next_vgpu = vgpu;
		vgpu_data = vgpu->sched_data;
		if (!vgpu_data->pri_sched) {
			/* Move the last used vGPU to the tail of lru_list */
			list_del_init(&vgpu_data->lru_list);
			list_add_tail(&vgpu_data->lru_list,
				      &sched_data->lru_runq_head);
		}
	} else {
		scheduler->next_vgpu = gvt->idle_vgpu;
	}
out:
	if (scheduler->next_vgpu)
		try_to_schedule_next_vgpu(gvt);
}
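/* Main scheduling entry, driven by the periodic scheduling request:
 * rebalance the timeslices once per GVT_TS_BALANCE_PERIOD_MS, charge the
 * elapsed time to the current vGPU and then run the TBS policy.
 */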
void intel_gvt_schedule(struct intel_gvt *gvt)
{
	struct gvt_sched_data *sched_data = gvt->scheduler.sched_data;
	ktime_t cur_time;

	mutex_lock(&gvt->sched_lock);
	cur_time = ktime_get();

	if (test_and_clear_bit(INTEL_GVT_REQUEST_SCHED,
				(void *)&gvt->service_request)) {
		if (cur_time >= sched_data->expire_time) {
			gvt_balance_timeslice(sched_data);
			sched_data->expire_time = ktime_add_ms(
				cur_time, GVT_TS_BALANCE_PERIOD_MS);
		}
	}
	clear_bit(INTEL_GVT_REQUEST_EVENT_SCHED, (void *)&gvt->service_request);

	vgpu_update_timeslice(gvt->scheduler.current_vgpu, cur_time);
	tbs_sched_func(sched_data);

	mutex_unlock(&gvt->sched_lock);
}

static enum hrtimer_restart tbs_timer_fn(struct hrtimer *timer_data)
{
	struct gvt_sched_data *data;

	data = container_of(timer_data, struct gvt_sched_data, timer);

	intel_gvt_request_service(data->gvt, INTEL_GVT_REQUEST_SCHED);

	hrtimer_add_expires_ns(&data->timer, data->period);

	return HRTIMER_RESTART;
}

static int tbs_sched_init(struct intel_gvt *gvt)
{
	struct intel_gvt_workload_scheduler *scheduler =
		&gvt->scheduler;

	struct gvt_sched_data *data;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	INIT_LIST_HEAD(&data->lru_runq_head);
	hrtimer_init(&data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	data->timer.function = tbs_timer_fn;
	data->period = GVT_DEFAULT_TIME_SLICE;
	data->gvt = gvt;

	scheduler->sched_data = data;

	return 0;
}

static void tbs_sched_clean(struct intel_gvt *gvt)
{
	struct intel_gvt_workload_scheduler *scheduler =
		&gvt->scheduler;
	struct gvt_sched_data *data = scheduler->sched_data;

	hrtimer_cancel(&data->timer);

	kfree(data);
	scheduler->sched_data = NULL;
}

static int tbs_sched_init_vgpu(struct intel_vgpu *vgpu)
{
	struct vgpu_sched_data *data;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->sched_ctl.weight = vgpu->sched_ctl.weight;
	data->vgpu = vgpu;
	INIT_LIST_HEAD(&data->lru_list);

	vgpu->sched_data = data;

	return 0;
}

static void tbs_sched_clean_vgpu(struct intel_vgpu *vgpu)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct gvt_sched_data *sched_data = gvt->scheduler.sched_data;

	kfree(vgpu->sched_data);
	vgpu->sched_data = NULL;

	/* this vgpu id has been removed */
	if (idr_is_empty(&gvt->vgpu_idr))
		hrtimer_cancel(&sched_data->timer);
}
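/* Put a vGPU on the run queue and give it the start-up priority window
 * (GVT_SCHED_VGPU_PRI_TIME seconds). The scheduling timer is started on
 * demand when the first vGPU becomes schedulable.
 */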
static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
{
	struct gvt_sched_data *sched_data = vgpu->gvt->scheduler.sched_data;
	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
	ktime_t now;

	if (!list_empty(&vgpu_data->lru_list))
		return;

	now = ktime_get();
	vgpu_data->pri_time = ktime_add(now,
					ktime_set(GVT_SCHED_VGPU_PRI_TIME, 0));
	vgpu_data->pri_sched = true;

	list_add(&vgpu_data->lru_list, &sched_data->lru_runq_head);

	if (!hrtimer_active(&sched_data->timer))
		hrtimer_start(&sched_data->timer, ktime_add_ns(ktime_get(),
			sched_data->period), HRTIMER_MODE_ABS);
	vgpu_data->active = true;
}

static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu)
{
	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;

	list_del_init(&vgpu_data->lru_list);
	vgpu_data->active = false;
}

static const struct intel_gvt_sched_policy_ops tbs_schedule_ops = {
	.init = tbs_sched_init,
	.clean = tbs_sched_clean,
	.init_vgpu = tbs_sched_init_vgpu,
	.clean_vgpu = tbs_sched_clean_vgpu,
	.start_schedule = tbs_sched_start_schedule,
	.stop_schedule = tbs_sched_stop_schedule,
};

int intel_gvt_init_sched_policy(struct intel_gvt *gvt)
{
	int ret;

	mutex_lock(&gvt->sched_lock);
	gvt->scheduler.sched_ops = &tbs_schedule_ops;
	ret = gvt->scheduler.sched_ops->init(gvt);
	mutex_unlock(&gvt->sched_lock);

	return ret;
}

void intel_gvt_clean_sched_policy(struct intel_gvt *gvt)
{
	mutex_lock(&gvt->sched_lock);
	gvt->scheduler.sched_ops->clean(gvt);
	mutex_unlock(&gvt->sched_lock);
}

/* For the per-vGPU scheduler policy there are two pieces of per-vGPU
 * data: sched_data and sched_ctl. Both are treated as part of the
 * global scheduler and are protected by gvt->sched_lock. Callers must
 * decide for themselves whether vgpu_lock needs to be held outside.
 */

int intel_vgpu_init_sched_policy(struct intel_vgpu *vgpu)
{
	int ret;

	mutex_lock(&vgpu->gvt->sched_lock);
	ret = vgpu->gvt->scheduler.sched_ops->init_vgpu(vgpu);
	mutex_unlock(&vgpu->gvt->sched_lock);

	return ret;
}

void intel_vgpu_clean_sched_policy(struct intel_vgpu *vgpu)
{
	mutex_lock(&vgpu->gvt->sched_lock);
	vgpu->gvt->scheduler.sched_ops->clean_vgpu(vgpu);
	mutex_unlock(&vgpu->gvt->sched_lock);
}

void intel_vgpu_start_schedule(struct intel_vgpu *vgpu)
{
	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;

	mutex_lock(&vgpu->gvt->sched_lock);
	if (!vgpu_data->active) {
		gvt_dbg_core("vgpu%d: start schedule\n", vgpu->id);
		vgpu->gvt->scheduler.sched_ops->start_schedule(vgpu);
	}
	mutex_unlock(&vgpu->gvt->sched_lock);
}

void intel_gvt_kick_schedule(struct intel_gvt *gvt)
{
	mutex_lock(&gvt->sched_lock);
	intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED);
	mutex_unlock(&gvt->sched_lock);
}

void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
{
	struct intel_gvt_workload_scheduler *scheduler =
		&vgpu->gvt->scheduler;
	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
	struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	if (!vgpu_data->active)
		return;

	gvt_dbg_core("vgpu%d: stop schedule\n", vgpu->id);

	mutex_lock(&vgpu->gvt->sched_lock);
	scheduler->sched_ops->stop_schedule(vgpu);

	if (scheduler->next_vgpu == vgpu)
		scheduler->next_vgpu = NULL;

	if (scheduler->current_vgpu == vgpu) {
		/* stop workload dispatching */
		scheduler->need_reschedule = true;
		scheduler->current_vgpu = NULL;
	}

	intel_runtime_pm_get(&dev_priv->runtime_pm);
	spin_lock_bh(&scheduler->mmio_context_lock);
	for_each_engine(engine, vgpu->gvt->gt, id) {
		if (scheduler->engine_owner[engine->id] == vgpu) {
			intel_gvt_switch_mmio(vgpu, NULL, engine);
			scheduler->engine_owner[engine->id] = NULL;
		}
	}
	spin_unlock_bh(&scheduler->mmio_context_lock);
	intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
	mutex_unlock(&vgpu->gvt->sched_lock);
}