/* * Copyright(c) 2011-2016 Intel Corporation. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * * Authors: * Anhua Xu * Kevin Tian <kevin.tian@intel.com> * * Contributors: * Min He <min.he@intel.com> * Bing Niu <bing.niu@intel.com> * Zhi Wang <zhi.a.wang@intel.com> *
*/
/* NOTE(review): fragment — the enclosing function (a timeslice-rebalancing
 * routine; the variables `stage` and the vgpu list it presumably iterates
 * are declared outside this chunk) begins before the visible text, and the
 * brace opened here is not closed in view. TODO confirm against full file.
 */
/* Timeslice accumulation is reset at stage 0, so it is * allocated afresh without carrying over previous debt.
 */ if (stage == 0) { int total_weight = 0;
ktime_t fair_timeslice;
/* NOTE(review): fragment — the enclosing function's signature (a scheduler
 * tick handler; locals `scheduler`, `sched_data`, `vgpu`, `vgpu_data`,
 * `engine`, `i`, `gvt` are declared above this chunk) is not visible here.
 * Only the tail of the body and its closing brace appear below. TODO
 * confirm against the full file before relying on these notes.
 */
/* no need to schedule if next_vgpu is the same as current_vgpu, * let the scheduler choose next_vgpu again by setting it to NULL.
 */ if (scheduler->next_vgpu == scheduler->current_vgpu) {
scheduler->next_vgpu = NULL; return;
}
/* * after the flag is set, the workload dispatch thread will * stop dispatching workloads for the current vgpu
 */
scheduler->need_reschedule = true;
/* still have an uncompleted workload on any engine? bail out — a later
 * invocation retries once the in-flight workloads have drained. */
for_each_engine(engine, gvt->gt, i) { if (scheduler->current_workload[engine->id]) return;
}
/* nothing to pick: no active vgpu queued, or a target is already chosen */ if (list_empty(&sched_data->lru_runq_head) || scheduler->next_vgpu) goto out;
vgpu = find_busy_vgpu(sched_data); if (vgpu) {
scheduler->next_vgpu = vgpu;
/* LRU rotation: the vgpu just picked moves to the tail of the run
 * queue so other vgpus get a turn (skipped for priority-scheduled
 * vgpus, per the pri_sched flag). */
vgpu_data = vgpu->sched_data; if (!vgpu_data->pri_sched) { /* Move the last used vGPU to the tail of lru_list */
list_del_init(&vgpu_data->lru_list);
list_add_tail(&vgpu_data->lru_list,
&sched_data->lru_runq_head);
}
} else {
/* no busy vgpu found: fall back to the idle vgpu placeholder */
scheduler->next_vgpu = gvt->idle_vgpu;
}
out: if (scheduler->next_vgpu)
try_to_schedule_next_vgpu(gvt);
}
/* for per-vgpu scheduler policy, there are 2 per-vgpu data:
 * sched_data, and sched_ctl. We see these 2 data as part of
 * the global scheduler which are protected by gvt->sched_lock.
 * Caller should make their decision if the vgpu_lock should
 * be held outside.
 *
 * Returns 0 on success, or a negative error code from the
 * policy's init_vgpu hook.
 */
int intel_vgpu_init_sched_policy(struct intel_vgpu *vgpu)
{
	int ret;

	/* sched_lock serializes access to the global scheduler state */
	mutex_lock(&vgpu->gvt->sched_lock);
	ret = vgpu->gvt->scheduler.sched_ops->init_vgpu(vgpu);
	mutex_unlock(&vgpu->gvt->sched_lock);

	/* NOTE(review): the visible source was truncated here by injected
	 * non-code text; the return of the hook's result is the only
	 * sensible completion for this int-returning function. */
	return ret;
}