/* * Copyright(c) 2011-2016 Intel Corporation. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * * Authors: * Eddie Dong <eddie.dong@intel.com> * Kevin Tian <kevin.tian@intel.com> * * Contributors: * Zhi Wang <zhi.a.wang@intel.com> * Changbin Du <changbin.du@intel.com> * Zhenyu Wang <zhenyuw@linux.intel.com> * Tina Zhang <tina.zhang@intel.com> * Bing Niu <bing.niu@intel.com> *
*/
/* * Use lri command to initialize the mmio which is in context state image for * inhibit context, it contains tracked engine mmio, render_mocs and * render_mocs_l3cc.
*/ int intel_vgpu_restore_inhibit_context(struct intel_vgpu *vgpu, struct i915_request *req)
{ int ret;
u32 *cs;
cs = intel_ring_begin(req, 2); if (IS_ERR(cs)) return PTR_ERR(cs);
if (drm_WARN_ON(&engine->i915->drm, engine->id >= cnt)) return;
if (!test_and_clear_bit(engine->id, (void *)s->tlb_handle_pending)) return;
reg = _MMIO(regs[engine->id]);
/* WaForceWakeRenderDuringMmioTLBInvalidate:skl * we need to put a forcewake when invalidating RCS TLB caches, * otherwise device can go to RC6 state and interrupt invalidation * process
*/
fw = intel_uncore_forcewake_for_reg(uncore, reg,
FW_REG_READ | FW_REG_WRITE); if (engine->id == RCS0 && GRAPHICS_VER(engine->i915) >= 9)
fw |= FORCEWAKE_RENDER;
intel_uncore_forcewake_get(uncore, fw);
intel_uncore_write_fw(uncore, reg, 0x1);
if (wait_for_atomic(intel_uncore_read_fw(uncore, reg) == 0, 50))
gvt_vgpu_err("timeout in invalidate ring %s tlb\n",
engine->name); else
vgpu_vreg_t(vgpu, reg) = 0;
intel_uncore_forcewake_put(uncore, fw);
gvt_dbg_core("invalidate TLB for ring %s\n", engine->name);
}
if (GRAPHICS_VER(engine->i915) >= 9)
switch_mocs(pre, next, engine);
for (mmio = engine->i915->gvt->engine_mmio_list.mmio;
i915_mmio_reg_valid(mmio->reg); mmio++) { if (mmio->id != engine->id) continue; /* * No need to do save or restore of the mmio which is in context * state image on gen9, it's initialized by lri command and * save or restore with context together.
*/ if (GRAPHICS_VER(engine->i915) == 9 && mmio->in_context) continue;
// restore if (next) {
s = &next->submission; /* * No need to restore the mmio which is in context state * image if it's not inhibit context, it will restore * itself.
*/ if (mmio->in_context &&
!is_inhibit_context(s->shadow[engine->id])) continue;
if (next)
handle_tlb_pending_event(next, engine);
}
/** * intel_gvt_switch_mmio - switch mmio context of specific engine * @pre: the last vGPU that own the engine * @next: the vGPU to switch to * @engine: the engine * * If pre is null indicates that host own the engine. If next is null * indicates that we are switching to host workload.
*/ void intel_gvt_switch_mmio(struct intel_vgpu *pre, struct intel_vgpu *next, conststruct intel_engine_cs *engine)
{ if (WARN(!pre && !next, "switch ring %s from host to HOST\n",
engine->name)) return;
gvt_dbg_render("switch ring %s from %s to %s\n", engine->name,
pre ? "vGPU" : "host", next ? "vGPU" : "HOST");
/** * We are using raw mmio access wrapper to improve the * performance for batch mmio read/write, so we need * handle forcewake manually.
*/
intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);
switch_mmio(pre, next, engine);
intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);
}
/*
 * NOTE(review): the following German text is extraction residue (a website
 * disclaimer) accidentally appended to this source file; it is not code.
 * Preserved, translated: "The information on this website has been compiled
 * carefully to the best of our knowledge. However, neither completeness,
 * correctness, nor quality of the provided information is guaranteed.
 * Remark: the colored syntax highlighting and the measurement are still
 * experimental."
 */