/* * Copyright(c) 2011-2016 Intel Corporation. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * * Authors: * Kevin Tian <kevin.tian@intel.com> * Eddie Dong <eddie.dong@intel.com> * * Contributors: * Niu Bing <bing.niu@intel.com> * Zhi Wang <zhi.a.wang@intel.com> *
*/
/*
 * NOTE(review): this span is the interior of a larger struct (presumably
 * struct intel_vgpu — TODO confirm); its opening declaration is not
 * visible in this chunk.
 *
 * Both sched_data and sched_ctl can be seen as a part of the global gvt
 * scheduler structure, so the two fields below are protected by
 * sched_lock, not vgpu_lock.
 */
void *sched_data;
struct vgpu_sched_ctl sched_ctl;

/* Per-vGPU virtualized hardware state blocks. */
struct intel_vgpu_fence fence;
struct intel_vgpu_gm gm;
struct intel_vgpu_cfg_space cfg_space;
struct intel_vgpu_mmio mmio;
struct intel_vgpu_irq irq;
struct intel_vgpu_gtt gtt;
struct intel_vgpu_opregion opregion;
struct intel_vgpu_display display;
struct intel_vgpu_submission submission;
struct radix_tree_root page_track_tree;

u32 hws_pga[I915_NUM_ENGINES];
/* Set on PCI_D3, reset on DMLR, not reflecting the actual PM state */
bool d3_entered;

/* VFIO regions and the eventfd used to deliver MSI to the guest. */
struct vfio_region *region;
int num_regions;
struct eventfd_ctx *msi_trigger;

/*
 * Two caches are used to avoid mapping duplicated pages (eg.
 * scratch pages). This helps to reduce dma setup overhead.
 */
struct rb_root gfn_cache;
struct rb_root dma_addr_cache;
unsignedlong nr_cache_entries; /* FIXME: garbled token — should read "unsigned long" */
struct mutex cache_lock;
struct intel_gvt_mmio {
u16 *mmio_attribute; /* Register contains RO bits */ #define F_RO (1 << 0) /* Register contains graphics address */ #define F_GMADR (1 << 1) /* Mode mask registers with high 16 bits as the mask bits */ #define F_MODE_MASK (1 << 2) /* This reg can be accessed by GPU commands */ #define F_CMD_ACCESS (1 << 3) /* This reg has been accessed by a VM */ #define F_ACCESSED (1 << 4) /* This reg requires save & restore during host PM suspend/resume */ #define F_PM_SAVE (1 << 5) /* This reg could be accessed by unaligned address */ #define F_UNALIGN (1 << 6) /* This reg is in GVT's mmio save-restor list and in hardware * logical context image
*/ #define F_SR_IN_CTX (1 << 7) /* Value of command write of this reg needs to be patched */ #define F_CMD_WRITE_PATCH (1 << 8)
/* * A vGPU with a weight of 8 will get twice as much GPU as a vGPU with * a weight of 4 on a contended host, different vGPU type has different * weight set. Legal weights range from 1 to 16.
*/ unsignedint weight; enum intel_vgpu_edid edid; constchar *name;
};
struct intel_gvt {
	/*
	 * GVT scope lock: protects GVT itself and all resources currently
	 * not yet protected by the finer-grained locks (vgpu and scheduler
	 * lock).
	 */
	struct mutex lock;
	/* scheduler scope lock, protect gvt and vgpu schedule related data */
	struct mutex sched_lock;

	/*
	 * service_request is always used in bit operation, we should always
	 * use it with atomic bit ops so that no need to use gvt big lock.
	 */
	unsignedlong service_request; /* FIXME: garbled token — should read "unsigned long" */
	/*
	 * NOTE(review): the closing brace of struct intel_gvt is not visible
	 * in this chunk; the remaining members appear to have been lost in
	 * extraction.
	 */
/** * intel_gvt_mmio_set_accessed - mark a MMIO has been accessed * @gvt: a GVT device * @offset: register offset *
*/ staticinlinevoid intel_gvt_mmio_set_accessed( struct intel_gvt *gvt, unsignedint offset)
{
gvt->mmio.mmio_attribute[offset >> 2] |= F_ACCESSED;
}
/** * intel_gvt_mmio_is_cmd_accessible - if a MMIO could be accessed by command * @gvt: a GVT device * @offset: register offset * * Returns: * True if an MMIO is able to be accessed by GPU commands
*/ staticinlinebool intel_gvt_mmio_is_cmd_accessible( struct intel_gvt *gvt, unsignedint offset)
{ return gvt->mmio.mmio_attribute[offset >> 2] & F_CMD_ACCESS;
}
/** * intel_gvt_mmio_set_cmd_accessible - * mark a MMIO could be accessible by command * @gvt: a GVT device * @offset: register offset *
*/ staticinlinevoid intel_gvt_mmio_set_cmd_accessible( struct intel_gvt *gvt, unsignedint offset)
{
gvt->mmio.mmio_attribute[offset >> 2] |= F_CMD_ACCESS;
}
/** * intel_gvt_mmio_is_unalign - mark a MMIO could be accessed unaligned * @gvt: a GVT device * @offset: register offset *
*/ staticinlinebool intel_gvt_mmio_is_unalign( struct intel_gvt *gvt, unsignedint offset)
{ return gvt->mmio.mmio_attribute[offset >> 2] & F_UNALIGN;
}
/** * intel_gvt_mmio_has_mode_mask - if a MMIO has a mode mask * @gvt: a GVT device * @offset: register offset * * Returns: * True if a MMIO has a mode mask in its higher 16 bits, false if it isn't. *
*/ staticinlinebool intel_gvt_mmio_has_mode_mask( struct intel_gvt *gvt, unsignedint offset)
{ return gvt->mmio.mmio_attribute[offset >> 2] & F_MODE_MASK;
}
/** * intel_gvt_mmio_is_sr_in_ctx - * check if an MMIO has F_SR_IN_CTX mask * @gvt: a GVT device * @offset: register offset * * Returns: * True if an MMIO has an F_SR_IN_CTX mask, false if it isn't. *
*/ staticinlinebool intel_gvt_mmio_is_sr_in_ctx( struct intel_gvt *gvt, unsignedint offset)
{ return gvt->mmio.mmio_attribute[offset >> 2] & F_SR_IN_CTX;
}
/** * intel_gvt_mmio_set_sr_in_ctx - * mask an MMIO in GVT's mmio save-restore list and also * in hardware logical context image * @gvt: a GVT device * @offset: register offset *
*/ staticinlinevoid intel_gvt_mmio_set_sr_in_ctx( struct intel_gvt *gvt, unsignedint offset)
{
gvt->mmio.mmio_attribute[offset >> 2] |= F_SR_IN_CTX;
}
void intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu); /** * intel_gvt_mmio_set_cmd_write_patch - * mark an MMIO if its cmd write needs to be * patched * @gvt: a GVT device * @offset: register offset *
*/ staticinlinevoid intel_gvt_mmio_set_cmd_write_patch( struct intel_gvt *gvt, unsignedint offset)
{
gvt->mmio.mmio_attribute[offset >> 2] |= F_CMD_WRITE_PATCH;
}
/** * intel_gvt_mmio_is_cmd_write_patch - check if an mmio's cmd access needs to * be patched * @gvt: a GVT device * @offset: register offset * * Returns: * True if GPU command write to an MMIO should be patched.
*/ staticinlinebool intel_gvt_mmio_is_cmd_write_patch( struct intel_gvt *gvt, unsignedint offset)
{ return gvt->mmio.mmio_attribute[offset >> 2] & F_CMD_WRITE_PATCH;
}
/** * intel_gvt_read_gpa - copy data from GPA to host data buffer * @vgpu: a vGPU * @gpa: guest physical address * @buf: host data buffer * @len: data length * * Returns: * Zero on success, negative error code if failed.
*/ staticinlineint intel_gvt_read_gpa(struct intel_vgpu *vgpu, unsignedlong gpa, void *buf, unsignedlong len)
{ if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status)) return -ESRCH; return vfio_dma_rw(&vgpu->vfio_device, gpa, buf, len, false);
}
/** * intel_gvt_write_gpa - copy data from host data buffer to GPA * @vgpu: a vGPU * @gpa: guest physical address * @buf: host data buffer * @len: data length * * Returns: * Zero on success, negative error code if failed.
*/ staticinlineint intel_gvt_write_gpa(struct intel_vgpu *vgpu, unsignedlong gpa, void *buf, unsignedlong len)
{ if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status)) return -ESRCH; return vfio_dma_rw(&vgpu->vfio_device, gpa, buf, len, true);
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.