/**
 * DOC: fence register handling
 *
 * Important to avoid confusion: "fences" in the i915 driver are not execution
 * fences used to track command completion but hardware detiler objects which
 * wrap a given range of the global GTT. Each platform has only a fairly limited
 * set of these objects.
 *
 * Fences are used to detile GTT memory mappings. They're also connected to the
 * hardware frontbuffer render tracking and hence interact with frontbuffer
 * compression. Furthermore, on older platforms fences are required for tiled
 * objects used by the display engine. They can also be used by the render
 * engine - they're required for blitter commands and are optional for render
 * commands. But on gen4+ both display (with the exception of fbc) and rendering
 * have their own tiling state bits and don't need fences.
 *
 * Also note that fences only support X and Y tiling and hence can't be used for
 * the fancier new tiling formats like W, Ys and Yf.
 *
 * Finally note that because fences are such a restricted resource they're
 * dynamically associated with objects. Furthermore, fence state is committed to
 * the hardware lazily to avoid unnecessary stalls on gen2/3. Therefore code must
 * explicitly call i915_gem_object_get_fence() to synchronize fencing status
 * for cpu access. Also note that some code wants an unfenced view; for those
 * cases the fence can be removed forcefully with i915_gem_object_put_fence().
 *
 * Internally these functions will synchronize with userspace access by removing
 * CPU ptes into GTT mmaps (not the GTT ptes themselves) as needed.
 */
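/*
 * Illustrative only: the modern entry points are per-VMA rather than
 * per-object, so a (hypothetical) caller wanting fenced CPU access through
 * a GTT mmap would follow roughly this pattern, assuming it already holds
 * a runtime-pm wakeref:
 *
 *	err = i915_vma_pin_fence(vma);
 *	if (err)
 *		return err;
 *
 *	... tiled CPU access through the fenced GTT range ...
 *
 *	i915_vma_unpin_fence(vma);
 */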
	val = 0;
	if (fence->tiling) {
		unsigned int stride = fence->stride;

		GEM_BUG_ON(!IS_ALIGNED(stride, 128));

		val = fence->start + fence->size - I965_FENCE_PAGE;
		val <<= 32;
		val |= fence->start;
		val |= (u64)((stride / 128) - 1) << fence_pitch_shift;
		if (fence->tiling == I915_TILING_Y)
			val |= BIT(I965_FENCE_TILING_Y_SHIFT);
		val |= I965_FENCE_REG_VALID;
	}
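	/*
	 * Worked example (illustrative numbers): an X-tiled fence with
	 * start = 0x00100000, size = 0x00100000 and stride = 512 packs as
	 *
	 *	upper dword: 0x00100000 + 0x00100000 - I965_FENCE_PAGE (end)
	 *	lower dword: 0x00100000 | ((512 / 128) - 1) << fence_pitch_shift
	 *	             | I965_FENCE_REG_VALID
	 *
	 * i.e. the register describes the [start, end] range plus the pitch
	 * in 128-byte units, with the tiling and valid bits OR'ed into the
	 * low dword.
	 */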
	if (!pipelined) {
		struct intel_uncore *uncore = fence_to_uncore(fence);

		/*
		 * To w/a incoherency with non-atomic 64-bit register updates,
		 * we split the 64-bit update into two 32-bit writes. In order
		 * for a partial fence not to be evaluated between writes, we
		 * precede the update with write to turn off the fence register,
		 * and only enable the fence as the last step.
		 *
		 * For extra levels of paranoia, we make sure each step lands
		 * before applying the next step.
		 */
		intel_uncore_write_fw(uncore, fence_reg_lo, 0);
		intel_uncore_posting_read_fw(uncore, fence_reg_lo);
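		/*
		 * The remaining steps of the sequence described above
		 * (sketch): write the high dword, then the low dword with
		 * the valid bit, and flush with a final posting read.
		 */
		intel_uncore_write_fw(uncore, fence_reg_hi, upper_32_bits(val));
		intel_uncore_write_fw(uncore, fence_reg_lo, lower_32_bits(val));
		intel_uncore_posting_read_fw(uncore, fence_reg_lo);
	}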
		val = fence->start;
		if (is_y_tiled)
			val |= BIT(I830_FENCE_TILING_Y_SHIFT);
		val |= I915_FENCE_SIZE_BITS(fence->size);
		val |= ilog2(stride) << I830_FENCE_PITCH_SHIFT;
	val = 0;
	if (fence->tiling) {
		unsigned int stride = fence->stride;

		val = fence->start;
		if (fence->tiling == I915_TILING_Y)
			val |= BIT(I830_FENCE_TILING_Y_SHIFT);
		val |= I830_FENCE_SIZE_BITS(fence->size);
		val |= ilog2(stride / 128) << I830_FENCE_PITCH_SHIFT;
		val |= I830_FENCE_REG_VALID;
	}
	/*
	 * Previous access through the fence register is marshalled by
	 * the mb() inside the fault handlers (i915_gem_release_mmaps)
	 * and explicitly managed for internal users.
	 */
	old = xchg(&fence->vma, NULL);
	if (old) {
		/* XXX Ideally we would move the waiting to outside the mutex */
		ret = i915_active_wait(&fence->active);
		if (ret) {
			fence->vma = old;
			return ret;
		}

		i915_vma_flush_writes(old);

		/*
		 * Ensure that all userspace CPU access is completed before
		 * stealing the fence.
		 */
		if (old != vma) {
			GEM_BUG_ON(old->fence != fence);
			i915_vma_revoke_mmap(old);
			old->fence = NULL;
		}

		list_move(&fence->link, &ggtt->fence_list);
	}
	/*
	 * We only need to update the register itself if the device is awake.
	 * If the device is currently powered down, we will defer the write
	 * to the runtime resume, see intel_ggtt_restore_fences().
	 *
	 * This only works for removing the fence register, on acquisition
	 * the caller must hold the rpm wakeref. The fence register must
	 * be cleared before we can use any other fences to ensure that
	 * the new fences do not overlap the elided clears, confusing HW.
	 */
	wakeref = intel_runtime_pm_get_if_in_use(uncore->rpm);
	if (!wakeref) {
		GEM_BUG_ON(vma);
		return 0;
	}
	WRITE_ONCE(fence->vma, vma);
	fence_write(fence);

	if (vma) {
		vma->fence = fence;
		list_move_tail(&fence->link, &ggtt->fence_list);
	}

	intel_runtime_pm_put(uncore->rpm, wakeref);
	return 0;
}
/**
 * i915_vma_revoke_fence - force-remove fence for a VMA
 * @vma: vma to map linearly (not through a fence reg)
 *
 * This function force-removes any fence from the given object, which is useful
 * if the kernel wants to do untiled GTT access.
 */
void i915_vma_revoke_fence(struct i915_vma *vma)
{
	struct i915_fence_reg *fence = vma->fence;
	intel_wakeref_t wakeref;

	lockdep_assert_held(&vma->vm->mutex);
	if (!fence)
		return;
	/*
	 * Skip the write to HW if and only if the device is currently
	 * suspended.
	 *
	 * If the driver does not currently hold a wakeref (if_in_use == 0),
	 * the device may currently be runtime suspended, or it may be woken
	 * up before the suspend takes place. If the device is not suspended
	 * (powered down) and we skip clearing the fence register, the HW is
	 * left in an undefined state where we may end up with multiple
	 * registers overlapping.
	 */
	with_intel_runtime_pm_if_active(fence_to_uncore(fence)->rpm, wakeref)
		fence_write(fence);
}
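/*
 * Illustrative caller pattern (sketch): revocation happens under the VM
 * mutex, after which the VMA is only reachable as a linear, unfenced view:
 *
 *	mutex_lock(&vma->vm->mutex);
 *	i915_vma_revoke_fence(vma);
 *	mutex_unlock(&vma->vm->mutex);
 */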
	/* Just update our place in the LRU if our fence is getting reused. */
	if (vma->fence) {
		fence = vma->fence;
		GEM_BUG_ON(fence->vma != vma);
		atomic_inc(&fence->pin_count);
		if (!fence->dirty) {
			list_move_tail(&fence->link, &ggtt->fence_list);
			return 0;
		}
	} else if (set) {
		fence = fence_find(ggtt);
		if (IS_ERR(fence))
			return PTR_ERR(fence);
/**
 * i915_vma_pin_fence - set up fencing for a vma
 * @vma: vma to map through a fence reg
 *
 * When mapping objects through the GTT, userspace wants to be able to write
 * to them without having to worry about swizzling if the object is tiled.
 * This function walks the fence regs looking for a free one for @obj,
 * stealing one if it can't find any.
 *
 * It then sets up the reg based on the object's properties: address, pitch
 * and tiling format.
 *
 * For an untiled surface, this removes any existing fence.
 *
 * Returns:
 * 0 on success, negative error code on failure.
 */
int i915_vma_pin_fence(struct i915_vma *vma)
{
	int err;

	if (!vma->fence && !i915_gem_object_is_tiled(vma->obj))
		return 0;

	/*
	 * Note that we revoke fences on runtime suspend. Therefore the user
	 * must keep the device awake whilst using the fence.
	 */
	assert_rpm_wakelock_held(vma->vm->gt->uncore->rpm);
	GEM_BUG_ON(!i915_vma_is_ggtt(vma));

	err = mutex_lock_interruptible(&vma->vm->mutex);
	if (err)
		return err;
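/*
 * Illustrative usage (sketch): the pin must be balanced by
 * i915_vma_unpin_fence(), and the device must be kept awake for as long
 * as the fence is in use:
 *
 *	wakeref = intel_runtime_pm_get(vma->vm->gt->uncore->rpm);
 *	err = i915_vma_pin_fence(vma);
 *	if (!err && vma->fence) {
 *		... fenced GTT access ...
 *		i915_vma_unpin_fence(vma);
 *	}
 *	intel_runtime_pm_put(vma->vm->gt->uncore->rpm, wakeref);
 */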
/**
 * i915_reserve_fence - Reserve a fence for vGPU
 * @ggtt: Global GTT
 *
 * This function walks the fence regs looking for a free one and removes
 * it from the fence_list. It is used to reserve a fence for vGPU to use.
 */
struct i915_fence_reg *i915_reserve_fence(struct i915_ggtt *ggtt)
{
	struct i915_fence_reg *fence;
	int count;
	int ret;

	lockdep_assert_held(&ggtt->vm.mutex);

	/* Keep at least one fence available for the display engine. */
	count = 0;
	list_for_each_entry(fence, &ggtt->fence_list, link)
		count += !atomic_read(&fence->pin_count);
	if (count <= 1)
		return ERR_PTR(-ENOSPC);

	fence = fence_find(ggtt);
	if (IS_ERR(fence))
		return fence;

	if (fence->vma) {
		/* Force-remove fence from VMA */
		ret = fence_update(fence, NULL);
		if (ret)
			return ERR_PTR(ret);
	}

	list_del(&fence->link);

	return fence;
}
/**
 * i915_unreserve_fence - Reclaim a reserved fence
 * @fence: the fence reg
 *
 * This function adds a reserved fence register from vGPU back to the
 * fence_list.
 */
void i915_unreserve_fence(struct i915_fence_reg *fence)
{
	struct i915_ggtt *ggtt = fence->ggtt;

	lockdep_assert_held(&ggtt->vm.mutex);

	list_add(&fence->link, &ggtt->fence_list);
}
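/*
 * Illustrative pairing (sketch): both helpers assert ggtt->vm.mutex, so a
 * (hypothetical) vGPU caller brackets them accordingly:
 *
 *	mutex_lock(&ggtt->vm.mutex);
 *	fence = i915_reserve_fence(ggtt);
 *	mutex_unlock(&ggtt->vm.mutex);
 *	if (IS_ERR(fence))
 *		return PTR_ERR(fence);
 *
 *	... exclusive use of the reserved register ...
 *
 *	mutex_lock(&ggtt->vm.mutex);
 *	i915_unreserve_fence(fence);
 *	mutex_unlock(&ggtt->vm.mutex);
 */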
/**
 * intel_ggtt_restore_fences - restore fence state
 * @ggtt: Global GTT
 *
 * Restore the hw fence state to match the software tracking again, to be called
 * after a gpu reset and on resume. Note that on runtime suspend we only cancel
 * the fences, to be reacquired by the user later.
 */
void intel_ggtt_restore_fences(struct i915_ggtt *ggtt)
{
	int i;

	for (i = 0; i < ggtt->num_fences; i++)
		fence_write(&ggtt->fence_regs[i]);
}
/**
 * DOC: tiling swizzling details
 *
 * The idea behind tiling is to increase cache hit rates by rearranging
 * pixel data so that a group of pixel accesses are in the same cacheline.
 * Performance improvement from doing this on the back/depth buffer is on
 * the order of 30%.
 *
 * Intel architectures make this somewhat more complicated, though, by
 * adjustments made to addressing of data when the memory is in interleaved
 * mode (matched pairs of DIMMS) to improve memory bandwidth.
 * For interleaved memory, the CPU sends every sequential 64 bytes
 * to an alternate memory channel so it can get the bandwidth from both.
 *
 * The GPU also rearranges its accesses for increased bandwidth to interleaved
 * memory, and it matches what the CPU does for non-tiled. However, when tiled
 * it does it a little differently, since one walks addresses not just in the
 * X direction but also Y. So, along with alternating channels when bit
 * 6 of the address flips, it also alternates when other bits flip -- Bits 9
 * (every 512 bytes, an X tile scanline) and 10 (every two X tile scanlines)
 * are common to both the 915 and 965-class hardware.
 *
 * The CPU also sometimes XORs in higher bits as well, to improve
 * bandwidth doing strided access like we do so frequently in graphics. This
 * is called "Channel XOR Randomization" in the MCH documentation. The result
 * is that the CPU is XORing in either bit 11 or bit 17 to bit 6 of its address
 * decode.
 *
 * All of this bit 6 XORing has an effect on our memory management,
 * as we need to make sure that the 3d driver can correctly address object
 * contents.
 *
 * If we don't have interleaved memory, all tiling is safe and no swizzling is
 * required.
 *
 * When bit 17 is XORed in, we simply refuse to tile at all. Bit
 * 17 is not just a page offset, so as we page an object out and back in,
 * individual pages in it will have different bit 17 addresses, resulting in
 * each 64 bytes being swapped with its neighbor!
 *
 * Otherwise, if interleaved, we have to tell the 3d driver what address
 * swizzling it needs to do, since it's writing with the CPU to the pages
 * (bit 6 and potentially bit 11 XORed in), and the GPU is reading from the
 * pages (bit 6, 9, and 10 XORed in), resulting in a cumulative bit swizzling
 * required by the CPU of XORing in bit 6, 9, 10, and potentially 11, in order
 * to match what the GPU expects.
 */
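/*
 * A minimal sketch (hypothetical helper, not part of the driver) of the
 * address adjustment the DOC above describes: for the common 9/10 swizzle,
 * bit 6 of a linear offset is XOR'ed with bits 9 and 10 before the access
 * (and additionally with bit 11 for I915_BIT_6_SWIZZLE_9_10_11).
 */
static inline unsigned long __swizzle_offset_9_10(unsigned long offset)
{
	/* Fold bits 9 and 10 down into bit 6 of the offset. */
	return offset ^ ((((offset >> 9) ^ (offset >> 10)) & 1) << 6);
}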
/**
 * detect_bit_6_swizzle - detect bit 6 swizzling pattern
 * @ggtt: Global GGTT
 *
 * Detects bit 6 swizzling of address lookup between IGD access and CPU
 * access through main memory.
 */
static void detect_bit_6_swizzle(struct i915_ggtt *ggtt)
{
	struct intel_uncore *uncore = ggtt->vm.gt->uncore;
	struct drm_i915_private *i915 = ggtt->vm.i915;
	u32 swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
	u32 swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;

	if (GRAPHICS_VER(i915) >= 8 || IS_VALLEYVIEW(i915)) {
		/*
		 * On BDW+, swizzling is not used. We leave the CPU memory
		 * controller in charge of optimizing memory accesses without
		 * the extra address manipulation GPU side.
		 *
		 * VLV and CHV don't have GPU swizzling.
		 */
		swizzle_x = I915_BIT_6_SWIZZLE_NONE;
		swizzle_y = I915_BIT_6_SWIZZLE_NONE;
	} else if (GRAPHICS_VER(i915) >= 6) {
		if (i915->preserve_bios_swizzle) {
			if (intel_uncore_read(uncore, DISP_ARB_CTL) &
			    DISP_TILE_SURFACE_SWIZZLING) {
				swizzle_x = I915_BIT_6_SWIZZLE_9_10;
				swizzle_y = I915_BIT_6_SWIZZLE_9;
			} else {
				swizzle_x = I915_BIT_6_SWIZZLE_NONE;
				swizzle_y = I915_BIT_6_SWIZZLE_NONE;
			}
		} else {
			u32 dimm_c0, dimm_c1;

			dimm_c0 = intel_uncore_read(uncore, MAD_DIMM_C0);
			dimm_c1 = intel_uncore_read(uncore, MAD_DIMM_C1);
			dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
			dimm_c1 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
			/*
			 * Enable swizzling when the channels are populated
			 * with identically sized dimms. We don't need to check
			 * the 3rd channel because no cpu with gpu attached
			 * ships in that configuration. Also, swizzling only
			 * makes sense for 2 channels anyway.
			 */
			if (dimm_c0 == dimm_c1) {
				swizzle_x = I915_BIT_6_SWIZZLE_9_10;
				swizzle_y = I915_BIT_6_SWIZZLE_9;
			} else {
				swizzle_x = I915_BIT_6_SWIZZLE_NONE;
				swizzle_y = I915_BIT_6_SWIZZLE_NONE;
			}
		}
	} else if (GRAPHICS_VER(i915) == 5) {
		/*
		 * On Ironlake, the GPU always uses the same swizzling setup,
		 * whatever the DRAM config.
		 */
		swizzle_x = I915_BIT_6_SWIZZLE_9_10;
		swizzle_y = I915_BIT_6_SWIZZLE_9;
	} else if (GRAPHICS_VER(i915) == 2) {
		/*
		 * As far as we know, the 865 doesn't have these bit 6
		 * swizzling issues.
		 */
		swizzle_x = I915_BIT_6_SWIZZLE_NONE;
		swizzle_y = I915_BIT_6_SWIZZLE_NONE;
	} else if (IS_G45(i915) || IS_I965G(i915) || IS_G33(i915)) {
		/*
		 * The 965, G33, and newer, have a very flexible memory
		 * configuration. It will enable dual-channel mode
		 * (interleaving) on as much memory as it can, and the GPU
		 * will additionally sometimes enable different bit 6
		 * swizzling for tiled objects from the CPU.
		 *
		 * Here's what I found on the G965:
		 *
		 *    slot fill          memory size   swizzling
		 * 0A   0B   1A   1B     1-ch   2-ch
		 * 512  0    0    0      512    0      O
		 * 512  0    512  0      16     1008   X
		 * 512  0    0    512    16     1008   X
		 * 0    512  0    512    16     1008   X
		 * 1024 1024 1024 0      2048   1024   O
		 *
		 * We could probably detect this based on either the DRB
		 * matching, which was the case for the swizzling required in
		 * the table above, or from the 1-ch value being less than
		 * the minimum size of a rank.
		 *
		 * Reports indicate that the swizzling actually
		 * varies depending upon page placement inside the
		 * channels, i.e. we see swizzled pages where the
		 * banks of memory are paired and unswizzled on the
		 * uneven portion, so leave that as unknown.
		 */
		if (intel_uncore_read16(uncore, C0DRB3_BW) ==
		    intel_uncore_read16(uncore, C1DRB3_BW)) {
			swizzle_x = I915_BIT_6_SWIZZLE_9_10;
			swizzle_y = I915_BIT_6_SWIZZLE_9;
		}
	} else {
		u32 dcc = intel_uncore_read(uncore, DCC);

		/*
		 * On 9xx chipsets, channel interleave by the CPU is
		 * determined by DCC. For single-channel, neither the CPU
		 * nor the GPU do swizzling. For dual channel interleaved,
		 * the GPU's interleave is bit 9 and 10 for X tiled, and bit
		 * 9 for Y tiled. The CPU's interleave is independent, and
		 * can be based on either bit 11 (haven't seen this yet) or
		 * bit 17 (common).
		 */
		switch (dcc & DCC_ADDRESSING_MODE_MASK) {
		case DCC_ADDRESSING_MODE_SINGLE_CHANNEL:
		case DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC:
			swizzle_x = I915_BIT_6_SWIZZLE_NONE;
			swizzle_y = I915_BIT_6_SWIZZLE_NONE;
			break;
		case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED:
			if (dcc & DCC_CHANNEL_XOR_DISABLE) {
				/*
				 * This is the base swizzling by the GPU for
				 * tiled buffers.
				 */
				swizzle_x = I915_BIT_6_SWIZZLE_9_10;
				swizzle_y = I915_BIT_6_SWIZZLE_9;
			} else if ((dcc & DCC_CHANNEL_XOR_BIT_17) == 0) {
				/* Bit 11 swizzling by the CPU in addition. */
				swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
				swizzle_y = I915_BIT_6_SWIZZLE_9_11;
			} else {
				/* Bit 17 swizzling by the CPU in addition. */
				swizzle_x = I915_BIT_6_SWIZZLE_9_10_17;
				swizzle_y = I915_BIT_6_SWIZZLE_9_17;
			}
			break;
		}
	}
	if (swizzle_x == I915_BIT_6_SWIZZLE_UNKNOWN ||
	    swizzle_y == I915_BIT_6_SWIZZLE_UNKNOWN) {
		/*
		 * Userspace likes to explode if it sees unknown swizzling,
		 * so lie. We will finish the lie when reporting through
		 * the get-tiling-ioctl by reporting the physical swizzle
		 * mode as unknown instead.
		 *
		 * As we don't strictly know what the swizzling is, it may be
		 * bit17 dependent, and so we need to also prevent the pages
		 * from being moved.
		 */
		i915->gem_quirks |= GEM_QUIRK_PIN_SWIZZLED_PAGES;
		swizzle_x = I915_BIT_6_SWIZZLE_NONE;
		swizzle_y = I915_BIT_6_SWIZZLE_NONE;
	}

	ggtt->bit_6_swizzle_x = swizzle_x;
	ggtt->bit_6_swizzle_y = swizzle_y;
}
/*
 * Swap every 64 bytes of this page around, to account for it having a new
 * bit 17 of its physical address and therefore being interpreted differently
 * by the GPU.
 */
static void swizzle_page(struct page *page)
{
	char temp[64];
	char *vaddr;
	int i;

	vaddr = kmap_local_page(page);

	for (i = 0; i < PAGE_SIZE; i += 128) {
		memcpy(temp, &vaddr[i], 64);
		memcpy(&vaddr[i], &vaddr[i + 64], 64);
		memcpy(&vaddr[i + 64], temp, 64);
	}

	kunmap_local(vaddr);
}
/**
 * i915_gem_object_do_bit_17_swizzle - fixup bit 17 swizzling
 * @obj: i915 GEM buffer object
 * @pages: the scattergather list of physical pages
 *
 * This function fixes up the swizzling in case any page frame number for this
 * object has changed in bit 17 since that state has been saved with
 * i915_gem_object_save_bit_17_swizzle().
 *
 * This is called when pinning backing storage again, since the kernel is free
 * to move unpinned backing storage around (either by directly moving pages or
 * by swapping them out and back in again).
 */
void
i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj,
				  struct sg_table *pages)
{
	struct sgt_iter sgt_iter;
	struct page *page;
	int i;
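	/*
	 * Sketch of the body, reconstructed from the kernel-doc above:
	 * compare bit 17 of each page's current physical address against
	 * the saved state and re-swizzle any page that has moved.
	 */
	if (obj->bit_17 == NULL)
		return;

	i = 0;
	for_each_sgt_page(page, sgt_iter, pages) {
		char new_bit_17 = page_to_phys(page) >> 17;

		if ((new_bit_17 & 0x1) != (test_bit(i, obj->bit_17) != 0)) {
			swizzle_page(page);
			set_page_dirty(page);
		}
		i++;
	}
}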
/**
 * i915_gem_object_save_bit_17_swizzle - save bit 17 swizzling
 * @obj: i915 GEM buffer object
 * @pages: the scattergather list of physical pages
 *
 * This function saves the bit 17 of each page frame number so that swizzling
 * can be fixed up later on with i915_gem_object_do_bit_17_swizzle(). This must
 * be called before the backing storage can be unpinned.
 */
void
i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
				    struct sg_table *pages)
{
	const unsigned int page_count = obj->base.size >> PAGE_SHIFT;
	struct sgt_iter sgt_iter;
	struct page *page;
	int i;

	if (obj->bit_17 == NULL) {
		obj->bit_17 = bitmap_zalloc(page_count, GFP_KERNEL);
		if (obj->bit_17 == NULL) {
			drm_err(obj->base.dev,
				"Failed to allocate memory for bit 17 record\n");
			return;
		}
	}
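	/*
	 * Sketch of the remainder, matching the kernel-doc above: record
	 * bit 17 of each page's physical address in the bitmap.
	 */
	i = 0;
	for_each_sgt_page(page, sgt_iter, pages) {
		if (page_to_phys(page) & BIT(17))
			__set_bit(i, obj->bit_17);
		else
			__clear_bit(i, obj->bit_17);
		i++;
	}
}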