/*
 * Intel GTT (Graphics Translation Table) routines
 *
 * Caveat: This driver implements the linux agp interface, but this is far from
 * a agp driver! GTT support ended up here for purely historical reasons: The
 * old userspace intel graphics drivers needed an interface to map memory into
 * the GTT. And the drm provides a default interface for graphic devices sitting
 * on an agp port. So it made sense to fake the GTT support as an agp port to
 * avoid having to create a new api.
 *
 * With gem this does not make much sense anymore, just needlessly complicates
 * the code. But as long as the old graphics stack is still supported, it's
 * stuck here.
 *
 * /fairy-tale-mode off
 */
/*
 * If we have Intel graphics, we're not going to have anything other than
 * an Intel IOMMU. So make the correct use of the PCI DMA API contingent
 * on the Intel IOMMU support (CONFIG_INTEL_IOMMU).
 * Only newer chipsets need to bother with this, of course.
 */
#ifdef CONFIG_INTEL_IOMMU
#define USE_PCI_DMA_API 1
#else
#define USE_PCI_DMA_API 0
#endif
/*
 * Per-generation GMCH operations table; intel_private.driver points at
 * the instance matching the probed hardware.
 */
struct intel_gtt_driver {
	unsigned int gen : 8;
	unsigned int is_g33 : 1;
	unsigned int is_pineview : 1;
	unsigned int is_ironlake : 1;
	unsigned int has_pgtbl_enable : 1;
	unsigned int dma_mask_size : 8;
	/* Chipset specific GTT setup */
	int (*setup)(void);
	/* This should undo anything done in ->setup() save the unmapping
	 * of the mmio register file, that's done in the generic code. */
	void (*cleanup)(void);
	void (*write_entry)(dma_addr_t addr, unsigned int entry, unsigned int flags);
	dma_addr_t (*read_entry)(unsigned int entry, bool *is_present, bool *is_local);
	/* Flags is a more or less chipset specific opaque value.
	 * For chipsets that need to support old ums (non-gem) code, this
	 * needs to be identical to the various supported agp memory types! */
	bool (*check_flags)(unsigned int flags);
	void (*chipset_flush)(void);
};
staticstruct _intel_private { conststruct intel_gtt_driver *driver; struct pci_dev *pcidev; /* device one */ struct pci_dev *bridge_dev;
u8 __iomem *registers;
phys_addr_t gtt_phys_addr;
u32 PGETBL_save;
u32 __iomem *gtt; /* I915G */ bool clear_fake_agp; /* on first access via agp, fill with scratch */ int num_dcache_entries; void __iomem *i9xx_flush_page; char *i81x_gtt_table; struct resource ifp_resource; int resource_valid; struct page *scratch_page;
phys_addr_t scratch_page_dma; int refcount; /* Whether i915 needs to use the dmar apis or not. */ unsignedint needs_dmar : 1;
phys_addr_t gma_bus_addr; /* Size of memory reserved for graphics by the BIOS */
resource_size_t stolen_size; /* Total number of gtt entries. */ unsignedint gtt_total_entries; /* Part of the gtt that is mappable by the cpu, for those chips where
* this is not the full gtt. */ unsignedint gtt_mappable_entries;
} intel_private;
/* i81x does not preallocate the gtt. It's always 64kb in size. */
/*
 * NOTE(review): fragment — the signature of the enclosing function (the
 * i81x ->setup()/GTT allocation path, presumably) is not visible in this
 * chunk. The table pointer is stashed so it can be freed on cleanup.
 */
gtt_table = alloc_gatt_pages(I810_GTT_ORDER); if (gtt_table == NULL) return -ENOMEM;
intel_private.i81x_gtt_table = gtt_table;
#if IS_ENABLED(CONFIG_AGP_INTEL)
/*
 * Bind entries of the i810's dedicated display cache ("dcache") into the
 * GTT at pg_start. Returns 0 on success, -EINVAL if the range exceeds
 * the number of dcache entries.
 */
static int i810_insert_dcache_entries(struct agp_memory *mem, off_t pg_start,
				      int type)
{
	int i;

	if ((pg_start + mem->page_count) > intel_private.num_dcache_entries)
		return -EINVAL;

	if (!mem->is_flushed)
		global_cache_flush();

	for (i = pg_start; i < (pg_start + mem->page_count); i++) {
		/* Widen before shifting so the address can't be computed in int. */
		dma_addr_t addr = (dma_addr_t)i << PAGE_SHIFT;
		intel_private.driver->write_entry(addr, i, type);
	}
	/* Fence the PTE writes before the GPU may observe them. */
	wmb();

	return 0;
}
/* * The i810/i830 requires a physical address to program its mouse * pointer into hardware. * However the Xserver still writes to it through the agp aperture.
*/ staticstruct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type)
{ struct agp_memory *new; struct page *page;
/* Only 1 page (cursor) or 4 pages (ARGB cursor kludge) are supported. */
switch (pg_count) { case 1: page = agp_bridge->driver->agp_alloc_page(agp_bridge); break; case 4: /* kludge to get 4 physical pages for ARGB cursor */
page = i8xx_alloc_pages(); break; default: return NULL;
}
if (page == NULL) return NULL;
/*
 * NOTE(review): if agp_create_memory() fails here, the page(s) allocated
 * above appear to be leaked. The rest of this function is truncated in
 * this chunk — confirm against the full source before changing.
 */
new = agp_create_memory(pg_count); if (new == NULL) return NULL;
/*
 * NOTE(review): fragment — the enclosing function's opening (presumably
 * i965_gtt_total_entries(), given the PGETBL size decoding below) and the
 * declarations of gmch_ctl/pgetbl_ctl/size are not visible in this chunk.
 */
if (INTEL_GTT_GEN == 5) { switch (gmch_ctl & G4x_GMCH_SIZE_MASK) { case G4x_GMCH_SIZE_1M: case G4x_GMCH_SIZE_VT_1M:
i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1MB); break; case G4x_GMCH_SIZE_VT_1_5M:
i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1_5MB); break; case G4x_GMCH_SIZE_2M: case G4x_GMCH_SIZE_VT_2M:
i965_adjust_pgetbl_size(I965_PGETBL_SIZE_2MB); break;
}
}
/* Decode the PGETBL_CTL size field into a byte count. */
switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) { case I965_PGETBL_SIZE_128KB:
size = KB(128); break; case I965_PGETBL_SIZE_256KB:
size = KB(256); break; case I965_PGETBL_SIZE_512KB:
size = KB(512); break; /* GTT pagetable sizes bigger than 512KB are not possible on G33! */ case I965_PGETBL_SIZE_1MB:
size = KB(1024); break; case I965_PGETBL_SIZE_2MB:
size = KB(2048); break; case I965_PGETBL_SIZE_1_5MB:
size = KB(1024 + 512); break; default:
dev_info(&intel_private.pcidev->dev, "unknown page table size, assuming 512KB\n");
size = KB(512);
}
/* Each GTT entry is 4 bytes, so entries = bytes / 4. */
return size/4;
}
/*
 * Total number of GTT entries for the probed chipset. G33/Gen4/Gen5 have
 * a dedicated PGETBL size field; older hardware sizes the GTT exactly to
 * the aperture.
 */
static unsigned int intel_gtt_total_entries(void)
{
	if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5)
		return i965_gtt_total_entries();

	/* On previous hardware, the GTT size was just what was
	 * required to map the aperture.
	 */
	return intel_private.gtt_mappable_entries;
}
/* Certain Gen5 chipsets require idling the GPU before
 * unmapping anything from the GTT when VT-d is enabled.
 */
static inline int needs_ilk_vtd_wa(void)
{
	const unsigned short gpu_devid = intel_private.pcidev->device;

	/*
	 * Query iommu subsystem to see if we need the workaround. Presumably
	 * that was loaded first.
	 */
	return ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG ||
		 gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) &&
		device_iommu_mapped(&intel_private.pcidev->dev));
}
staticbool intel_gtt_can_wc(void)
{ if (INTEL_GTT_GEN <= 2) returnfalse;
if (INTEL_GTT_GEN >= 6) returnfalse;
/* Reports of major corruption with ILK vt'd enabled */ if (needs_ilk_vtd_wa()) returnfalse;
returntrue;
}
/* Common one-time GTT init; saves PGETBL_CTL so it can be restored on resume. */
staticint intel_gtt_init(void)
{
u32 gtt_map_size; int ret, bar;
ret = intel_private.driver->setup(); if (ret != 0) return ret;
/* save the PGETBL reg for resume */
intel_private.PGETBL_save =
readl(intel_private.registers+I810_PGETBL_CTL)
& ~I810_PGETBL_ENABLED; /* we only ever restore the register when enabling the PGTBL... */ if (HAS_PGTBL_EN)
intel_private.PGETBL_save |= I810_PGETBL_ENABLED;
/*
 * NOTE(review): the loop below uses i/num_sizes/aper_size/agp_bridge,
 * none of which are declared in this function — these lines look spliced
 * in from a fake-agp fetch_size helper during extraction, and the real
 * middle of intel_gtt_init() appears to be missing. Confirm against the
 * full source before touching this block.
 */
for (i = 0; i < num_sizes; i++) { if (aper_size == intel_fake_agp_sizes[i].size) {
agp_bridge->current_size =
(void *) (intel_fake_agp_sizes + i); return aper_size;
}
}
return 0;
} #endif
/* i830 has nothing chipset-specific to tear down; empty ->cleanup() hook. */
static void i830_cleanup(void)
{
}
/* The chipset_flush interface needs to get data that has already been * flushed out of the CPU all the way out to main memory, because the GPU * doesn't snoop those buffers. * * The 8xx series doesn't have the same lovely interface for flushing the * chipset write buffers that the later chips do. According to the 865 * specs, it's 64 octwords, or 1KB. So, to get those previous things in * that buffer out, we just fill 1KB and clflush it out, on the assumption * that it'll push whatever was in there out. It appears to work.
*/ staticvoid i830_chipset_flush(void)
{ unsignedlong timeout = jiffies + msecs_to_jiffies(1000);
/* Forcibly evict everything from the CPU write buffers. * clflush appears to be insufficient.
*/
wbinvd_on_all_cpus();
/* Now we've only seen documents for this magic bit on 855GM, * we hope it exists for the other gen2 chipsets... * * Also works as advertised on my 845G.
*/
writel(readl(intel_private.registers+I830_HIC) | (1<<31),
intel_private.registers+I830_HIC);
/* Busy-wait (bounded by the 1s timeout) for the flush bit to self-clear. */
while (readl(intel_private.registers+I830_HIC) & (1<<31)) { if (time_after(jiffies, timeout)) break;
/*
 * NOTE(review): from here on the code uses gmch_ctrl and reg without
 * declarations and returns values from a void function — these lines
 * look spliced in from the GTT-enable helper(s) during extraction, and
 * the real tail of i830_chipset_flush() is missing. Do not edit without
 * consulting the full source.
 */
pci_read_config_word(intel_private.bridge_dev,
I830_GMCH_CTRL, &gmch_ctrl); if ((gmch_ctrl & I830_GMCH_ENABLED) == 0) {
dev_err(&intel_private.pcidev->dev, "failed to enable the GTT: GMCH_CTRL=%x\n",
gmch_ctrl); returnfalse;
}
}
/* On the resume path we may be adjusting the PGTBL value, so * be paranoid and flush all chipset write buffers...
*/ if (INTEL_GTT_GEN >= 3)
writel(0, intel_private.registers+GFX_FLSH_CNTL);
reg = intel_private.registers+I810_PGETBL_CTL;
writel(intel_private.PGETBL_save, reg); if (HAS_PGTBL_EN && (readl(reg) & I810_PGETBL_ENABLED) == 0) {
dev_err(&intel_private.pcidev->dev, "failed to enable the GTT: PGETBL=%x [expected %x]\n",
readl(reg), intel_private.PGETBL_save); returnfalse;
}
if (INTEL_GTT_GEN >= 3)
writel(0, intel_private.registers+GFX_FLSH_CNTL);
/*
 * Accept only the agp memory-type flags valid on i830-class chipsets.
 * NOTE(review): truncated — the fall-through "return false;" and the
 * function's closing brace are missing from this chunk.
 */
staticbool i830_check_flags(unsignedint flags)
{ switch (flags) { case 0: case AGP_PHYS_MEMORY: case AGP_USER_CACHED_MEMORY: case AGP_USER_MEMORY: returntrue;
}
#if IS_ENABLED(CONFIG_AGP_INTEL)
/*
 * Write one GTT entry per page starting at first_entry, then fence the
 * PTE writes with wmb() so the GPU observes a fully-updated table.
 */
static void intel_gmch_gtt_insert_pages(unsigned int first_entry,
					unsigned int num_entries,
					struct page **pages,
					unsigned int flags)
{
	int i, j;

	for (i = 0, j = first_entry; i < num_entries; i++, j++) {
		dma_addr_t addr = page_to_phys(pages[i]);
		intel_private.driver->write_entry(addr, j, flags);
	}
	wmb();
}
/* Fake-agp ->insert_memory(): validate the request and bind mem->pages
 * into the GTT at pg_start.
 */
staticint intel_fake_agp_insert_entries(struct agp_memory *mem,
off_t pg_start, int type)
{ int ret = -EINVAL;
/* First agp access: scrub everything above the BIOS-stolen area. */
if (intel_private.clear_fake_agp) { int start = intel_private.stolen_size / PAGE_SIZE; int end = intel_private.gtt_mappable_entries;
intel_gmch_gtt_clear_range(start, end - start);
intel_private.clear_fake_agp = false;
}
/* The i810 dedicated display cache has its own insert path. */
if (INTEL_GTT_GEN == 1 && type == AGP_DCACHE_MEMORY) return i810_insert_dcache_entries(mem, pg_start, type);
if (mem->page_count == 0) goto out;
if (pg_start + mem->page_count > intel_private.gtt_total_entries) goto out_err;
if (type != mem->type) goto out_err;
if (!intel_private.driver->check_flags(type)) goto out_err;
if (!mem->is_flushed)
global_cache_flush();
if (intel_private.needs_dmar) { struct sg_table st;
ret = intel_gtt_map_memory(mem->pages, mem->page_count, &st); if (ret != 0) return ret;
/*
 * NOTE(review): the loop below (first_entry/num_entries, scratch-page
 * writes) belongs to intel_gmch_gtt_clear_range(), not this function —
 * the chunk was spliced during extraction and the real remainder of
 * intel_fake_agp_insert_entries() (including the out/out_err labels
 * used above) is missing. Confirm against the full source.
 */
for (i = first_entry; i < (first_entry + num_entries); i++) {
intel_private.driver->write_entry(intel_private.scratch_page_dma,
i, 0);
}
wmb();
}
EXPORT_SYMBOL(intel_gmch_gtt_clear_range);
#if IS_ENABLED(CONFIG_AGP_INTEL) staticint intel_fake_agp_remove_entries(struct agp_memory *mem,
off_t pg_start, int type)
{ if (mem->page_count == 0) return 0;
/*
 * NOTE(review): the lines below (ifp_resource setup using undeclared
 * "temp" and "ret") belong to intel_i915_setup_chipset_flush(), not this
 * function — spliced during extraction; the real body of
 * intel_fake_agp_remove_entries() is missing from this chunk.
 */
intel_private.resource_valid = 1;
intel_private.ifp_resource.start = temp;
intel_private.ifp_resource.end = temp + PAGE_SIZE;
ret = request_resource(&iomem_resource, &intel_private.ifp_resource); /* some BIOSes reserve this area in a pnp some don't */ if (ret)
intel_private.resource_valid = 0;
}
}
/* Claim the chipset flush-page MMIO resource on i965/G33-class hardware. */
staticvoid intel_i965_g33_setup_chipset_flush(void)
{
u32 temp_hi, temp_lo; int ret;
/*
 * NOTE(review): truncated — the PCI config reads that combine
 * temp_hi/temp_lo into l64 are missing from this chunk; l64 is used
 * below without a visible declaration. Confirm against the full source.
 */
intel_private.resource_valid = 1;
intel_private.ifp_resource.start = l64;
intel_private.ifp_resource.end = l64 + PAGE_SIZE;
ret = request_resource(&iomem_resource, &intel_private.ifp_resource); /* some BIOSes reserve this area in a pnp some don't */ if (ret)
intel_private.resource_valid = 0;
}
}
/*
 * Locate the "Intel Flush Page" resource via the chipset-specific helper
 * and ioremap it; i9xx_chipset_flush() writes to this page to flush the
 * chipset write buffers. No-op if already configured or on Gen6.
 */
static void intel_i9xx_setup_flush(void)
{
	/* return if already configured */
	if (intel_private.ifp_resource.start)
		return;

	if (INTEL_GTT_GEN == 6)
		return;

	/* setup a resource for this object */
	intel_private.ifp_resource.name = "Intel Flush Page";
	intel_private.ifp_resource.flags = IORESOURCE_MEM;

	/* Setup chipset flush for 915 */
	if (IS_G33 || INTEL_GTT_GEN >= 4)
		intel_i965_g33_setup_chipset_flush();
	else
		intel_i915_setup_chipset_flush();

	if (intel_private.ifp_resource.start)
		intel_private.i9xx_flush_page = ioremap(intel_private.ifp_resource.start, PAGE_SIZE);
	if (!intel_private.i9xx_flush_page)
		dev_err(&intel_private.pcidev->dev,
			"can't ioremap flush page - no chipset flushing\n");
}
/*
 * Undo intel_i9xx_setup_flush(): unmap the flush page and release the
 * ifp resource if it was successfully claimed.
 */
static void i9xx_cleanup(void)
{
	if (intel_private.i9xx_flush_page)
		iounmap(intel_private.i9xx_flush_page);
	if (intel_private.resource_valid)
		release_resource(&intel_private.ifp_resource);
	intel_private.ifp_resource.start = 0;
	intel_private.resource_valid = 0;
}
/*
 * Flush the chipset write buffers by writing to the flush page (if it was
 * mapped). The wmb() orders prior CPU writes before the flush trigger.
 */
static void i9xx_chipset_flush(void)
{
	wmb();
	if (intel_private.i9xx_flush_page)
		writel(1, intel_private.i9xx_flush_page);
}
/*
 * NOTE(review): fragment — this refcount fast-path (returns 1 when already
 * initialised) belongs to a probe/setup function whose signature is not
 * visible in this chunk.
 */
/* * Can be called from the fake agp driver but also directly from * drm/i915.ko. Hence we need to check whether everything is set up * already.
*/ if (intel_private.refcount++) return 1;
/* Flush the chipset write buffers via the chipset-specific hook, if any. */
void intel_gmch_gtt_flush(void)
{
	if (intel_private.driver->chipset_flush)
		intel_private.driver->chipset_flush();
}
EXPORT_SYMBOL(intel_gmch_gtt_flush);
/*
 * Drop a reference on the GTT core; on the final put, tear down the
 * scratch page and release the pci device references taken at probe.
 */
void intel_gmch_remove(void)
{
	if (--intel_private.refcount)
		return;

	if (intel_private.scratch_page)
		intel_gtt_teardown_scratch_page();
	if (intel_private.pcidev)
		pci_dev_put(intel_private.pcidev);
	if (intel_private.bridge_dev)
		pci_dev_put(intel_private.bridge_dev);
	intel_private.driver = NULL;
}
EXPORT_SYMBOL(intel_gmch_remove);
MODULE_AUTHOR("Dave Jones, Various @Intel");
MODULE_DESCRIPTION("Intel GTT (Graphics Translation Table) routines");
MODULE_LICENSE("GPL and additional rights");
/*
 * NOTE(review): trailing German web-tool boilerplate ("Messung V0.5",
 * processing-time notice, disclaimer) was extraction residue, not part of
 * the source file; wrapped out of the translation unit here.
 */