/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997, 1998, 1999, 2000 Ralf Baechle ralf@gnu.org
 * Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2002 MIPS Technologies, Inc.  All rights reserved.
 */

#include <linux/cpu_pm.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/export.h>
/*
 * LOONGSON-2 has a 4 entry itlb which is a subset of jtlb, LOONGSON-3 has
 * a 4 entry itlb and a 4 entry dtlb which are subsets of jtlb. Unfortunately,
 * itlb/dtlb are not totally transparent to software.
 */
static inline void flush_micro_tlb(void)
{
	switch (current_cpu_type()) {
	case CPU_LOONGSON2EF:
		/* Invalidate the micro ITLB via the CP0 diagnostic register. */
		write_c0_diag(LOONGSON_DIAG_ITLB);
		break;
	case CPU_LOONGSON64:
		/* Loongson64 additionally has a micro DTLB; invalidate both. */
		write_c0_diag(LOONGSON_DIAG_ITLB | LOONGSON_DIAG_DTLB);
		break;
	default:
		break;
	}
}
/*
 * Flush the entire TLB of the local CPU.
 *
 * Runs with interrupts disabled. When the CPU supports the tlbinvf
 * instruction and there are no wired entries, the VTLB is invalidated
 * in one shot and each FTLB set is invalidated in turn; otherwise every
 * non-wired index is overwritten with a unique, impossible VPN2 value.
 */
void local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry, ftlbhighset;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	htw_stop();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	entry = num_wired_entries();

	/*
	 * Blast 'em all away.
	 * If there are any wired entries, fall back to iterating
	 */
	if (cpu_has_tlbinv && !entry) {
		if (current_cpu_data.tlbsizevtlb) {
			write_c0_index(0);
			mtc0_tlbw_hazard();
			tlbinvf();  /* invalidate VTLB */
		}

		ftlbhighset = current_cpu_data.tlbsizevtlb +
			current_cpu_data.tlbsizeftlbsets;
		for (entry = current_cpu_data.tlbsizevtlb;
		     entry < ftlbhighset;
		     entry++) {
			write_c0_index(entry);
			mtc0_tlbw_hazard();
			tlbinvf();  /* invalidate one FTLB set */
		}
	} else {
		while (entry < current_cpu_data.tlbsize) {
			/* Make sure all entries differ. */
			write_c0_entryhi(UNIQUE_ENTRYHI(entry));
			write_c0_index(entry);
			mtc0_tlbw_hazard();
			tlb_write_indexed();
			entry++;
		}
	}
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	htw_start();
	flush_micro_tlb();
	local_irq_restore(flags);
}
EXPORT_SYMBOL(local_flush_tlb_all);
/*
 * local_flush_tlb_range() - flush TLB entries covering a user VA range.
 *
 * NOTE(review): this copy is truncated. The span between the
 * cpu_context() check and the "finish:" label is missing:
 * old_entryhi/old_mmid are used but never declared or assigned here,
 * and "finish:" together with flush_micro_tlb_vm(vma) looks like the
 * tail of a *different* flush routine spliced in. Recover the full
 * function from the original source before building.
 * (Also "unsignedlong" is a whitespace-mangled "unsigned long".)
 */
void local_flush_tlb_range(struct vm_area_struct *vma, unsignedlong start, unsignedlong end)
{ struct mm_struct *mm = vma->vm_mm; int cpu = smp_processor_id();
/* Only act when this mm has a live context (ASID/MMID) on this CPU. */
if (cpu_context(cpu, mm) != 0) { unsignedlong size, flags;
finish:
/* Restore previous EntryHi (and MMID where the CPU has MemoryMapIDs). */
write_c0_entryhi(old_entryhi); if (cpu_has_mmid)
write_c0_memorymapid(old_mmid);
htw_start();
flush_micro_tlb_vm(vma);
local_irq_restore(flags);
}
}
/* * This one is only used for pages with the global bit set so we don't care * much about the ASID.
 *
 * NOTE(review): the body of this function is missing from this copy —
 * only the signature and local declarations survive (no closing brace).
 * Restore from the original source.
 */
*/ void local_flush_tlb_one(unsignedlong page)
{ unsignedlong flags; int oldpid, idx;
/* * We will need multiple versions of update_mmu_cache(), one that just * updates the TLB with the new pte(s), and another which also checks * for the R4k "end of page" hardware bug and does the needy.
*/ void __update_tlb(struct vm_area_struct * vma, unsignedlong address, pte_t pte)
{ unsignedlong flags;
pgd_t *pgdp;
p4d_t *p4dp;
pud_t *pudp;
pmd_t *pmdp;
pte_t *ptep, *ptemap = NULL; int idx, pid;
/* * Handle debugger faulting in for debuggee.
*/ if (current->active_mm != vma->vm_mm) return;
/*
 * NOTE(review): truncated here — idx is used below without the
 * EntryHi setup / tlb_probe() sequence that should assign it; the
 * page-table walk (pgdp/p4dp/pudp/pmdp) is also missing.
 */
mtc0_tlbw_hazard(); if (idx < 0)
tlb_write_random(); else
tlb_write_indexed();
tlbw_use_hazard();
write_c0_pagemask(PM_DEFAULT_MASK);
} else
#endif
{
ptemap = ptep = pte_offset_map(pmdp, address); /* * update_mmu_cache() is called between pte_offset_map_lock() * and pte_unmap_unlock(), so we can assume that ptep is not * NULL here: and what should be done below if it were NULL?
*/
/*
 * NOTE(review): everything from here down appears to belong to a
 * different routine that installs a wired TLB entry (it bumps
 * c0_wired and writes MMID_KERNEL_WIRED) — presumably
 * add_wired_entry(), spliced in by the extraction. Confirm against
 * the original source; __update_tlb's real tail is missing.
 */
local_irq_save(flags); if (cpu_has_mmid) {
old_mmid = read_c0_memorymapid();
write_c0_memorymapid(MMID_KERNEL_WIRED);
} /* Save old context and create impossible VPN2 value */
old_ctx = read_c0_entryhi();
htw_stop();
old_pagemask = read_c0_pagemask();
wired = num_wired_entries();
/* Reserve one more wired slot and point Index at it. */
write_c0_wired(wired + 1);
write_c0_index(wired);
tlbw_use_hazard(); /* What is the hazard here? */
write_c0_pagemask(pagemask);
write_c0_entryhi(entryhi);
write_c0_entrylo0(entrylo0);
write_c0_entrylo1(entrylo1);
mtc0_tlbw_hazard();
tlb_write_indexed();
tlbw_use_hazard();
write_c0_entryhi(old_ctx); if (cpu_has_mmid)
write_c0_memorymapid(old_mmid);
tlbw_use_hazard(); /* What is the hazard here? */
htw_start();
write_c0_pagemask(old_pagemask);
local_flush_tlb_all();
local_irq_restore(flags);
#endif
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * NOTE(review): truncated — only the opening of this function survives
 * (the pagemask probe that computes "mask" and the return are missing).
 * "staticunsignedint" is a whitespace-mangled "static unsigned int".
 */
int has_transparent_hugepage(void)
{ staticunsignedint mask = -1;
if (mask == -1) { /* first call comes during __init */ unsignedlong flags;
/* * Used for loading TLB entries before trap_init() has started, when we * don't actually want to add a wired entry which remains throughout the * lifetime of the system
*/
/* Index of the next temporary TLB slot, consumed top-down (see add_temporary_entry). */
int temp_tlb_entry;

/* NOTE(review): this #ifndef guard has no visible matching #endif in this copy — verify. */
#ifndef CONFIG_64BIT
/*
 * add_temporary_entry() - install a TLB entry in a temporary (pre-trap_init)
 * slot taken from the top of the TLB, below the wired entries.
 * Returns 0 on success, -ENOSPC when no slot is left.
 *
 * NOTE(review): "unsignedlong" is a whitespace-mangled "unsigned long"
 * throughout; the function is truncated below.
 */
__init int add_temporary_entry(unsignedlong entrylo0, unsignedlong entrylo1, unsignedlong entryhi, unsignedlong pagemask)
{ int ret = 0; unsignedlong flags; unsignedlong wired; unsignedlong old_pagemask; unsignedlong old_ctx;
local_irq_save(flags); /* Save old context and create impossible VPN2 value */
htw_stop();
old_ctx = read_c0_entryhi();
old_pagemask = read_c0_pagemask();
/* Take the next free slot from the top; refuse if it would collide with wired entries. */
wired = num_wired_entries(); if (--temp_tlb_entry < wired) {
printk(KERN_WARNING "No TLB space left for add_temporary_entry\n");
ret = -ENOSPC; goto out;
}
/*
 * NOTE(review): splice point — the loop below (probing each entry
 * across ASIDs to make every EntryHi unique) belongs to a different
 * routine, not to add_temporary_entry. Also "¤t_cpu_data" is an
 * HTML-entity mangling of "&current_cpu_data"; this must be repaired
 * at the source — it cannot compile as-is.
 */
while (entry < current_cpu_data.tlbsize) { unsignedlong asid_mask = cpu_asid_mask(¤t_cpu_data); unsignedlong asid = 0; int idx;
/* Skip wired MMID to make ginvt_mmid work */ if (cpu_has_mmid)
asid = MMID_KERNEL_WIRED + 1;
/* Check for match before using UNIQUE_ENTRYHI */ do { if (cpu_has_mmid) {
write_c0_memorymapid(asid);
write_c0_entryhi(UNIQUE_ENTRYHI(entry));
} else {
write_c0_entryhi(UNIQUE_ENTRYHI(entry) | asid);
}
mtc0_tlbw_hazard();
tlb_probe();
tlb_probe_hazard();
idx = read_c0_index(); /* No match or match is on current entry */ if (idx < 0 || idx == entry) break; /* * If we hit a match, we need to try again with * a different ASID.
*/
asid++;
} while (asid < asid_mask);
if (idx >= 0 && idx != entry)
panic("Unable to uniquify TLB entry %d", idx);
/* * Configure TLB (for init or after a CPU has been powered off).
 *
 * NOTE(review): "staticvoid" is a whitespace-mangled "static void";
 * the function is cut off after the pagemask sanity check (no closing
 * brace in this copy). Restore the remainder from the original source.
 */
*/ staticvoid r4k_tlb_configure(void)
{ /* * You should never change this register: * - On R4600 1.7 the tlbp never hits for pages smaller than * the value in the c0_pagemask register. * - The entire mm handling assumes the c0_pagemask register to * be set to fixed-size pages.
*/
write_c0_pagemask(PM_DEFAULT_MASK);
/* Verify the hardware actually accepted PM_DEFAULT_MASK before relying on it. */
back_to_back_c0_hazard(); if (read_c0_pagemask() != PM_DEFAULT_MASK)
panic("MMU doesn't support PAGE_SIZE=0x%lx", PAGE_SIZE);
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.