/*
 * Switch a MMU context.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 1997, 1998, 1999 by Ralf Baechle
 * Copyright (C) 1999 Silicon Graphics, Inc.
 */
#ifndef _ASM_MMU_CONTEXT_H
#define _ASM_MMU_CONTEXT_H
/* Note: This is also implemented with uasm in arch/mips/kvm/entry.c */
/*
 * Install @pgd as the active kernel page table base: hand it to the
 * software TLB-refill handler and mirror it into the hardware page
 * table walker's base register.
 */
#define TLBMISS_HANDLER_SETUP_PGD(pgd)					\
do {									\
	tlbmiss_handler_setup_pgd((unsigned long)(pgd));		\
	htw_set_pwbase((unsigned long)(pgd));				\
} while (0)
/*
 * Point the TLB-refill fast path at the kernel's swapper_pg_dir and
 * reload the per-CPU handler state via TLBMISS_HANDLER_RESTORE().
 */
#define TLBMISS_HANDLER_SETUP()						\
do {									\
	TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir);			\
	TLBMISS_HANDLER_RESTORE();					\
} while (0)
#else /* !CONFIG_MIPS_PGD_C0_CONTEXT: using pgd_current */

/*
 * For the fast tlb miss handlers, we keep a per cpu array of pointers
 * to the current pgd for each processor. Also, the proc. id is stuffed
 * into the context register.
 */
extern unsigned long pgd_current[];
/*
 * The ginvt instruction will invalidate wired entries when its type field
 * targets anything other than the entire TLB. That means that if we were to
 * allow the kernel to create wired entries with the MMID of current->active_mm
 * then those wired entries could be invalidated when we later use ginvt to
 * invalidate TLB entries with that MMID.
 *
 * In order to prevent ginvt from trashing wired entries, we reserve one MMID
 * for use by the kernel when creating wired entries. This MMID will never be
 * assigned to a struct mm, and we'll never target it with a ginvt instruction.
 */
#define MMID_KERNEL_WIRED	0
/* * All unused by hardware upper bits will be considered * as a software asid extension.
*/ staticinline u64 asid_version_mask(unsignedint cpu)
{ unsignedlong asid_mask = cpu_asid_mask(&cpu_data[cpu]);
/*
 * Initialize the context related info for a new mm_struct
 * instance.
 */
#define init_new_context init_new_context
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	int i;

	/*
	 * MMIDs are shared across CPUs, so a single context slot suffices;
	 * with classic per-CPU ASIDs every possible CPU's slot is cleared
	 * so each one allocates a fresh context on first use.
	 */
	if (cpu_has_mmid) {
		set_cpu_context(0, mm, 0);
	} else {
		for_each_possible_cpu(i)
			set_cpu_context(i, mm, 0);
	}
	/*
	 * NOTE(review): the tail of this function (any further per-mm
	 * initialisation, its "return 0;" and the closing brace) is
	 * missing from this copy of the file — restore it from the
	 * upstream source.
	 */
/*
 * NOTE(review): this appears to be the tail of a context-switch routine
 * (presumably switch_mm); its signature, local declarations and the
 * local_irq_save()/htw_stop() that pair with the calls below are missing
 * from this copy of the file.
 *
 * Mark current->active_mm as not "active" anymore.
 * We don't want to mislead possible IPI tlb flush routines.
 */
cpumask_clear_cpu(cpu, mm_cpumask(prev));
cpumask_set_cpu(cpu, mm_cpumask(next));
/* Resume the hardware page table walker (presumably stopped earlier). */
htw_start();
local_irq_restore(flags);
}
/*
 * Destroy context related info for an mm_struct that is about
 * to be put to rest.
 */
#define destroy_context destroy_context
static inline void destroy_context(struct mm_struct *mm)
{
	/* Release this mm's dsemul (delay-slot emulation) state. */
	dsemul_mm_cleanup(mm);
}
/*
 * NOTE(review): this appears to be the body of drop_mmu_context(); the
 * function's opening lines (signature, declarations of cpu/ctx/old_mmid/
 * flags and the local_irq_save()) and its closing lines are missing from
 * this copy of the file — restore them from the upstream source.
 */
cpu = smp_processor_id();
ctx = cpu_context(cpu, mm);

if (!ctx) {
	/* no-op */
} else if (cpu_has_mmid) {
	/*
	 * Globally invalidating TLB entries associated with the MMID
	 * is pretty cheap using the GINVT instruction, so we'll do
	 * that rather than incur the overhead of allocating a new
	 * MMID. The latter would be especially difficult since MMIDs
	 * are global & other CPUs may be actively using ctx.
	 */
	htw_stop();
	old_mmid = read_c0_memorymapid();
	write_c0_memorymapid(ctx & cpu_asid_mask(&cpu_data[cpu]));
	mtc0_tlbw_hazard();
	ginvt_mmid();
	sync_ginv();
	write_c0_memorymapid(old_mmid);
	instruction_hazard();
	htw_start();
} else if (cpumask_test_cpu(cpu, mm_cpumask(mm))) {
	/*
	 * mm is currently active, so we can't really drop it.
	 * Instead we bump the ASID.
	 */
	htw_stop();
	get_new_mmu_context(mm);
	write_c0_entryhi(cpu_asid(cpu, mm));
	htw_start();
} else {
	/* will get a new context next time */
	set_cpu_context(cpu, mm, 0);
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.