/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2005-2007 Cavium Networks
*/ #include <linux/export.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/smp.h> #include <linux/mm.h> #include <linux/bitops.h> #include <linux/cpu.h> #include <linux/io.h>
/*
 * Octeon automatically flushes the dcache on tlb changes, so
 * from Linux's viewpoint it acts much like a physically
 * tagged cache. No flushing is needed.
 */
static void octeon_flush_data_cache_page(unsigned long addr)
{
	/* Nothing to do: hardware keeps the dcache coherent on Octeon. */
}
/*
 * Flush local I-cache for the specified range.
 *
 * The Octeon flush instruction operates on the whole local icache,
 * so @start and @end are intentionally unused.
 */
static void local_octeon_flush_icache_range(unsigned long start,
					    unsigned long end)
{
	octeon_local_flush_icache();
}
/**
 * octeon_flush_icache_all_cores - Flush caches as necessary for all cores
 * affected by a vma. If no vma is supplied, all cores are flushed.
 *
 * @vma:    VMA to flush or NULL to flush all icaches.
 */
static void octeon_flush_icache_all_cores(struct vm_area_struct *vma)
{
	extern void octeon_send_ipi_single(int cpu, unsigned int action);
#ifdef CONFIG_SMP
	int cpu;
	cpumask_t mask;
#endif

	/* Order prior stores before the flush so other cores see them. */
	mb();
	octeon_local_flush_icache();
#ifdef CONFIG_SMP
	/* Keep smp_processor_id() stable while we build the target mask. */
	preempt_disable();
	cpu = smp_processor_id();

	/*
	 * If we have a vma structure, we only need to worry about
	 * cores it has been used on.
	 */
	if (vma)
		mask = *mm_cpumask(vma->vm_mm);
	else
		mask = *cpu_online_mask;

	/* This core was already flushed above; drop it from the mask. */
	cpumask_clear_cpu(cpu, &mask);
#ifdef CONFIG_CAVIUM_OCTEON_SOC
	for_each_cpu(cpu, &mask)
		octeon_send_ipi_single(cpu, SMP_ICACHE_FLUSH);
#else
	smp_call_function_many(&mask,
			       (smp_call_func_t)octeon_local_flush_icache,
			       NULL, 1);
#endif
	preempt_enable();
#endif
}
/*
 * Called to flush the icache on all cores.
 */
static void octeon_flush_icache_all(void)
{
	/* NULL vma means "flush every online core's icache". */
	octeon_flush_icache_all_cores(NULL);
}
/**
 * octeon_flush_cache_mm - flush all memory associated with a memory context.
 *
 * @mm:	    Memory context to flush
 */
static void octeon_flush_cache_mm(struct mm_struct *mm)
{
	/*
	 * According to the R4K version of this file, CPUs without
	 * dcache aliases don't need to do anything here.
	 */
}
/* * Flush a range of kernel addresses out of the icache *
*/ staticvoid octeon_flush_icache_range(unsignedlong start, unsignedlong end)
{
octeon_flush_icache_all_cores(NULL);
}
/** * octeon_flush_cache_range - Flush a range out of a vma * * @vma: VMA to flush * @start: beginning address for flush * @end: ending address for flush
*/ staticvoid octeon_flush_cache_range(struct vm_area_struct *vma, unsignedlong start, unsignedlong end)
{ if (vma->vm_flags & VM_EXEC)
octeon_flush_icache_all_cores(vma);
}
/** * octeon_flush_cache_page - Flush a specific page of a vma * * @vma: VMA to flush page for * @page: Page to flush * @pfn: Page frame number
*/ staticvoid octeon_flush_cache_page(struct vm_area_struct *vma, unsignedlong page, unsignedlong pfn)
{ if (vma->vm_flags & VM_EXEC)
octeon_flush_icache_all_cores(vma);
}
/*
 * Flush a range of kernel vmap'ed memory.
 *
 * This should never be called on Octeon (the dcache is hardware
 * coherent), so any call indicates a kernel bug.
 */
static void octeon_flush_kernel_vmap_range(unsigned long vaddr, int size)
{
	BUG();
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.