mm: fix exec activate_mm vs TLB shootdown and lazy tlb switching race
[linux/fpc-iii.git] arch/tile/kernel/tlb.c

/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/hugetlb.h>
#include <asm/tlbflush.h>
#include <asm/homecache.h>
#include <hv/hypervisor.h>

/* From tlbflush.h */
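/*
 * current_asid is the ASID currently active on each cpu;
 * min_asid/max_asid delimit the range of valid ASIDs (presumably
 * populated from the hypervisor's ASID range at boot).
 */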
DEFINE_PER_CPU(int, current_asid);
int min_asid, max_asid;

/*
 * Note that we flush the L1I (for VM_EXEC pages) as well as the TLB
 * so that when we are unmapping an executable page, we also flush it.
 * Combined with flushing the L1I at context switch time, this means
 * we don't have to do any other icache flushes.
 */
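
/*
 * Everything below funnels into flush_remote() from <asm/homecache.h>.
 * As used in this file, its arguments group into a cache-flush spec
 * (a pfn, control bits such as HV_FLUSH_EVICT_L1I, and a cpumask),
 * a TLB-flush spec (va, length, page size, and a cpumask), and an
 * optional (asids, asidcount) list of remote ASIDs to flush.
 */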
void flush_tlb_mm(struct mm_struct *mm)
{
	HV_Remote_ASID asids[NR_CPUS];
	int i = 0, cpu;
	for_each_cpu(cpu, mm_cpumask(mm)) {
		HV_Remote_ASID *asid = &asids[i++];
		asid->y = cpu / smp_topology.width;
		asid->x = cpu % smp_topology.width;
		asid->asid = per_cpu(current_asid, cpu);
	}
	flush_remote(0, HV_FLUSH_EVICT_L1I, mm_cpumask(mm),
		     0, 0, 0, NULL, asids, i);
}

void flush_tlb_current_task(void)
{
	flush_tlb_mm(current->mm);
}
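
/*
 * Flush one page of @mm on each cpu in its cpumask.  Use the vma's
 * page size so huge pages are flushed at the proper granularity, and
 * also evict the L1I when the vma is executable.
 */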
void flush_tlb_page_mm(struct vm_area_struct *vma, struct mm_struct *mm,
		       unsigned long va)
{
	unsigned long size = vma_kernel_pagesize(vma);
	int cache = (vma->vm_flags & VM_EXEC) ? HV_FLUSH_EVICT_L1I : 0;
	flush_remote(0, cache, mm_cpumask(mm),
		     va, size, size, mm_cpumask(mm), NULL, 0);
}
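
/* Convenience wrapper for the vma's own mm; exported for modules. */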
void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
{
	flush_tlb_page_mm(vma, vma->vm_mm, va);
}
EXPORT_SYMBOL(flush_tlb_page);
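
/*
 * Flush the range [start, end) of @vma's mm at the vma's page size,
 * again evicting the L1I for executable mappings.
 */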
void flush_tlb_range(struct vm_area_struct *vma,
		     unsigned long start, unsigned long end)
{
	unsigned long size = vma_kernel_pagesize(vma);
	struct mm_struct *mm = vma->vm_mm;
	int cache = (vma->vm_flags & VM_EXEC) ? HV_FLUSH_EVICT_L1I : 0;
	flush_remote(0, cache, mm_cpumask(mm), start, end - start, size,
		     mm_cpumask(mm), NULL, 0);
}
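
/*
 * Walk every virtual address range the hypervisor reports via
 * hv_inquire_virtual() (a zero-size range ends the walk) and flush
 * it on all online cpus, once at PAGE_SIZE and once at HPAGE_SIZE
 * granularity; only the first pass also evicts the L1I.
 */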
void flush_tlb_all(void)
{
	int i;
	for (i = 0; ; ++i) {
		HV_VirtAddrRange r = hv_inquire_virtual(i);
		if (r.size == 0)
			break;
		flush_remote(0, HV_FLUSH_EVICT_L1I, cpu_online_mask,
			     r.start, r.size, PAGE_SIZE, cpu_online_mask,
			     NULL, 0);
		flush_remote(0, 0, NULL,
			     r.start, r.size, HPAGE_SIZE, cpu_online_mask,
			     NULL, 0);
	}
}

/*
 * Callers need to flush the L1I themselves if necessary, e.g. for
 * kernel module unload.  Otherwise we assume callers are not using
 * executable pgprot_t's.  Using EVICT_L1I means that dataplane cpus
 * will get an unnecessary interrupt otherwise.
 */
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	flush_remote(0, 0, NULL,
		     start, end - start, PAGE_SIZE, cpu_online_mask, NULL, 0);
}
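
/*
 * Example (sketch): per the comment above, a caller unmapping
 * executable kernel pages, e.g. at module unload, would pair
 * flush_tlb_kernel_range() with its own L1I eviction, along the
 * lines of the flush_tlb_all() calls above:
 *
 *	flush_tlb_kernel_range(start, end);
 *	flush_remote(0, HV_FLUSH_EVICT_L1I, cpu_online_mask,
 *		     start, end - start, PAGE_SIZE, cpu_online_mask,
 *		     NULL, 0);
 */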