/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 *
 * This code maintains the "home" for each page in the system.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/bootmem.h>
#include <linux/rmap.h>
#include <linux/pagemap.h>
#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/sysctl.h>
#include <linux/pagevec.h>
#include <linux/ptrace.h>
#include <linux/timex.h>
#include <linux/cache.h>
#include <linux/smp.h>
#include <linux/module.h>
#include <linux/hugetlb.h>

#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/homecache.h>

#include <arch/sim.h>

#if CHIP_HAS_COHERENT_LOCAL_CACHE()

/*
 * The noallocl2 option suppresses all use of the L2 cache to cache
 * locally from a remote home.  There's no point in using it if we
 * don't have coherent local caching, though.
 */
static int __write_once noallocl2;
static int __init set_noallocl2(char *str)
{
	noallocl2 = 1;
	return 0;
}
early_param("noallocl2", set_noallocl2);

#else

#define noallocl2 0

#endif

/*
 * Update the irq_stat for cpus that we are going to interrupt
 * with TLB or cache flushes.  Also handle removing dataplane cpus
 * from the TLB flush set, and setting dataplane_tlb_state instead.
 */
static void hv_flush_update(const struct cpumask *cache_cpumask,
			    struct cpumask *tlb_cpumask,
			    unsigned long tlb_va, unsigned long tlb_length,
			    HV_Remote_ASID *asids, int asidcount)
{
	struct cpumask mask;
	int i, cpu;

	cpumask_clear(&mask);
	if (cache_cpumask)
		cpumask_or(&mask, &mask, cache_cpumask);
	if (tlb_cpumask && tlb_length) {
		cpumask_or(&mask, &mask, tlb_cpumask);
	}

	for (i = 0; i < asidcount; ++i)
		cpumask_set_cpu(asids[i].y * smp_width + asids[i].x, &mask);

	/*
	 * Don't bother to update atomically; losing a count
	 * here is not that critical.
	 */
	for_each_cpu(cpu, &mask)
		++per_cpu(irq_stat, cpu).irq_hv_flush_count;
}

/*
 * This wrapper function around hv_flush_remote() does several things:
 *
 *  - Provides a return value error-checking panic path, since
 *    there's never any good reason for hv_flush_remote() to fail.
 *  - Accepts a 32-bit PFN rather than a 64-bit PA, which generally
 *    is the type that Linux wants to pass around anyway.
 *  - Canonicalizes that lengths of zero make cpumasks NULL.
 *  - Handles deferring TLB flushes for dataplane tiles.
 *  - Tracks remote interrupts in the per-cpu irq_cpustat_t.
 *
 * Note that we have to wait until the cache flush completes before
 * updating the per-cpu last_cache_flush word, since otherwise another
 * concurrent flush can race, conclude the flush has already
 * completed, and start to use the page while it's still dirty
 * remotely (running concurrently with the actual evict, presumably).
 */
void flush_remote(unsigned long cache_pfn, unsigned long cache_control,
		  const struct cpumask *cache_cpumask_orig,
		  HV_VirtAddr tlb_va, unsigned long tlb_length,
		  unsigned long tlb_pgsize,
		  const struct cpumask *tlb_cpumask_orig,
		  HV_Remote_ASID *asids, int asidcount)
{
	int rc;
	struct cpumask cache_cpumask_copy, tlb_cpumask_copy;
	struct cpumask *cache_cpumask, *tlb_cpumask;
	HV_PhysAddr cache_pa;
	char cache_buf[NR_CPUS*5], tlb_buf[NR_CPUS*5];

	mb();   /* provided just to simplify "magic hypervisor" mode */

	/*
	 * Canonicalize and copy the cpumasks.
	 */
	if (cache_cpumask_orig && cache_control) {
		cpumask_copy(&cache_cpumask_copy, cache_cpumask_orig);
		cache_cpumask = &cache_cpumask_copy;
	} else {
		cpumask_clear(&cache_cpumask_copy);
		cache_cpumask = NULL;
	}
	if (cache_cpumask == NULL)
		cache_control &= ~HV_FLUSH_EVICT_L2;
	if (tlb_cpumask_orig && tlb_length) {
		cpumask_copy(&tlb_cpumask_copy, tlb_cpumask_orig);
		tlb_cpumask = &tlb_cpumask_copy;
	} else {
		cpumask_clear(&tlb_cpumask_copy);
		tlb_cpumask = NULL;
	}

	hv_flush_update(cache_cpumask, tlb_cpumask, tlb_va, tlb_length,
			asids, asidcount);
	cache_pa = (HV_PhysAddr)cache_pfn << PAGE_SHIFT;
	rc = hv_flush_remote(cache_pa, cache_control,
			     cpumask_bits(cache_cpumask),
			     tlb_va, tlb_length, tlb_pgsize,
			     cpumask_bits(tlb_cpumask),
			     asids, asidcount);
	if (rc == 0)
		return;

	cpumask_scnprintf(cache_buf, sizeof(cache_buf), &cache_cpumask_copy);
	cpumask_scnprintf(tlb_buf, sizeof(tlb_buf), &tlb_cpumask_copy);

	pr_err("hv_flush_remote(%#llx, %#lx, %p [%s],"
	       " %#lx, %#lx, %#lx, %p [%s], %p, %d) = %d\n",
	       cache_pa, cache_control, cache_cpumask, cache_buf,
	       (unsigned long)tlb_va, tlb_length, tlb_pgsize,
	       tlb_cpumask, tlb_buf,
	       asids, asidcount, rc);
	panic("Unsafe to continue.");
}

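/*
 * Illustrative sketch (an assumption, not from the original file): a
 * pure TLB shootdown of one page on a set of cpus, with no cache
 * eviction, passes a zero cache_control and a NULL cache cpumask:
 *
 *	flush_remote(0, 0, NULL,
 *		     va, PAGE_SIZE, PAGE_SIZE, mask,
 *		     NULL, 0);
 *
 * "va" and "mask" here are hypothetical; compare homecache_evict()
 * below, which exercises the cache half of the interface instead.
 */
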
static void homecache_finv_page_va(void* va, int home)
{
	if (home == smp_processor_id()) {
		finv_buffer_local(va, PAGE_SIZE);
	} else if (home == PAGE_HOME_HASH) {
		finv_buffer_remote(va, PAGE_SIZE, 1);
	} else {
		BUG_ON(home < 0 || home >= NR_CPUS);
		finv_buffer_remote(va, PAGE_SIZE, 0);
	}
}

void homecache_finv_map_page(struct page *page, int home)
{
	unsigned long flags;
	unsigned long va;
	pte_t *ptep;
	pte_t pte;

	if (home == PAGE_HOME_UNCACHED)
		return;
	local_irq_save(flags);
#ifdef CONFIG_HIGHMEM
	va = __fix_to_virt(FIX_KMAP_BEGIN + kmap_atomic_idx_push() +
			   (KM_TYPE_NR * smp_processor_id()));
#else
	va = __fix_to_virt(FIX_HOMECACHE_BEGIN + smp_processor_id());
#endif
	ptep = virt_to_pte(NULL, (unsigned long)va);
	pte = pfn_pte(page_to_pfn(page), PAGE_KERNEL);
	__set_pte(ptep, pte_set_home(pte, home));
	homecache_finv_page_va((void *)va, home);
	__pte_clear(ptep);
	hv_flush_page(va, PAGE_SIZE);
#ifdef CONFIG_HIGHMEM
	kmap_atomic_idx_pop();
#endif
	local_irq_restore(flags);
}

static void homecache_finv_page_home(struct page *page, int home)
{
	if (!PageHighMem(page) && home == page_home(page))
		homecache_finv_page_va(page_address(page), home);
	else
		homecache_finv_map_page(page, home);
}

static inline bool incoherent_home(int home)
{
	return home == PAGE_HOME_IMMUTABLE || home == PAGE_HOME_INCOHERENT;
}

static void homecache_finv_page_internal(struct page *page, int force_map)
{
	int home = page_home(page);
	if (home == PAGE_HOME_UNCACHED)
		return;
	if (incoherent_home(home)) {
		int cpu;
		for_each_cpu(cpu, &cpu_cacheable_map)
			homecache_finv_map_page(page, cpu);
	} else if (force_map) {
		/* Force if, e.g., the normal mapping is migrating. */
		homecache_finv_map_page(page, home);
	} else {
		homecache_finv_page_home(page, home);
	}
	sim_validate_lines_evicted(PFN_PHYS(page_to_pfn(page)), PAGE_SIZE);
}

void homecache_finv_page(struct page *page)
{
	homecache_finv_page_internal(page, 0);
}

void homecache_evict(const struct cpumask *mask)
{
	flush_remote(0, HV_FLUSH_EVICT_L2, mask, 0, 0, 0, NULL, NULL, 0);
}

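/*
 * Illustrative sketch (an assumption, not from the original file): to
 * evict everything cached in a single cpu's L2, build a one-cpu mask
 * and hand it to homecache_evict():
 *
 *	struct cpumask mask;
 *	cpumask_clear(&mask);
 *	cpumask_set_cpu(cpu, &mask);
 *	homecache_evict(&mask);
 */
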
/* Report the home corresponding to a given PTE. */
static int pte_to_home(pte_t pte)
{
	if (hv_pte_get_nc(pte))
		return PAGE_HOME_IMMUTABLE;
	switch (hv_pte_get_mode(pte)) {
	case HV_PTE_MODE_CACHE_TILE_L3:
		return get_remote_cache_cpu(pte);
	case HV_PTE_MODE_CACHE_NO_L3:
		return PAGE_HOME_INCOHERENT;
	case HV_PTE_MODE_UNCACHED:
		return PAGE_HOME_UNCACHED;
#if CHIP_HAS_CBOX_HOME_MAP()
	case HV_PTE_MODE_CACHE_HASH_L3:
		return PAGE_HOME_HASH;
#endif
	}
	panic("Bad PTE %#llx\n", pte.val);
}

/* Update the home of a PTE if necessary (can also be used for a pgprot_t). */
pte_t pte_set_home(pte_t pte, int home)
{
	/* Check for non-linear file mapping "PTEs" and pass them through. */
	if (pte_file(pte))
		return pte;

#if CHIP_HAS_MMIO()
	/* Check for MMIO mappings and pass them through. */
	if (hv_pte_get_mode(pte) == HV_PTE_MODE_MMIO)
		return pte;
#endif

	/*
	 * Only immutable pages get NC mappings.  If we have a
	 * non-coherent PTE, but the underlying page is not
	 * immutable, it's likely the result of a forced
	 * caching setting running up against ptrace setting
	 * the page to be writable underneath.  In this case,
	 * just keep the PTE coherent.
	 */
	if (hv_pte_get_nc(pte) && home != PAGE_HOME_IMMUTABLE) {
		pte = hv_pte_clear_nc(pte);
		pr_err("non-immutable page incoherently referenced: %#llx\n",
		       pte.val);
	}

	switch (home) {

	case PAGE_HOME_UNCACHED:
		pte = hv_pte_set_mode(pte, HV_PTE_MODE_UNCACHED);
		break;

	case PAGE_HOME_INCOHERENT:
		pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_NO_L3);
		break;

	case PAGE_HOME_IMMUTABLE:
		/*
		 * We could home this page anywhere, since it's immutable,
		 * but by default just home it to follow "hash_default".
		 */
		BUG_ON(hv_pte_get_writable(pte));
		if (pte_get_forcecache(pte)) {
			/* Upgrade "force any cpu" to "No L3" for immutable. */
			if (hv_pte_get_mode(pte) == HV_PTE_MODE_CACHE_TILE_L3
			    && pte_get_anyhome(pte)) {
				pte = hv_pte_set_mode(pte,
						      HV_PTE_MODE_CACHE_NO_L3);
			}
		} else
#if CHIP_HAS_CBOX_HOME_MAP()
		if (hash_default)
			pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_HASH_L3);
		else
#endif
			pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_NO_L3);
		pte = hv_pte_set_nc(pte);
		break;

#if CHIP_HAS_CBOX_HOME_MAP()
	case PAGE_HOME_HASH:
		pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_HASH_L3);
		break;
#endif

	default:
		BUG_ON(home < 0 || home >= NR_CPUS ||
		       !cpu_is_valid_lotar(home));
		pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_TILE_L3);
		pte = set_remote_cache_cpu(pte, home);
		break;
	}

#if CHIP_HAS_NC_AND_NOALLOC_BITS()
	if (noallocl2)
		pte = hv_pte_set_no_alloc_l2(pte);

	/* Simplify "no local and no l3" to "uncached" */
	if (hv_pte_get_no_alloc_l2(pte) && hv_pte_get_no_alloc_l1(pte) &&
	    hv_pte_get_mode(pte) == HV_PTE_MODE_CACHE_NO_L3) {
		pte = hv_pte_set_mode(pte, HV_PTE_MODE_UNCACHED);
	}
#endif

	/* Checking this case here gives a better panic than from the hv. */
	BUG_ON(hv_pte_get_mode(pte) == 0);

	return pte;
}
EXPORT_SYMBOL(pte_set_home);

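/*
 * Illustrative sketch (an assumption, not from the original file):
 * since pte_set_home() can also be applied to a pgprot_t, a caller
 * building a kernel mapping whose lines are hashed across the chip
 * could do something like:
 *
 *	pgprot_t prot = PAGE_KERNEL;
 *	prot = pte_set_home(prot, PAGE_HOME_HASH);
 *
 * (PAGE_HOME_HASH is only meaningful when CHIP_HAS_CBOX_HOME_MAP().)
 */
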
/*
 * The routines in this section are the "static" versions of the normal
 * dynamic homecaching routines; they just set the home cache
 * of a kernel page once, and require a full-chip cache/TLB flush,
 * so they're not suitable for anything but infrequent use.
 */

#if CHIP_HAS_CBOX_HOME_MAP()
static inline int initial_page_home(void) { return PAGE_HOME_HASH; }
#else
static inline int initial_page_home(void) { return 0; }
#endif

int page_home(struct page *page)
{
	if (PageHighMem(page)) {
		return initial_page_home();
	} else {
		unsigned long kva = (unsigned long)page_address(page);
		return pte_to_home(*virt_to_pte(NULL, kva));
	}
}
EXPORT_SYMBOL(page_home);

void homecache_change_page_home(struct page *page, int order, int home)
{
	int i, pages = (1 << order);
	unsigned long kva;

	BUG_ON(PageHighMem(page));
	BUG_ON(page_count(page) > 1);
	BUG_ON(page_mapcount(page) != 0);
	kva = (unsigned long) page_address(page);
	flush_remote(0, HV_FLUSH_EVICT_L2, &cpu_cacheable_map,
		     kva, pages * PAGE_SIZE, PAGE_SIZE, cpu_online_mask,
		     NULL, 0);

	for (i = 0; i < pages; ++i, kva += PAGE_SIZE) {
		pte_t *ptep = virt_to_pte(NULL, kva);
		pte_t pteval = *ptep;
		BUG_ON(!pte_present(pteval) || pte_huge(pteval));
		__set_pte(ptep, pte_set_home(pteval, home));
	}
}

struct page *homecache_alloc_pages(gfp_t gfp_mask,
				   unsigned int order, int home)
{
	struct page *page;
	BUG_ON(gfp_mask & __GFP_HIGHMEM);   /* must be lowmem */
	page = alloc_pages(gfp_mask, order);
	if (page)
		homecache_change_page_home(page, order, home);
	return page;
}
EXPORT_SYMBOL(homecache_alloc_pages);

struct page *homecache_alloc_pages_node(int nid, gfp_t gfp_mask,
					unsigned int order, int home)
{
	struct page *page;
	BUG_ON(gfp_mask & __GFP_HIGHMEM);   /* must be lowmem */
	page = alloc_pages_node(nid, gfp_mask, order);
	if (page)
		homecache_change_page_home(page, order, home);
	return page;
}

void __homecache_free_pages(struct page *page, unsigned int order)
{
	if (put_page_testzero(page)) {
		homecache_change_page_home(page, order, initial_page_home());
		if (order == 0) {
			free_hot_cold_page(page, 0);
		} else {
			init_page_count(page);
			__free_pages(page, order);
		}
	}
}
EXPORT_SYMBOL(__homecache_free_pages);

void homecache_free_pages(unsigned long addr, unsigned int order)
{
	if (addr != 0) {
		VM_BUG_ON(!virt_addr_valid((void *)addr));
		__homecache_free_pages(virt_to_page((void *)addr), order);
	}
}
EXPORT_SYMBOL(homecache_free_pages);