btrfs: Attempt to fix GCC2 build.
[haiku.git] / src / system / kernel / arch / arm / paging / ARMVMTranslationMap.cpp
bloba4c3c48f615a40067464ed8c18de11a0ae8c193a
1 /*
2 * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
3 * Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
4 * Distributed under the terms of the MIT License.
6 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
7 * Distributed under the terms of the NewOS License.
8 */
11 #include "paging/ARMVMTranslationMap.h"
13 #include <thread.h>
14 #include <smp.h>
16 #include "paging/ARMPagingStructures.h"
// Define TRACE_ARM_VM_TRANSLATION_MAP to get verbose dprintf() tracing of
// map lock/unlock and TLB flush activity; otherwise TRACE() compiles away.
//#define TRACE_ARM_VM_TRANSLATION_MAP
#ifdef TRACE_ARM_VM_TRANSLATION_MAP
#	define TRACE(x...) dprintf(x)
#else
#	define TRACE(x...) ;
#endif
27 ARMVMTranslationMap::ARMVMTranslationMap()
29 fPageMapper(NULL),
30 fInvalidPagesCount(0)
35 ARMVMTranslationMap::~ARMVMTranslationMap()
40 status_t
41 ARMVMTranslationMap::Init(bool kernel)
43 fIsKernelMap = kernel;
44 return B_OK;
48 /*! Acquires the map's recursive lock, and resets the invalidate pages counter
49 in case it's the first locking recursion.
51 bool
52 ARMVMTranslationMap::Lock()
54 TRACE("%p->ARMVMTranslationMap::Lock()\n", this);
56 recursive_lock_lock(&fLock);
57 if (recursive_lock_get_recursion(&fLock) == 1) {
58 // we were the first one to grab the lock
59 TRACE("clearing invalidated page count\n");
60 fInvalidPagesCount = 0;
63 return true;
67 /*! Unlocks the map, and, if we are actually losing the recursive lock,
68 flush all pending changes of this map (ie. flush TLB caches as
69 needed).
71 void
72 ARMVMTranslationMap::Unlock()
74 TRACE("%p->ARMVMTranslationMap::Unlock()\n", this);
76 if (recursive_lock_get_recursion(&fLock) == 1) {
77 // we're about to release it for the last time
78 Flush();
81 recursive_lock_unlock(&fLock);
85 addr_t
86 ARMVMTranslationMap::MappedSize() const
88 return fMapCount;
92 void
93 ARMVMTranslationMap::Flush()
95 if (fInvalidPagesCount <= 0)
96 return;
98 Thread* thread = thread_get_current_thread();
99 thread_pin_to_current_cpu(thread);
101 if (fInvalidPagesCount > PAGE_INVALIDATE_CACHE_SIZE) {
102 // invalidate all pages
103 TRACE("flush_tmap: %d pages to invalidate, invalidate all\n",
104 fInvalidPagesCount);
106 if (fIsKernelMap) {
107 arch_cpu_global_TLB_invalidate();
108 smp_send_broadcast_ici(SMP_MSG_GLOBAL_INVALIDATE_PAGES, 0, 0, 0,
109 NULL, SMP_MSG_FLAG_SYNC);
110 } else {
111 cpu_status state = disable_interrupts();
112 arch_cpu_user_TLB_invalidate();
113 restore_interrupts(state);
115 int cpu = smp_get_current_cpu();
116 CPUSet cpuMask = PagingStructures()->active_on_cpus;
117 cpuMask.ClearBit(cpu);
119 if (!cpuMask.IsEmpty()) {
120 smp_send_multicast_ici(cpuMask, SMP_MSG_USER_INVALIDATE_PAGES,
121 0, 0, 0, NULL, SMP_MSG_FLAG_SYNC);
124 } else {
125 TRACE("flush_tmap: %d pages to invalidate, invalidate list\n",
126 fInvalidPagesCount);
128 arch_cpu_invalidate_TLB_list(fInvalidPages, fInvalidPagesCount);
130 if (fIsKernelMap) {
131 smp_send_broadcast_ici(SMP_MSG_INVALIDATE_PAGE_LIST,
132 (addr_t)fInvalidPages, fInvalidPagesCount, 0, NULL,
133 SMP_MSG_FLAG_SYNC);
134 } else {
135 int cpu = smp_get_current_cpu();
136 CPUSet cpuMask = PagingStructures()->active_on_cpus;
137 cpuMask.ClearBit(cpu);
139 if (!cpuMask.IsEmpty()) {
140 smp_send_multicast_ici(cpuMask, SMP_MSG_INVALIDATE_PAGE_LIST,
141 (addr_t)fInvalidPages, fInvalidPagesCount, 0, NULL,
142 SMP_MSG_FLAG_SYNC);
146 fInvalidPagesCount = 0;
148 thread_unpin_from_current_cpu(thread);