/*
 * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


#include "paging/X86VMTranslationMap.h"

#include <thread.h>
#include <smp.h>

#include "paging/X86PagingStructures.h"


//#define TRACE_X86_VM_TRANSLATION_MAP
#ifdef TRACE_X86_VM_TRANSLATION_MAP
#	define TRACE(x...) dprintf(x)
#else
#	define TRACE(x...) ;
#endif
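

// Usage note (not part of the original file): tracing is compiled out by
// default. Uncommenting the define above turns every TRACE() below into a
// dprintf() to the kernel debug output, e.g. (illustrative address only):
//
//	TRACE("%p->X86VMTranslationMap::Lock()\n", this);
//		// logs: 0x82f4c2a0->X86VMTranslationMap::Lock()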


X86VMTranslationMap::X86VMTranslationMap()
	:
	fPageMapper(NULL),
	fInvalidPagesCount(0)
{
}


X86VMTranslationMap::~X86VMTranslationMap()
{
}


status_t
X86VMTranslationMap::Init(bool kernel)
{
	fIsKernelMap = kernel;
	return B_OK;
}


/*!	Acquires the map's recursive lock, and resets the invalidated pages
	counter in case it's the first locking recursion.
*/
bool
X86VMTranslationMap::Lock()
{
	TRACE("%p->X86VMTranslationMap::Lock()\n", this);

	recursive_lock_lock(&fLock);
	if (recursive_lock_get_recursion(&fLock) == 1) {
		// we were the first one to grab the lock
		TRACE("clearing invalidated page count\n");
		fInvalidPagesCount = 0;
	}

	return true;
}


/*!	Unlocks the map, and, if we are actually losing the recursive lock,
	flushes all pending changes of this map (i.e. flushes TLB caches as
	needed).
*/
void
X86VMTranslationMap::Unlock()
{
	TRACE("%p->X86VMTranslationMap::Unlock()\n", this);

	if (recursive_lock_get_recursion(&fLock) == 1) {
		// we're about to release it for the last time
		Flush();
	}

	recursive_lock_unlock(&fLock);
}
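

/*	A minimal usage sketch (not part of the original file): the lock is
	recursive, so nested Lock()/Unlock() pairs are fine, and the TLB flush
	in Flush() only runs when the outermost Unlock() is about to release the
	lock. The map pointer and the helper below are hypothetical.

	X86VMTranslationMap* map = ...;

	map->Lock();			// recursion level 1: invalid pages counter reset
	map->Lock();			// recursion level 2: no reset

	UnmapSomePages(map);	// hypothetical: queues pages into fInvalidPages

	map->Unlock();			// recursion level 2: no Flush() yet
	map->Unlock();			// last recursion level: Flush() runs, then unlock
*/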


addr_t
X86VMTranslationMap::MappedSize() const
{
	return fMapCount;
}


void
X86VMTranslationMap::Flush()
{
	if (fInvalidPagesCount <= 0)
		return;

	Thread* thread = thread_get_current_thread();
	thread_pin_to_current_cpu(thread);

	if (fInvalidPagesCount > PAGE_INVALIDATE_CACHE_SIZE) {
		// invalidate all pages
		TRACE("flush_tmap: %d pages to invalidate, invalidate all\n",
			fInvalidPagesCount);

		if (fIsKernelMap) {
			arch_cpu_global_TLB_invalidate();
			smp_send_broadcast_ici(SMP_MSG_GLOBAL_INVALIDATE_PAGES, 0, 0, 0,
				NULL, SMP_MSG_FLAG_SYNC);
		} else {
			cpu_status state = disable_interrupts();
			arch_cpu_user_TLB_invalidate();
			restore_interrupts(state);

			int cpu = smp_get_current_cpu();
			CPUSet cpuMask = PagingStructures()->active_on_cpus;
			cpuMask.ClearBit(cpu);

			if (!cpuMask.IsEmpty()) {
				smp_send_multicast_ici(cpuMask, SMP_MSG_USER_INVALIDATE_PAGES,
					0, 0, 0, NULL, SMP_MSG_FLAG_SYNC);
			}
		}
	} else {
		TRACE("flush_tmap: %d pages to invalidate, invalidate list\n",
			fInvalidPagesCount);

		arch_cpu_invalidate_TLB_list(fInvalidPages, fInvalidPagesCount);

		if (fIsKernelMap) {
			smp_send_broadcast_ici(SMP_MSG_INVALIDATE_PAGE_LIST,
				(addr_t)fInvalidPages, fInvalidPagesCount, 0, NULL,
				SMP_MSG_FLAG_SYNC);
		} else {
			int cpu = smp_get_current_cpu();
			CPUSet cpuMask = PagingStructures()->active_on_cpus;
			cpuMask.ClearBit(cpu);

			if (!cpuMask.IsEmpty()) {
				smp_send_multicast_ici(cpuMask, SMP_MSG_INVALIDATE_PAGE_LIST,
					(addr_t)fInvalidPages, fInvalidPagesCount, 0, NULL,
					SMP_MSG_FLAG_SYNC);
			}
		}
	}
	fInvalidPagesCount = 0;

	thread_unpin_from_current_cpu(thread);
}
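

/*	How the invalid-pages list gets filled (a sketch, not code from this
	file): while the map is locked, the architecture-specific subclasses
	record each unmapped page's virtual address, but only up to
	PAGE_INVALIDATE_CACHE_SIZE entries; the counter keeps growing past that,
	which is what makes Flush() fall back to a full TLB invalidation instead
	of the per-page list. The helper name below is hypothetical.

	void
	X86VMTranslationMap::QueuePageInvalidation(addr_t address)
	{
		if (fInvalidPagesCount < PAGE_INVALIDATE_CACHE_SIZE)
			fInvalidPages[fInvalidPagesCount] = address;

		fInvalidPagesCount++;
	}
*/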