//===- MemoryMapper.cpp - Cross-process memory mapper ------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
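//
// This file implements the MemoryMapper interface, which drives the
// reserve -> prepare -> initialize -> deinitialize -> release lifecycle for
// JIT'd memory. InProcessMemoryMapper backs allocations with memory in the
// current process; SharedMemoryMapper writes content into a shared memory
// region that is also mapped into a remote executor process.
//
// A minimal usage sketch of the in-process mapper (illustrative only: the
// callback body is simplified and is not part of this file):
//
//   auto Mapper = cantFail(InProcessMemoryMapper::Create());
//   Mapper->reserve(1 << 20, [](Expected<ExecutorAddrRange> Range) {
//     // On success, *Range describes the reserved pages; content is written
//     // via prepare() and made live via initialize().
//   });
//
//===----------------------------------------------------------------------===//
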
#include "llvm/ExecutionEngine/Orc/MemoryMapper.h"

#include "llvm/Config/llvm-config.h" // for LLVM_ON_UNIX
#include "llvm/ExecutionEngine/Orc/Shared/OrcRTBridge.h"
#include "llvm/Support/WindowsError.h"

#if defined(LLVM_ON_UNIX) && !defined(__ANDROID__)
#include <fcntl.h>
#include <sys/mman.h>
#if defined(__MVS__)
#include "llvm/Support/BLAKE3.h"
#include <sys/shm.h>
#endif
#include <unistd.h>
#elif defined(_WIN32)
#include <windows.h>
#endif

namespace llvm {
namespace orc {

MemoryMapper::~MemoryMapper() {}

InProcessMemoryMapper::InProcessMemoryMapper(size_t PageSize)
    : PageSize(PageSize) {}

Expected<std::unique_ptr<InProcessMemoryMapper>>
InProcessMemoryMapper::Create() {
  auto PageSize = sys::Process::getPageSize();
  if (!PageSize)
    return PageSize.takeError();
  return std::make_unique<InProcessMemoryMapper>(*PageSize);
}

void InProcessMemoryMapper::reserve(size_t NumBytes,
                                    OnReservedFunction OnReserved) {
  std::error_code EC;
  auto MB = sys::Memory::allocateMappedMemory(
      NumBytes, nullptr, sys::Memory::MF_READ | sys::Memory::MF_WRITE, EC);

  if (EC)
    return OnReserved(errorCodeToError(EC));

  {
    std::lock_guard<std::mutex> Lock(Mutex);
    Reservations[MB.base()].Size = MB.allocatedSize();
  }

  OnReserved(
      ExecutorAddrRange(ExecutorAddr::fromPtr(MB.base()), MB.allocatedSize()));
}

char *InProcessMemoryMapper::prepare(ExecutorAddr Addr, size_t ContentSize) {
  return Addr.toPtr<char *>();
}

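// initialize() makes the in-process allocation live: it zero-fills each
// segment's trailing ZeroFillSize bytes, applies the requested protections,
// invalidates the instruction cache for executable segments, and runs the
// finalize actions, recording the resulting deinitialize actions for later.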
void InProcessMemoryMapper::initialize(MemoryMapper::AllocInfo &AI,
                                       OnInitializedFunction OnInitialized) {
  ExecutorAddr MinAddr(~0ULL);
  ExecutorAddr MaxAddr(0);

  // FIXME: Release finalize lifetime segments.
  for (auto &Segment : AI.Segments) {
    auto Base = AI.MappingBase + Segment.Offset;
    auto Size = Segment.ContentSize + Segment.ZeroFillSize;

    if (Base < MinAddr)
      MinAddr = Base;

    if (Base + Size > MaxAddr)
      MaxAddr = Base + Size;

    std::memset((Base + Segment.ContentSize).toPtr<void *>(), 0,
                Segment.ZeroFillSize);

    if (auto EC = sys::Memory::protectMappedMemory(
            {Base.toPtr<void *>(), Size},
            toSysMemoryProtectionFlags(Segment.AG.getMemProt()))) {
      return OnInitialized(errorCodeToError(EC));
    }
    if ((Segment.AG.getMemProt() & MemProt::Exec) == MemProt::Exec)
      sys::Memory::InvalidateInstructionCache(Base.toPtr<void *>(), Size);
  }

  auto DeinitializeActions = shared::runFinalizeActions(AI.Actions);
  if (!DeinitializeActions)
    return OnInitialized(DeinitializeActions.takeError());

  {
    std::lock_guard<std::mutex> Lock(Mutex);

    // This is the maximum range whose permissions may have been modified.
    Allocations[MinAddr].Size = MaxAddr - MinAddr;
    Allocations[MinAddr].DeinitializationActions =
        std::move(*DeinitializeActions);
    Reservations[AI.MappingBase.toPtr<void *>()].Allocations.push_back(MinAddr);
  }

  OnInitialized(MinAddr);
}

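// deinitialize() runs the recorded dealloc actions for each allocation and
// resets the affected pages to read/write so the address range can be reused.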
void InProcessMemoryMapper::deinitialize(
    ArrayRef<ExecutorAddr> Bases,
    MemoryMapper::OnDeinitializedFunction OnDeinitialized) {
  Error AllErr = Error::success();

  {
    std::lock_guard<std::mutex> Lock(Mutex);

    for (auto Base : llvm::reverse(Bases)) {

      if (Error Err = shared::runDeallocActions(
              Allocations[Base].DeinitializationActions)) {
        AllErr = joinErrors(std::move(AllErr), std::move(Err));
      }

      // Reset protections to read/write so the area can be reused.
      if (auto EC = sys::Memory::protectMappedMemory(
              {Base.toPtr<void *>(), Allocations[Base].Size},
              sys::Memory::ProtectionFlags::MF_READ |
                  sys::Memory::ProtectionFlags::MF_WRITE)) {
        AllErr = joinErrors(std::move(AllErr), errorCodeToError(EC));
      }

      Allocations.erase(Base);
    }
  }

  OnDeinitialized(std::move(AllErr));
}

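// release() deinitializes any remaining sub-allocations within each
// reservation, then returns the backing pages to the OS.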
void InProcessMemoryMapper::release(ArrayRef<ExecutorAddr> Bases,
                                    OnReleasedFunction OnReleased) {
  Error Err = Error::success();

  for (auto Base : Bases) {
    std::vector<ExecutorAddr> AllocAddrs;
    size_t Size;
    {
      std::lock_guard<std::mutex> Lock(Mutex);
      auto &R = Reservations[Base.toPtr<void *>()];
      Size = R.Size;
      AllocAddrs.swap(R.Allocations);
    }

    // Deinitialize sub-allocations.
    std::promise<MSVCPError> P;
    auto F = P.get_future();
    deinitialize(AllocAddrs, [&](Error Err) { P.set_value(std::move(Err)); });
    if (Error E = F.get()) {
      Err = joinErrors(std::move(Err), std::move(E));
    }

    // Free the memory.
    auto MB = sys::MemoryBlock(Base.toPtr<void *>(), Size);

    auto EC = sys::Memory::releaseMappedMemory(MB);
    if (EC) {
      Err = joinErrors(std::move(Err), errorCodeToError(EC));
    }

    std::lock_guard<std::mutex> Lock(Mutex);
    Reservations.erase(Base.toPtr<void *>());
  }

  OnReleased(std::move(Err));
}

InProcessMemoryMapper::~InProcessMemoryMapper() {
  std::vector<ExecutorAddr> ReservationAddrs;
  {
    std::lock_guard<std::mutex> Lock(Mutex);

    ReservationAddrs.reserve(Reservations.size());
    for (const auto &R : Reservations) {
      ReservationAddrs.push_back(ExecutorAddr::fromPtr(R.getFirst()));
    }
  }

  std::promise<MSVCPError> P;
  auto F = P.get_future();
  release(ReservationAddrs, [&](Error Err) { P.set_value(std::move(Err)); });
  cantFail(F.get());
}

// SharedMemoryMapper
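//
// SharedMemoryMapper keeps JIT'd content in a shared memory region mapped
// into both this process and the executor process. reserve() asks the
// executor-side mapper service for a named region and maps it locally;
// prepare() returns the local working address for a remote address; and
// initialize() sends a SharedMemoryFinalizeRequest so the executor applies
// protections and runs finalize actions on its side.
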
SharedMemoryMapper::SharedMemoryMapper(ExecutorProcessControl &EPC,
                                       SymbolAddrs SAs, size_t PageSize)
    : EPC(EPC), SAs(SAs), PageSize(PageSize) {
#if (!defined(LLVM_ON_UNIX) || defined(__ANDROID__)) && !defined(_WIN32)
  llvm_unreachable("SharedMemoryMapper is not supported on this platform yet");
#endif
}

Expected<std::unique_ptr<SharedMemoryMapper>>
SharedMemoryMapper::Create(ExecutorProcessControl &EPC, SymbolAddrs SAs) {
#if (defined(LLVM_ON_UNIX) && !defined(__ANDROID__)) || defined(_WIN32)
  auto PageSize = sys::Process::getPageSize();
  if (!PageSize)
    return PageSize.takeError();

  return std::make_unique<SharedMemoryMapper>(EPC, SAs, *PageSize);
#else
  return make_error<StringError>(
      "SharedMemoryMapper is not supported on this platform yet",
      inconvertibleErrorCode());
#endif
}

void SharedMemoryMapper::reserve(size_t NumBytes,
                                 OnReservedFunction OnReserved) {
#if (defined(LLVM_ON_UNIX) && !defined(__ANDROID__)) || defined(_WIN32)

  EPC.callSPSWrapperAsync<
      rt::SPSExecutorSharedMemoryMapperServiceReserveSignature>(
      SAs.Reserve,
      [this, NumBytes, OnReserved = std::move(OnReserved)](
          Error SerializationErr,
          Expected<std::pair<ExecutorAddr, std::string>> Result) mutable {
        if (SerializationErr) {
          cantFail(Result.takeError());
          return OnReserved(std::move(SerializationErr));
        }

        if (!Result)
          return OnReserved(Result.takeError());

        ExecutorAddr RemoteAddr;
        std::string SharedMemoryName;
        std::tie(RemoteAddr, SharedMemoryName) = std::move(*Result);

        void *LocalAddr = nullptr;

#if defined(LLVM_ON_UNIX)

#if defined(__MVS__)
        // On z/OS, derive a SysV IPC key from the shared memory name and
        // attach to the segment with shmat.
        ArrayRef<uint8_t> Data(
            reinterpret_cast<const uint8_t *>(SharedMemoryName.c_str()),
            SharedMemoryName.size());
        auto HashedName = BLAKE3::hash<sizeof(key_t)>(Data);
        key_t Key = *reinterpret_cast<key_t *>(HashedName.data());
        int SharedMemoryId =
            shmget(Key, NumBytes, IPC_CREAT | __IPC_SHAREAS | 0700);
        if (SharedMemoryId < 0) {
          return OnReserved(errorCodeToError(
              std::error_code(errno, std::generic_category())));
        }
        LocalAddr = shmat(SharedMemoryId, nullptr, 0);
        if (LocalAddr == reinterpret_cast<void *>(-1)) {
          return OnReserved(errorCodeToError(
              std::error_code(errno, std::generic_category())));
        }
#else
        int SharedMemoryFile = shm_open(SharedMemoryName.c_str(), O_RDWR, 0700);
        if (SharedMemoryFile < 0) {
          return OnReserved(errorCodeToError(errnoAsErrorCode()));
        }

        // Unlink the name so other processes cannot open the region by name;
        // the already-open file descriptor remains valid for the mmap below.
        shm_unlink(SharedMemoryName.c_str());

        LocalAddr = mmap(nullptr, NumBytes, PROT_READ | PROT_WRITE, MAP_SHARED,
                         SharedMemoryFile, 0);
        if (LocalAddr == MAP_FAILED) {
          return OnReserved(errorCodeToError(errnoAsErrorCode()));
        }

        close(SharedMemoryFile);
#endif

#elif defined(_WIN32)

        std::wstring WideSharedMemoryName(SharedMemoryName.begin(),
                                          SharedMemoryName.end());
        HANDLE SharedMemoryFile = OpenFileMappingW(
            FILE_MAP_ALL_ACCESS, FALSE, WideSharedMemoryName.c_str());
        if (!SharedMemoryFile)
          return OnReserved(errorCodeToError(mapWindowsError(GetLastError())));

        LocalAddr =
            MapViewOfFile(SharedMemoryFile, FILE_MAP_ALL_ACCESS, 0, 0, 0);
        if (!LocalAddr) {
          CloseHandle(SharedMemoryFile);
          return OnReserved(errorCodeToError(mapWindowsError(GetLastError())));
        }

        CloseHandle(SharedMemoryFile);

#endif
        {
          std::lock_guard<std::mutex> Lock(Mutex);
          Reservations.insert({RemoteAddr, {LocalAddr, NumBytes}});
        }

        OnReserved(ExecutorAddrRange(RemoteAddr, NumBytes));
      },
      SAs.Instance, static_cast<uint64_t>(NumBytes));

#else
  OnReserved(make_error<StringError>(
      "SharedMemoryMapper is not supported on this platform yet",
      inconvertibleErrorCode()));
#endif
}

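// prepare() translates a remote executor address inside a reservation into
// the corresponding address in this process's local mapping, so content can
// be written here before it is finalized in the executor.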
char *SharedMemoryMapper::prepare(ExecutorAddr Addr, size_t ContentSize) {
  auto R = Reservations.upper_bound(Addr);
  assert(R != Reservations.begin() && "Attempt to prepare unreserved range");
  R--;

  ExecutorAddrDiff Offset = Addr - R->first;

  return static_cast<char *>(R->second.LocalAddr) + Offset;
}

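// initialize() zero-fills each segment's tail in the local mapping, then
// sends a SharedMemoryFinalizeRequest so the executor-side service applies
// memory protections and runs the finalize actions in the target process.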
void SharedMemoryMapper::initialize(MemoryMapper::AllocInfo &AI,
                                    OnInitializedFunction OnInitialized) {
  auto Reservation = Reservations.upper_bound(AI.MappingBase);
  assert(Reservation != Reservations.begin() &&
         "Attempt to initialize unreserved range");
  Reservation--;

  auto AllocationOffset = AI.MappingBase - Reservation->first;

  tpctypes::SharedMemoryFinalizeRequest FR;

  AI.Actions.swap(FR.Actions);

  FR.Segments.reserve(AI.Segments.size());

  for (auto Segment : AI.Segments) {
    char *Base = static_cast<char *>(Reservation->second.LocalAddr) +
                 AllocationOffset + Segment.Offset;
    std::memset(Base + Segment.ContentSize, 0, Segment.ZeroFillSize);

    tpctypes::SharedMemorySegFinalizeRequest SegReq;
    SegReq.RAG = {Segment.AG.getMemProt(),
                  Segment.AG.getMemLifetime() == MemLifetime::Finalize};
    SegReq.Addr = AI.MappingBase + Segment.Offset;
    SegReq.Size = Segment.ContentSize + Segment.ZeroFillSize;

    FR.Segments.push_back(SegReq);
  }

  EPC.callSPSWrapperAsync<
      rt::SPSExecutorSharedMemoryMapperServiceInitializeSignature>(
      SAs.Initialize,
      [OnInitialized = std::move(OnInitialized)](
          Error SerializationErr, Expected<ExecutorAddr> Result) mutable {
        if (SerializationErr) {
          cantFail(Result.takeError());
          return OnInitialized(std::move(SerializationErr));
        }

        OnInitialized(std::move(Result));
      },
      SAs.Instance, Reservation->first, std::move(FR));
}

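// deinitialize() forwards the request to the executor-side service, which
// runs the recorded dealloc actions for the given allocations.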
void SharedMemoryMapper::deinitialize(
    ArrayRef<ExecutorAddr> Allocations,
    MemoryMapper::OnDeinitializedFunction OnDeinitialized) {
  EPC.callSPSWrapperAsync<
      rt::SPSExecutorSharedMemoryMapperServiceDeinitializeSignature>(
      SAs.Deinitialize,
      [OnDeinitialized = std::move(OnDeinitialized)](Error SerializationErr,
                                                     Error Result) mutable {
        if (SerializationErr) {
          cantFail(std::move(Result));
          return OnDeinitialized(std::move(SerializationErr));
        }

        OnDeinitialized(std::move(Result));
      },
      SAs.Instance, Allocations);
}

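// release() detaches the local view of each shared memory region, then asks
// the executor-side service to release its side of the mapping.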
void SharedMemoryMapper::release(ArrayRef<ExecutorAddr> Bases,
                                 OnReleasedFunction OnReleased) {
#if (defined(LLVM_ON_UNIX) && !defined(__ANDROID__)) || defined(_WIN32)
  Error Err = Error::success();

  {
    std::lock_guard<std::mutex> Lock(Mutex);

    for (auto Base : Bases) {

#if defined(LLVM_ON_UNIX)

#if defined(__MVS__)
      if (shmdt(Reservations[Base].LocalAddr) < 0)
        Err = joinErrors(std::move(Err), errorCodeToError(errnoAsErrorCode()));
#else
      if (munmap(Reservations[Base].LocalAddr, Reservations[Base].Size) != 0)
        Err = joinErrors(std::move(Err), errorCodeToError(errnoAsErrorCode()));
#endif

#elif defined(_WIN32)

      if (!UnmapViewOfFile(Reservations[Base].LocalAddr))
        Err = joinErrors(std::move(Err),
                         errorCodeToError(mapWindowsError(GetLastError())));

#endif

      Reservations.erase(Base);
    }
  }

  EPC.callSPSWrapperAsync<
      rt::SPSExecutorSharedMemoryMapperServiceReleaseSignature>(
      SAs.Release,
      [OnReleased = std::move(OnReleased),
       Err = std::move(Err)](Error SerializationErr, Error Result) mutable {
        if (SerializationErr) {
          cantFail(std::move(Result));
          return OnReleased(
              joinErrors(std::move(Err), std::move(SerializationErr)));
        }

        return OnReleased(joinErrors(std::move(Err), std::move(Result)));
      },
      SAs.Instance, Bases);
#else
  OnReleased(make_error<StringError>(
      "SharedMemoryMapper is not supported on this platform yet",
      inconvertibleErrorCode()));
#endif
}

SharedMemoryMapper::~SharedMemoryMapper() {
  std::lock_guard<std::mutex> Lock(Mutex);
  for (const auto &R : Reservations) {

#if defined(LLVM_ON_UNIX) && !defined(__ANDROID__)

#if defined(__MVS__)
    shmdt(R.second.LocalAddr);
#else
    munmap(R.second.LocalAddr, R.second.Size);
#endif

#elif defined(_WIN32)

    UnmapViewOfFile(R.second.LocalAddr);

#else

    (void)R;

#endif
  }
}

} // namespace orc

} // namespace llvm