//===-- Shared memory RPC client / server interface -------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements a remote procedure call mechanism to communicate
// between heterogeneous devices that can share an address space atomically.
// We provide a client and a server to facilitate the remote call. The client
// makes requests to the server using a shared communication channel. We use
// separate atomic signals to indicate which side, the client or the server,
// currently owns the buffer.
//
//===----------------------------------------------------------------------===//
16 //===----------------------------------------------------------------------===//
18 #ifndef LLVM_LIBC_SRC___SUPPORT_RPC_RPC_H
19 #define LLVM_LIBC_SRC___SUPPORT_RPC_RPC_H
21 #include "rpc_util.h"
22 #include "src/__support/CPP/algorithm.h" // max
23 #include "src/__support/CPP/atomic.h"
24 #include "src/__support/CPP/functional.h"
25 #include "src/__support/CPP/optional.h"
26 #include "src/__support/GPU/utils.h"
28 #include <stdint.h>
30 namespace LIBC_NAMESPACE {
31 namespace rpc {
/// A fixed size channel used to communicate between the RPC client and server.
struct Buffer {
  uint64_t data[8];
};
static_assert(sizeof(Buffer) == 64, "Buffer size mismatch");

/// The information associated with a packet. This indicates which operations
/// to perform and which threads are active in the slots.
struct Header {
  uint64_t mask;
  uint16_t opcode;
};

/// The data payload for the associated packet. We provide enough space for
/// each thread in the cooperating lane to have a buffer.
template <uint32_t lane_size = gpu::LANE_SIZE> struct Payload {
  Buffer slot[lane_size];
};

/// A packet used to share data between the client and server across an entire
/// lane. We use a lane as the minimum granularity for execution.
template <uint32_t lane_size = gpu::LANE_SIZE> struct alignas(64) Packet {
  Header header;
  Payload<lane_size> payload;
};

/// The maximum number of parallel ports that the RPC interface can support.
constexpr uint64_t MAX_PORT_COUNT = 512;

/// A common process used to synchronize communication between a client and a
/// server. The process contains a read-only inbox and a write-only outbox used
/// for signaling ownership of the shared buffer between both sides. We assign
/// ownership of the buffer to the client if the inbox and outbox bits match,
/// otherwise it is owned by the server.
///
/// This process is designed to allow the client and the server to exchange
/// data using a fixed size packet in a mostly arbitrary order using the 'send'
/// and 'recv' operations. The following restrictions to this scheme apply:
///   - The client will always start with a 'send' operation.
///   - The server will always start with a 'recv' operation.
///   - Every 'send' or 'recv' call is mirrored by the other process.
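///
/// As an illustration, viewed from the client with inbox bit I and outbox bit
/// O for one port, a full round trip toggles each mailbox bit once:
///   (I=0, O=0) client owns -> client 'send' flips O ->
///   (I=0, O=1) server owns -> server services and flips its outbox ->
///   (I=1, O=1) client owns again, and so on.
/// The server sees the same bits with the roles of inbox and outbox swapped,
/// which is what the 'Invert' template parameter below encodes.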
template <bool Invert, typename Packet> struct Process {
  LIBC_INLINE Process() = default;
  LIBC_INLINE Process(const Process &) = delete;
  LIBC_INLINE Process &operator=(const Process &) = delete;
  LIBC_INLINE Process(Process &&) = default;
  LIBC_INLINE Process &operator=(Process &&) = default;
  LIBC_INLINE ~Process() = default;

  uint32_t port_count = 0;
  cpp::Atomic<uint32_t> *inbox = nullptr;
  cpp::Atomic<uint32_t> *outbox = nullptr;
  Packet *packet = nullptr;

  static constexpr uint64_t NUM_BITS_IN_WORD = sizeof(uint32_t) * 8;
  cpp::Atomic<uint32_t> lock[MAX_PORT_COUNT / NUM_BITS_IN_WORD] = {0};

  LIBC_INLINE Process(uint32_t port_count, void *buffer)
      : port_count(port_count),
        inbox(reinterpret_cast<cpp::Atomic<uint32_t> *>(
            advance(buffer, inbox_offset(port_count)))),
        outbox(reinterpret_cast<cpp::Atomic<uint32_t> *>(
            advance(buffer, outbox_offset(port_count)))),
        packet(reinterpret_cast<Packet *>(
            advance(buffer, buffer_offset(port_count)))) {}

  /// Returns the size of a memory buffer sufficient to store the following
  /// equivalent representation in memory:
  ///
  /// struct Equivalent {
  ///   Atomic<uint32_t> primary[port_count];
  ///   Atomic<uint32_t> secondary[port_count];
  ///   Packet buffer[port_count];
  /// };
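  ///
  /// For example, with port_count = 4 the two mailboxes occupy
  /// 2 * 4 * sizeof(uint32_t) = 32 bytes, so the packet array begins at
  /// align_up(32, alignof(Packet)) = 64 and the total allocation is
  /// 64 + 4 * sizeof(Packet) bytes.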
  LIBC_INLINE static constexpr uint64_t allocation_size(uint32_t port_count) {
    return buffer_offset(port_count) + buffer_bytes(port_count);
  }

  /// Retrieve the inbox state from memory shared between processes.
  LIBC_INLINE uint32_t load_inbox(uint64_t lane_mask, uint32_t index) const {
    return gpu::broadcast_value(lane_mask,
                                inbox[index].load(cpp::MemoryOrder::RELAXED));
  }

  /// Retrieve the outbox state from memory shared between processes.
  LIBC_INLINE uint32_t load_outbox(uint64_t lane_mask, uint32_t index) const {
    return gpu::broadcast_value(lane_mask,
                                outbox[index].load(cpp::MemoryOrder::RELAXED));
  }

  /// Signal to the other process that this one is finished with the buffer.
  /// Equivalent to loading the outbox followed by a store of the inverted
  /// value. The outbox is written only by this warp, so tracking the value
  /// locally is cheaper than calling load_outbox to get the value to store.
  LIBC_INLINE uint32_t invert_outbox(uint32_t index, uint32_t current_outbox) {
    uint32_t inverted_outbox = !current_outbox;
    atomic_thread_fence(cpp::MemoryOrder::RELEASE);
    outbox[index].store(inverted_outbox, cpp::MemoryOrder::RELAXED);
    return inverted_outbox;
  }

  // Given the current outbox and inbox values, wait until the inbox changes
  // to indicate that this thread owns the buffer element.
  LIBC_INLINE void wait_for_ownership(uint64_t lane_mask, uint32_t index,
                                      uint32_t outbox, uint32_t in) {
    while (buffer_unavailable(in, outbox)) {
      sleep_briefly();
      in = load_inbox(lane_mask, index);
    }
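    // The polling loads above are relaxed; this acquire fence pairs with the
    // release fence in invert_outbox so that reads of the packet cannot be
    // reordered before the ownership check succeeds.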
    atomic_thread_fence(cpp::MemoryOrder::ACQUIRE);
  }

  /// Determines if this process needs to wait for ownership of the buffer. We
  /// invert the condition on one of the processes to indicate that if one
  /// process owns the buffer then the other does not.
  LIBC_INLINE static bool buffer_unavailable(uint32_t in, uint32_t out) {
    bool cond = in != out;
    return Invert ? !cond : cond;
  }

  /// Attempt to claim the lock at index. Returns true when the lock is taken.
  /// lane_mask is a bitmap of the threads in the warp that would hold the
  /// single lock on success, e.g. the result of gpu::get_lane_mask().
  /// The lock is held when the n-th bit of the lock bitfield is set.
  [[clang::convergent]] LIBC_INLINE bool try_lock(uint64_t lane_mask,
                                                  uint32_t index) {
    // On amdgpu, a test-and-set of the nth lock bit and a sync_lane would
    // suffice. On volta, we need to handle differences between the threads
    // running now and the threads that were detected in the previous call to
    // get_lane_mask().
    //
    // All threads in lane_mask try to claim the lock. At most one can succeed.
    // There may be threads active that are not in lane_mask; they must not
    // succeed in taking the lock, as otherwise it will leak. This is handled
    // by having threads that are not in lane_mask OR in a zero, making their
    // fetch_or a no-op.
    uint32_t id = gpu::get_lane_id();
    bool id_in_lane_mask = lane_mask & (1ul << id);

    // All threads in the warp call fetch_or, possibly at the same time.
    bool before = set_nth(lock, index, id_in_lane_mask);
    uint64_t packed = gpu::ballot(lane_mask, before);

    // If every bit set in lane_mask is also set in packed, every single thread
    // in the warp failed to get the lock. Ballot returns unset for threads not
    // in the lane mask.
    //
    // Cases, per thread:
    //   mask==0 -> unspecified before, discarded by ballot -> 0
    //   mask==1 and before==0 (success), set zero by ballot -> 0
    //   mask==1 and before==1 (failure), set one by ballot -> 1
    //
    // mask != packed implies at least one of the threads got the lock, and the
    // atomic semantics of fetch_or mean at most one of the threads got it.
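    //
    // As a concrete illustration (hypothetical values): with
    // lane_mask == 0b0110 and the lock bit initially clear, lanes 1 and 2
    // both fetch_or the same bit; exactly one of them, say lane 1, observes
    // before == 0, so packed == 0b0100 != lane_mask and the warp holds the
    // lock. If the bit was already set, both lanes observe before == 1,
    // packed == 0b0110 == lane_mask, and the claim fails.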
    //
    // If holding the lock then the caller can load values knowing said loads
    // won't move past the lock. No such guarantee is needed if the lock
    // acquire failed. This conditional branch is expected to fold in the
    // caller after inlining the current function.
    bool holding_lock = lane_mask != packed;
    if (holding_lock)
      atomic_thread_fence(cpp::MemoryOrder::ACQUIRE);
    return holding_lock;
  }

  /// Unlock the lock at index. We need a lane sync to keep this function
  /// convergent, otherwise the compiler will sink the store and deadlock.
  [[clang::convergent]] LIBC_INLINE void unlock(uint64_t lane_mask,
                                                uint32_t index) {
    // Do not move any writes past the unlock.
    atomic_thread_fence(cpp::MemoryOrder::RELEASE);

    // Wait for other threads in the warp to finish using the lock.
    gpu::sync_lane(lane_mask);

    // Use exactly one thread to clear the nth bit in the lock array. We must
    // restrict to a single thread to avoid one thread dropping the lock, then
    // an unrelated warp claiming the lock, then a second thread in this warp
    // dropping the lock again.
    clear_nth(lock, index, gpu::is_first_lane(lane_mask));
    gpu::sync_lane(lane_mask);
  }

  /// Number of bytes to allocate for an inbox or outbox.
  LIBC_INLINE static constexpr uint64_t mailbox_bytes(uint32_t port_count) {
    return port_count * sizeof(cpp::Atomic<uint32_t>);
  }

  /// Number of bytes to allocate for the buffer containing the packets.
  LIBC_INLINE static constexpr uint64_t buffer_bytes(uint32_t port_count) {
    return port_count * sizeof(Packet);
  }

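  // The shared allocation is laid out as follows; when Invert is set, the
  // inbox and outbox regions trade places so that one process's outbox is the
  // other process's inbox:
  //
  //   [ inbox: port_count x u32 | outbox: port_count x u32 | pad | packets ]
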
  /// Offset of the inbox in memory. This is the same as the outbox if
  /// inverted.
  LIBC_INLINE static constexpr uint64_t inbox_offset(uint32_t port_count) {
    return Invert ? mailbox_bytes(port_count) : 0;
  }

  /// Offset of the outbox in memory. This is the same as the inbox if
  /// inverted.
  LIBC_INLINE static constexpr uint64_t outbox_offset(uint32_t port_count) {
    return Invert ? 0 : mailbox_bytes(port_count);
  }

  /// Offset of the buffer containing the packets after the inbox and outbox.
  LIBC_INLINE static constexpr uint64_t buffer_offset(uint32_t port_count) {
    return align_up(2 * mailbox_bytes(port_count), alignof(Packet));
  }

  /// Conditionally set the n-th bit in the atomic bitfield. Returns the
  /// previous value of the bit. When cond is false we OR in a zero, which
  /// leaves the bitfield unchanged.
  LIBC_INLINE static constexpr uint32_t set_nth(cpp::Atomic<uint32_t> *bits,
                                                uint32_t index, bool cond) {
    uint32_t slot = index / NUM_BITS_IN_WORD;
    uint32_t bit = index % NUM_BITS_IN_WORD;
    return bits[slot].fetch_or(static_cast<uint32_t>(cond) << bit,
                               cpp::MemoryOrder::RELAXED) &
           (1u << bit);
  }

  /// Conditionally clear the n-th bit in the atomic bitfield. Returns the
  /// previous value of the bit.
  LIBC_INLINE static constexpr uint32_t clear_nth(cpp::Atomic<uint32_t> *bits,
                                                  uint32_t index, bool cond) {
    uint32_t slot = index / NUM_BITS_IN_WORD;
    uint32_t bit = index % NUM_BITS_IN_WORD;
    return bits[slot].fetch_and(~0u ^ (static_cast<uint32_t>(cond) << bit),
                                cpp::MemoryOrder::RELAXED) &
           (1u << bit);
  }
};

/// Invokes a function on every active buffer across the total lane size.
template <uint32_t lane_size>
static LIBC_INLINE void invoke_rpc(cpp::function<void(Buffer *)> fn,
                                   Packet<lane_size> &packet) {
  if constexpr (is_process_gpu()) {
    fn(&packet.payload.slot[gpu::get_lane_id()]);
  } else {
    for (uint32_t i = 0; i < lane_size; i += gpu::get_lane_size())
      if (packet.header.mask & 1ul << i)
        fn(&packet.payload.slot[i]);
  }
}

/// Alternate version that also provides the index of the current lane.
template <uint32_t lane_size>
static LIBC_INLINE void invoke_rpc(cpp::function<void(Buffer *, uint32_t)> fn,
                                   Packet<lane_size> &packet) {
  if constexpr (is_process_gpu()) {
    fn(&packet.payload.slot[gpu::get_lane_id()], gpu::get_lane_id());
  } else {
    for (uint32_t i = 0; i < lane_size; i += gpu::get_lane_size())
      if (packet.header.mask & 1ul << i)
        fn(&packet.payload.slot[i], i);
  }
}

/// The port provides the interface to communicate between the multiple
/// processes. A port is conceptually an index into the memory provided by the
/// underlying process that is guarded by a lock bit.
template <bool T, typename S> struct Port {
  LIBC_INLINE Port(Process<T, S> &process, uint64_t lane_mask, uint32_t index,
                   uint32_t out)
      : process(process), lane_mask(lane_mask), index(index), out(out),
        receive(false), owns_buffer(true) {}
  LIBC_INLINE ~Port() = default;

private:
  LIBC_INLINE Port(const Port &) = delete;
  LIBC_INLINE Port &operator=(const Port &) = delete;
  LIBC_INLINE Port(Port &&) = default;
  LIBC_INLINE Port &operator=(Port &&) = default;

  friend struct Client;
  template <uint32_t U> friend struct Server;
  friend class cpp::optional<Port<T, S>>;

public:
  template <typename U> LIBC_INLINE void recv(U use);
  template <typename F> LIBC_INLINE void send(F fill);
  template <typename F, typename U>
  LIBC_INLINE void send_and_recv(F fill, U use);
  template <typename W> LIBC_INLINE void recv_and_send(W work);
  LIBC_INLINE void send_n(const void *const *src, uint64_t *size);
  LIBC_INLINE void send_n(const void *src, uint64_t size);
  template <typename A>
  LIBC_INLINE void recv_n(void **dst, uint64_t *size, A &&alloc);

  LIBC_INLINE uint16_t get_opcode() const {
    return process.packet[index].header.opcode;
  }

  LIBC_INLINE uint16_t get_index() const { return index; }

  LIBC_INLINE void close() {
    // The server is passive; if it owns the buffer when it closes, we need to
    // give ownership back to the client.
    if (owns_buffer && T)
      out = process.invert_outbox(index, out);
    process.unlock(lane_mask, index);
  }

private:
  Process<T, S> &process;
  uint64_t lane_mask;
  uint32_t index;
  uint32_t out;
  bool receive;
  bool owns_buffer;
};

/// The RPC client used to make requests to the server.
struct Client {
  LIBC_INLINE Client() = default;
  LIBC_INLINE Client(const Client &) = delete;
  LIBC_INLINE Client &operator=(const Client &) = delete;
  LIBC_INLINE ~Client() = default;

  LIBC_INLINE Client(uint32_t port_count, void *buffer)
      : process(port_count, buffer) {}

  using Port = rpc::Port<false, Packet<gpu::LANE_SIZE>>;
  template <uint16_t opcode> LIBC_INLINE Port open();

private:
  Process<false, Packet<gpu::LANE_SIZE>> process;
};
static_assert(cpp::is_trivially_copyable<Client>::value &&
                  sizeof(Process<false, Packet<1>>) ==
                      sizeof(Process<false, Packet<32>>),
              "The client is not trivially copyable from the server");

/// The RPC server used to respond to the client.
template <uint32_t lane_size> struct Server {
  LIBC_INLINE Server() = default;
  LIBC_INLINE Server(const Server &) = delete;
  LIBC_INLINE Server &operator=(const Server &) = delete;
  LIBC_INLINE ~Server() = default;

  LIBC_INLINE Server(uint32_t port_count, void *buffer)
      : process(port_count, buffer) {}

  using Port = rpc::Port<true, Packet<lane_size>>;
  LIBC_INLINE cpp::optional<Port> try_open(uint32_t start = 0);
  LIBC_INLINE Port open();

  LIBC_INLINE static uint64_t allocation_size(uint32_t port_count) {
    return Process<true, Packet<lane_size>>::allocation_size(port_count);
  }

private:
  Process<true, Packet<lane_size>> process;
};

/// Applies \p fill to the shared buffer and initiates a send operation.
template <bool T, typename S>
template <typename F>
LIBC_INLINE void Port<T, S>::send(F fill) {
  uint32_t in = owns_buffer ? out ^ T : process.load_inbox(lane_mask, index);

  // We need to wait until we own the buffer before sending.
  process.wait_for_ownership(lane_mask, index, out, in);

  // Apply the \p fill function to initialize the buffer and release the
  // memory.
  invoke_rpc(fill, process.packet[index]);
  out = process.invert_outbox(index, out);
  owns_buffer = false;
  receive = false;
}

/// Applies \p use to the shared buffer and acknowledges the send.
template <bool T, typename S>
template <typename U>
LIBC_INLINE void Port<T, S>::recv(U use) {
  // We only exchange ownership of the buffer during a receive if we are
  // waiting for a previous receive to finish.
  if (receive) {
    out = process.invert_outbox(index, out);
    owns_buffer = false;
  }

  uint32_t in = owns_buffer ? out ^ T : process.load_inbox(lane_mask, index);

  // We need to wait until we own the buffer before receiving.
  process.wait_for_ownership(lane_mask, index, out, in);

  // Apply the \p use function to read the memory out of the buffer.
  invoke_rpc(use, process.packet[index]);
  receive = true;
  owns_buffer = true;
}

/// Combines a send and receive into a single function.
template <bool T, typename S>
template <typename F, typename U>
LIBC_INLINE void Port<T, S>::send_and_recv(F fill, U use) {
  send(fill);
  recv(use);
}

/// Combines a receive and send operation into a single function. The \p work
/// function modifies the buffer in-place and the send is only used to initiate
/// the copy back.
template <bool T, typename S>
template <typename W>
LIBC_INLINE void Port<T, S>::recv_and_send(W work) {
  recv(work);
  send([](Buffer *) { /* no-op */ });
}

/// Helper routine to simplify the interface when sending from the GPU using
/// thread private pointers to the underlying value.
template <bool T, typename S>
LIBC_INLINE void Port<T, S>::send_n(const void *src, uint64_t size) {
  const void **src_ptr = &src;
  uint64_t *size_ptr = &size;
  send_n(src_ptr, size_ptr);
}

/// Sends an arbitrarily sized data buffer \p src across the shared channel in
/// multiples of the packet length.
template <bool T, typename S>
LIBC_INLINE void Port<T, S>::send_n(const void *const *src, uint64_t *size) {
  uint64_t num_sends = 0;
  send([&](Buffer *buffer, uint32_t id) {
    reinterpret_cast<uint64_t *>(buffer->data)[0] = lane_value(size, id);
    num_sends = is_process_gpu() ? lane_value(size, id)
                                 : cpp::max(lane_value(size, id), num_sends);
    uint64_t len =
        lane_value(size, id) > sizeof(Buffer::data) - sizeof(uint64_t)
            ? sizeof(Buffer::data) - sizeof(uint64_t)
            : lane_value(size, id);
    rpc_memcpy(&buffer->data[1], lane_value(src, id), len);
  });
  uint64_t idx = sizeof(Buffer::data) - sizeof(uint64_t);
  uint64_t mask = process.packet[index].header.mask;
  while (gpu::ballot(mask, idx < num_sends)) {
    send([=](Buffer *buffer, uint32_t id) {
      uint64_t len = lane_value(size, id) - idx > sizeof(Buffer::data)
                         ? sizeof(Buffer::data)
                         : lane_value(size, id) - idx;
      if (idx < lane_value(size, id))
        rpc_memcpy(buffer->data, advance(lane_value(src, id), idx), len);
    });
    idx += sizeof(Buffer::data);
  }
}

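// Wire format note: the first packet stores the total size in data[0] and up
// to 56 bytes of payload in data[1..7]; each subsequent packet carries up to
// the full 64 bytes. For example, sending 100 bytes takes two sends: 56 bytes
// in the first packet and the remaining 44 in the second.
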
/// Receives an arbitrarily sized data buffer across the shared channel in
/// multiples of the packet length. The \p alloc function is called with the
/// size of the data so that we can initialize the size of the \p dst buffer.
template <bool T, typename S>
template <typename A>
LIBC_INLINE void Port<T, S>::recv_n(void **dst, uint64_t *size, A &&alloc) {
  uint64_t num_recvs = 0;
  recv([&](Buffer *buffer, uint32_t id) {
    lane_value(size, id) = reinterpret_cast<uint64_t *>(buffer->data)[0];
    lane_value(dst, id) =
        reinterpret_cast<uint8_t *>(alloc(lane_value(size, id)));
    num_recvs = is_process_gpu() ? lane_value(size, id)
                                 : cpp::max(lane_value(size, id), num_recvs);
    uint64_t len =
        lane_value(size, id) > sizeof(Buffer::data) - sizeof(uint64_t)
            ? sizeof(Buffer::data) - sizeof(uint64_t)
            : lane_value(size, id);
    rpc_memcpy(lane_value(dst, id), &buffer->data[1], len);
  });
  uint64_t idx = sizeof(Buffer::data) - sizeof(uint64_t);
  uint64_t mask = process.packet[index].header.mask;
  while (gpu::ballot(mask, idx < num_recvs)) {
    recv([=](Buffer *buffer, uint32_t id) {
      uint64_t len = lane_value(size, id) - idx > sizeof(Buffer::data)
                         ? sizeof(Buffer::data)
                         : lane_value(size, id) - idx;
      if (idx < lane_value(size, id))
        rpc_memcpy(advance(lane_value(dst, id), idx), buffer->data, len);
    });
    idx += sizeof(Buffer::data);
  }
}

/// Continually attempts to open a port to use as the client. The client can
/// only open a port if it finds an index that is in a valid sending state,
/// that is, a port whose buffer the client owns with no unserviced operations
/// pending. Each port instance uses an associated \p opcode to tell the
/// server what to do.
template <uint16_t opcode> LIBC_INLINE Client::Port Client::open() {
  // Repeatedly perform a naive linear scan for a port that can be opened to
  // send data.
  for (uint32_t index = 0;; ++index) {
    // Start from the beginning if we run out of ports to check.
    if (index >= process.port_count)
      index = 0;

    // Attempt to acquire the lock on this index.
    uint64_t lane_mask = gpu::get_lane_mask();
    if (!process.try_lock(lane_mask, index))
      continue;

    uint32_t in = process.load_inbox(lane_mask, index);
    uint32_t out = process.load_outbox(lane_mask, index);

    // Once we acquire the index we need to check if we are in a valid sending
    // state.
    if (process.buffer_unavailable(in, out)) {
      process.unlock(lane_mask, index);
      continue;
    }

    if (gpu::is_first_lane(lane_mask)) {
      process.packet[index].header.opcode = opcode;
      process.packet[index].header.mask = lane_mask;
    }
    gpu::sync_lane(lane_mask);
    return Port(process, lane_mask, index, out);
  }
}

/// Attempts to open a port to use as the server. The server can only open a
/// port if it has a pending receive operation.
template <uint32_t lane_size>
[[clang::convergent]] LIBC_INLINE
    cpp::optional<typename Server<lane_size>::Port>
    Server<lane_size>::try_open(uint32_t start) {
  // Perform a naive linear scan for a port that has a pending request.
  for (uint32_t index = start; index < process.port_count; ++index) {
    uint64_t lane_mask = gpu::get_lane_mask();
    uint32_t in = process.load_inbox(lane_mask, index);
    uint32_t out = process.load_outbox(lane_mask, index);

    // The server is passive; if there is no work pending, don't bother
    // opening a port.
    if (process.buffer_unavailable(in, out))
      continue;

    // Attempt to acquire the lock on this index.
    if (!process.try_lock(lane_mask, index))
      continue;
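
    // Re-check the mailboxes after taking the lock; another warp may have
    // serviced this port between the first check and the lock acquisition.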
    in = process.load_inbox(lane_mask, index);
    out = process.load_outbox(lane_mask, index);

    if (process.buffer_unavailable(in, out)) {
      process.unlock(lane_mask, index);
      continue;
    }

    return Port(process, lane_mask, index, out);
  }
  return cpp::nullopt;
}

template <uint32_t lane_size>
LIBC_INLINE typename Server<lane_size>::Port Server<lane_size>::open() {
  for (;;) {
    if (cpp::optional<Server::Port> p = try_open())
      return cpp::move(p.value());
    sleep_briefly();
  }
}

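// A minimal host-side service loop sketch; the lane size, shared_buffer, and
// the handler body are hypothetical placeholders:
//
//   rpc::Server<32> server(port_count, shared_buffer);
//   for (;;) {
//     if (auto port = server.try_open()) {
//       // Dispatch on port->get_opcode() as appropriate, e.g.:
//       port->recv_and_send([](Buffer *buffer) { /* handle the request */ });
//       port->close();
//     }
//   }
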
} // namespace rpc
} // namespace LIBC_NAMESPACE

#endif