/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _RDS_RDS_H
#define _RDS_RDS_H

#include <net/sock.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <rdma/rdma_cm.h>
#include <linux/mutex.h>
#include <linux/rds.h>
#include <linux/rhashtable.h>
#include <linux/refcount.h>
#include <linux/in6.h>

#include "info.h"

/*
 * RDS Network protocol version
 */
#define RDS_PROTOCOL_3_0	0x0300
#define RDS_PROTOCOL_3_1	0x0301
#define RDS_PROTOCOL_VERSION	RDS_PROTOCOL_3_1
#define RDS_PROTOCOL_MAJOR(v)	((v) >> 8)
#define RDS_PROTOCOL_MINOR(v)	((v) & 255)
#define RDS_PROTOCOL(maj, min)	(((maj) << 8) | min)
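
/* Illustrative example, not part of the original header: the protocol
 * version packs the major number in the high byte and the minor number
 * in the low byte, so RDS_PROTOCOL(3, 1) == 0x0301,
 * RDS_PROTOCOL_MAJOR(0x0301) == 3 and RDS_PROTOCOL_MINOR(0x0301) == 1.
 */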

/* The following ports, 16385, 18634, 18635, are registered with IANA as
 * the ports to be used for RDS over TCP and UDP.  Currently, only RDS over
 * TCP and RDS over IB/RDMA are implemented.  18634 is the historical value
 * used for the RDMA_CM listener port.  RDS/TCP uses port 16385.  After
 * IPv6 work, RDMA_CM also uses 16385 as the listener port.  18634 is kept
 * to ensure compatibility with older RDS modules.  Those ports are defined
 * in each transport's header file.
 */
#define RDS_PORT	18634

#ifdef ATOMIC64_INIT
#define KERNEL_HAS_ATOMIC64
#endif

#ifdef RDS_DEBUG
#define rdsdebug(fmt, args...) pr_debug("%s(): " fmt, __func__ , ##args)
#else
/* sigh, pr_debug() causes unused variable warnings */
static inline __printf(1, 2)
void rdsdebug(char *fmt, ...)
{
}
#endif

/* XXX is there one of these somewhere? */
#define ceil(x, y) \
	({ unsigned long __x = (x), __y = (y); (__x + __y - 1) / __y; })

#define RDS_FRAG_SHIFT	12
#define RDS_FRAG_SIZE	((unsigned int)(1 << RDS_FRAG_SHIFT))

/* Used to limit both RDMA and non-RDMA RDS message to 1MB */
#define RDS_MAX_MSG_SIZE	((unsigned int)(1 << 20))
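
/* Illustrative helper, not part of the original header: a sketch of how
 * ceil() and RDS_FRAG_SIZE combine to count the fragments a payload
 * needs.  The final, partially filled fragment still counts, e.g.
 * 8193 bytes -> 3 fragments of 4096.
 */
static inline unsigned int rds_example_nr_frags(unsigned int payload_bytes)
{
	return ceil(payload_bytes, RDS_FRAG_SIZE);
}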

#define RDS_CONG_MAP_BYTES	(65536 / 8)
#define RDS_CONG_MAP_PAGES	(PAGE_ALIGN(RDS_CONG_MAP_BYTES) / PAGE_SIZE)
#define RDS_CONG_MAP_PAGE_BITS	(PAGE_SIZE * 8)

struct rds_cong_map {
	struct rb_node		m_rb_node;
	struct in6_addr		m_addr;
	wait_queue_head_t	m_waitq;
	struct list_head	m_conn_list;
	unsigned long		m_page_addrs[RDS_CONG_MAP_PAGES];
};
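
/* Illustrative note, not part of the original header: the map holds one
 * bit per RDS port (65536 bits == RDS_CONG_MAP_BYTES), spread over
 * RDS_CONG_MAP_PAGES pages.  A congested bound port is typically marked
 * with something like
 *
 *	rds_cong_set_bit(map, rs->rs_bound_port);
 *	rds_cong_queue_updates(map);
 *
 * and cleared again with rds_cong_clear_bit() once the socket drains.
 */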

/*
 * This is how we will track the connection state:
 * A connection is always in one of the following
 * states. Updates to the state are atomic and imply
 * a memory barrier.
 */
enum {
	RDS_CONN_DOWN = 0,
	RDS_CONN_CONNECTING,
	RDS_CONN_DISCONNECTING,
	RDS_CONN_UP,
	RDS_CONN_RESETTING,
	RDS_CONN_ERROR,
};

/* Bits for c_flags */
#define RDS_LL_SEND_FULL	0
#define RDS_RECONNECT_PENDING	1
#define RDS_IN_XMIT		2
#define RDS_RECV_REFILL		3
#define RDS_DESTROY_PENDING	4

/* Max number of multipaths per RDS connection. Must be a power of 2 */
#define RDS_MPATH_WORKERS	8
#define RDS_MPATH_HASH(rs, n) (jhash_1word((rs)->rs_bound_port, \
			       (rs)->rs_hash_initval) & ((n) - 1))
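
/* Illustrative example, not part of the original header: the hash picks
 * one of a connection's paths from the socket's bound port, roughly
 *
 *	cp = &conn->c_path[RDS_MPATH_HASH(rs, conn->c_npaths)];
 *
 * The "power of 2" requirement is what lets the (n - 1) mask stand in
 * for a modulo.
 */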

#define IS_CANONICAL(laddr, faddr) (htonl(laddr) < htonl(faddr))

/* Per mpath connection state */
struct rds_conn_path {
	struct rds_connection	*cp_conn;
	struct rds_message	*cp_xmit_rm;
	unsigned long		cp_xmit_sg;
	unsigned int		cp_xmit_hdr_off;
	unsigned int		cp_xmit_data_off;
	unsigned int		cp_xmit_atomic_sent;
	unsigned int		cp_xmit_rdma_sent;
	unsigned int		cp_xmit_data_sent;

	spinlock_t		cp_lock;		/* protect msg queues */
	u64			cp_next_tx_seq;
	struct list_head	cp_send_queue;
	struct list_head	cp_retrans;

	u64			cp_next_rx_seq;

	void			*cp_transport_data;

	atomic_t		cp_state;
	unsigned long		cp_send_gen;
	unsigned long		cp_flags;
	unsigned long		cp_reconnect_jiffies;
	struct delayed_work	cp_send_w;
	struct delayed_work	cp_recv_w;
	struct delayed_work	cp_conn_w;
	struct work_struct	cp_down_w;
	struct mutex		cp_cm_lock;	/* protect cp_state & cm */
	wait_queue_head_t	cp_waitq;

	unsigned int		cp_unacked_packets;
	unsigned int		cp_unacked_bytes;
	unsigned int		cp_index;
};

/* One rds_connection per RDS address pair */
struct rds_connection {
	struct hlist_node	c_hash_node;
	struct in6_addr		c_laddr;
	struct in6_addr		c_faddr;
	int			c_dev_if; /* ifindex used for this conn */
	int			c_bound_if; /* ifindex of c_laddr */
	unsigned int		c_loopback:1,
				c_isv6:1,
				c_ping_triggered:1,
				c_pad_to_32:29;
	int			c_npaths;
	struct rds_connection	*c_passive;
	struct rds_transport	*c_trans;

	struct rds_cong_map	*c_lcong;
	struct rds_cong_map	*c_fcong;

	/* Protocol version */
	unsigned int		c_version;
	possible_net_t		c_net;

	struct list_head	c_map_item;
	unsigned long		c_map_queued;

	struct rds_conn_path	*c_path;
	wait_queue_head_t	c_hs_waitq; /* handshake waitq */

	u32			c_my_gen_num;
	u32			c_peer_gen_num;
};

static inline
struct net *rds_conn_net(struct rds_connection *conn)
{
	return read_pnet(&conn->c_net);
}

static inline
void rds_conn_net_set(struct rds_connection *conn, struct net *net)
{
	write_pnet(&conn->c_net, net);
}

#define RDS_FLAG_CONG_BITMAP	0x01
#define RDS_FLAG_ACK_REQUIRED	0x02
#define RDS_FLAG_RETRANSMITTED	0x04
#define RDS_MAX_ADV_CREDIT	255

/* RDS_FLAG_PROBE_PORT is the reserved sport used for sending a ping
 * probe to exchange control information before establishing a connection.
 * Currently the control information that is exchanged is the number of
 * supported paths.  If the peer is a legacy (older kernel revision) peer,
 * it would return a pong message without additional control information
 * that would then alert the sender that the peer was an older rev.
 */
#define RDS_FLAG_PROBE_PORT	1
#define RDS_HS_PROBE(sport, dport) \
		((sport == RDS_FLAG_PROBE_PORT && dport == 0) || \
		 (sport == 0 && dport == RDS_FLAG_PROBE_PORT))
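
/* Illustrative note, not part of the original header: a handshake ping
 * travels with sport == RDS_FLAG_PROBE_PORT and dport == 0, and the
 * matching pong comes back with the ports swapped, so RDS_HS_PROBE()
 * is true for both directions of the exchange.
 */
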
/*
 * Maximum space available for extension headers.
 */
#define RDS_HEADER_EXT_SPACE	16

struct rds_header {
	__be64	h_sequence;
	__be64	h_ack;
	__be32	h_len;
	__be16	h_sport;
	__be16	h_dport;
	u8	h_flags;
	u8	h_credit;
	u8	h_padding[4];
	__sum16	h_csum;

	u8	h_exthdr[RDS_HEADER_EXT_SPACE];
};
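
/* Orientation note, not part of the original header: the fixed fields
 * above add up to 32 bytes, so with the 16-byte extension area the wire
 * header is 48 bytes per message.
 */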

/*
 * Reserved - indicates end of extensions
 */
#define RDS_EXTHDR_NONE 0

/*
 * This extension header is included in the very
 * first message that is sent on a new connection,
 * and identifies the protocol level. This will help
 * rolling updates if a future change requires breaking
 * the protocol.
 * NB: This is no longer true for IB, where we do a version
 * negotiation during the connection setup phase (protocol
 * version information is included in the RDMA CM private data).
 */
#define RDS_EXTHDR_VERSION 1
struct rds_ext_header_version {
	__be32			h_version;
};

/*
 * This extension header is included in the RDS message
 * chasing an RDMA operation.
 */
#define RDS_EXTHDR_RDMA 2
struct rds_ext_header_rdma {
	__be32			h_rdma_rkey;
};

/*
 * This extension header tells the peer about the
 * destination <R_Key,offset> of the requested RDMA
 * operation.
 */
#define RDS_EXTHDR_RDMA_DEST 3
struct rds_ext_header_rdma_dest {
	__be32			h_rdma_rkey;
	__be32			h_rdma_offset;
};

/* Extension header announcing number of paths.
 * Implicit length = 2 bytes.
 */
#define RDS_EXTHDR_NPATHS	5
#define RDS_EXTHDR_GEN_NUM	6

#define __RDS_EXTHDR_MAX	16 /* for now */
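
/* Illustrative example, not part of the original header: extensions are
 * appended to h_exthdr[] with rds_message_add_extension() (declared
 * below).  A multipath-capable sender advertising its path count on a
 * handshake probe roughly does
 *
 *	__be16 npaths = cpu_to_be16(RDS_MPATH_WORKERS);
 *
 *	rds_message_add_extension(&rm->m_inc.i_hdr, RDS_EXTHDR_NPATHS,
 *				  &npaths, sizeof(npaths));
 *
 * and the receiver walks the area with rds_message_next_extension().
 */
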
#define RDS_RX_MAX_TRACES	(RDS_MSG_RX_DGRAM_TRACE_MAX + 1)
#define RDS_MSG_RX_HDR		0
#define RDS_MSG_RX_START	1
#define RDS_MSG_RX_END		2
#define RDS_MSG_RX_CMSG		3

struct rds_incoming {
	refcount_t		i_refcount;
	struct list_head	i_item;
	struct rds_connection	*i_conn;
	struct rds_conn_path	*i_conn_path;
	struct rds_header	i_hdr;
	unsigned long		i_rx_jiffies;
	struct in6_addr		i_saddr;

	rds_rdma_cookie_t	i_rdma_cookie;
	ktime_t			i_rx_tstamp;
	u64			i_rx_lat_trace[RDS_RX_MAX_TRACES];
};

struct rds_mr {
	struct rb_node		r_rb_node;
	refcount_t		r_refcount;
	u32			r_key;

	/* A copy of the creation flags */
	unsigned int		r_use_once:1;
	unsigned int		r_invalidate:1;
	unsigned int		r_write:1;

	/* This is for RDS_MR_DEAD.
	 * It would be nice & consistent to make this part of the above
	 * bit field here, but we need to use test_and_set_bit.
	 */
	unsigned long		r_state;
	struct rds_sock		*r_sock; /* back pointer to the socket that owns us */
	struct rds_transport	*r_trans;
	void			*r_trans_private;
};

/* Flags for mr->r_state */
#define RDS_MR_DEAD		0

static inline rds_rdma_cookie_t rds_rdma_make_cookie(u32 r_key, u32 offset)
{
	return r_key | (((u64) offset) << 32);
}

static inline u32 rds_rdma_cookie_key(rds_rdma_cookie_t cookie)
{
	return cookie;
}

static inline u32 rds_rdma_cookie_offset(rds_rdma_cookie_t cookie)
{
	return cookie >> 32;
}
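
/* Illustrative example, not part of the original header: a cookie packs
 * the 32-bit r_key in the low word and the offset in the high word, so
 * the accessors above simply invert rds_rdma_make_cookie():
 *
 *	rds_rdma_cookie_t c = rds_rdma_make_cookie(0x1234, 0x10);
 *
 *	rds_rdma_cookie_key(c)    == 0x1234
 *	rds_rdma_cookie_offset(c) == 0x10
 */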

/* atomic operation types */
#define RDS_ATOMIC_TYPE_CSWP		0
#define RDS_ATOMIC_TYPE_FADD		1

/*
 * m_sock_item and m_conn_item are on lists that are serialized under
 * conn->c_lock.  m_sock_item has additional meaning in that once it is empty
 * the message will not be put back on the retransmit list after being sent.
 * messages that are canceled while being sent rely on this.
 *
 * m_inc is used by loopback so that it can pass an incoming message straight
 * back up into the rx path.  It embeds a wire header which is also used by
 * the send path, which is kind of awkward.
 *
 * m_sock_item indicates the message's presence on a socket's send or receive
 * queue.  m_rs will point to that socket.
 *
 * m_daddr is used by cancellation to prune messages to a given destination.
 *
 * The RDS_MSG_ON_SOCK and RDS_MSG_ON_CONN flags are used to avoid lock
 * nesting.  As paths iterate over messages on a sock, or conn, they must
 * also lock the conn, or sock, to remove the message from those lists too.
 * Testing the flag to determine if the message is still on the lists lets
 * us avoid testing the list_head directly.  That means each path can use
 * the message's list_head to keep it on a local list while juggling locks
 * without confusing the other path.
 *
 * m_ack_seq is an optional field set by transports who need a different
 * sequence number range to invalidate.  They can use this in a callback
 * that they pass to rds_send_drop_acked() to see if each message has been
 * acked.  The HAS_ACK_SEQ flag can be used to detect messages which haven't
 * had ack_seq set yet.
 */
#define RDS_MSG_ON_SOCK		1
#define RDS_MSG_ON_CONN		2
#define RDS_MSG_HAS_ACK_SEQ	3
#define RDS_MSG_ACK_REQUIRED	4
#define RDS_MSG_RETRANSMITTED	5
#define RDS_MSG_MAPPED		6
#define RDS_MSG_PAGEVEC		7
#define RDS_MSG_FLUSH		8

struct rds_znotifier {
	struct mmpin		z_mmp;
	u32			z_cookie;
};

struct rds_msg_zcopy_info {
	struct list_head rs_zcookie_next;
	union {
		struct rds_znotifier znotif;
		struct rds_zcopy_cookies zcookies;
	};
};

struct rds_msg_zcopy_queue {
	struct list_head zcookie_head;
	spinlock_t lock; /* protects zcookie_head queue */
};

static inline void rds_message_zcopy_queue_init(struct rds_msg_zcopy_queue *q)
{
	spin_lock_init(&q->lock);
	INIT_LIST_HEAD(&q->zcookie_head);
}

struct rds_message {
	refcount_t		m_refcount;
	struct list_head	m_sock_item;
	struct list_head	m_conn_item;
	struct rds_incoming	m_inc;
	u64			m_ack_seq;
	struct in6_addr		m_daddr;
	unsigned long		m_flags;

	/* Never access m_rs without holding m_rs_lock.
	 * Lock nesting is
	 *  rm->m_rs_lock
	 *   -> rs->rs_lock
	 */
	spinlock_t		m_rs_lock;
	wait_queue_head_t	m_flush_wait;

	struct rds_sock		*m_rs;

	/* cookie to send to remote, in rds header */
	rds_rdma_cookie_t	m_rdma_cookie;

	unsigned int		m_used_sgs;
	unsigned int		m_total_sgs;

	void			*m_final_op;

	struct {
		struct rm_atomic_op {
			int			op_type;
			union {
				struct {
					uint64_t	compare;
					uint64_t	swap;
					uint64_t	compare_mask;
					uint64_t	swap_mask;
				} op_m_cswp;
				struct {
					uint64_t	add;
					uint64_t	nocarry_mask;
				} op_m_fadd;
			};

			u32			op_rkey;
			u64			op_remote_addr;
			unsigned int		op_notify:1;
			unsigned int		op_recverr:1;
			unsigned int		op_mapped:1;
			unsigned int		op_silent:1;
			unsigned int		op_active:1;
			struct scatterlist	*op_sg;
			struct rds_notifier	*op_notifier;

			struct rds_mr		*op_rdma_mr;
		} atomic;
		struct rm_rdma_op {
			u32			op_rkey;
			u64			op_remote_addr;
			unsigned int		op_write:1;
			unsigned int		op_fence:1;
			unsigned int		op_notify:1;
			unsigned int		op_recverr:1;
			unsigned int		op_mapped:1;
			unsigned int		op_silent:1;
			unsigned int		op_active:1;
			unsigned int		op_bytes;
			unsigned int		op_nents;
			unsigned int		op_count;
			struct scatterlist	*op_sg;
			struct rds_notifier	*op_notifier;

			struct rds_mr		*op_rdma_mr;
		} rdma;
		struct rm_data_op {
			unsigned int		op_active:1;
			unsigned int		op_notify:1;
			unsigned int		op_nents;
			unsigned int		op_count;
			unsigned int		op_dmasg;
			unsigned int		op_dmaoff;
			struct rds_znotifier	*op_mmp_znotifier;
			struct scatterlist	*op_sg;
		} data;
	};

	struct rds_conn_path *m_conn_path;
};

/*
 * The RDS notifier is used (optionally) to tell the application about
 * completed RDMA operations. Rather than keeping the whole rds message
 * around on the queue, we allocate a small notifier that is put on the
 * socket's notifier_list. Notifications are delivered to the application
 * through control messages.
 */
struct rds_notifier {
	struct list_head	n_list;
	uint64_t		n_user_token;
	int			n_status;
};

/* Available as part of RDS core, so doesn't need to participate
 * in get_preferred transport etc
 */
#define RDS_TRANS_LOOP	3

/**
 * struct rds_transport - transport specific behavioural hooks
 *
 * @xmit: .xmit is called by rds_send_xmit() to tell the transport to send
 *        part of a message.  The caller serializes on the send_sem so this
 *        doesn't need to be reentrant for a given conn.  The header must be
 *        sent before the data payload.  .xmit must be prepared to send a
 *        message with no data payload.  .xmit should return the number of
 *        bytes that were sent down the connection, including header bytes.
 *        Returning 0 tells the caller that it doesn't need to perform any
 *        additional work now.  This is usually the case when the transport has
 *        filled the sending queue for its connection and will handle
 *        triggering the rds thread to continue the send when space becomes
 *        available.  Returning -EAGAIN tells the caller to retry the send
 *        immediately.  Returning -ENOMEM tells the caller to retry the send at
 *        some point in the future.
 *
 * @conn_shutdown: conn_shutdown stops traffic on the given connection.  Once
 *                 it returns the connection can not call rds_recv_incoming().
 *                 This will only be called once after conn_connect returns
 *                 non-zero success.  The caller serializes this with
 *                 the send and connecting paths (xmit_* and conn_*).  The
 *                 transport is responsible for other serialization, including
 *                 rds_recv_incoming().  This is called in process context but
 *                 should try hard not to block.
 */
struct rds_transport {
	char			t_name[TRANSNAMSIZ];
	struct list_head	t_item;
	struct module		*t_owner;
	unsigned int		t_prefer_loopback:1,
				t_mp_capable:1;
	unsigned int		t_type;

	int (*laddr_check)(struct net *net, const struct in6_addr *addr,
			   __u32 scope_id);
	int (*conn_alloc)(struct rds_connection *conn, gfp_t gfp);
	void (*conn_free)(void *data);
	int (*conn_path_connect)(struct rds_conn_path *cp);
	void (*conn_path_shutdown)(struct rds_conn_path *conn);
	void (*xmit_path_prepare)(struct rds_conn_path *cp);
	void (*xmit_path_complete)(struct rds_conn_path *cp);
	int (*xmit)(struct rds_connection *conn, struct rds_message *rm,
		    unsigned int hdr_off, unsigned int sg, unsigned int off);
	int (*xmit_rdma)(struct rds_connection *conn, struct rm_rdma_op *op);
	int (*xmit_atomic)(struct rds_connection *conn, struct rm_atomic_op *op);
	int (*recv_path)(struct rds_conn_path *cp);
	int (*inc_copy_to_user)(struct rds_incoming *inc, struct iov_iter *to);
	void (*inc_free)(struct rds_incoming *inc);

	int (*cm_handle_connect)(struct rdma_cm_id *cm_id,
				 struct rdma_cm_event *event, bool isv6);
	int (*cm_initiate_connect)(struct rdma_cm_id *cm_id, bool isv6);
	void (*cm_connect_complete)(struct rds_connection *conn,
				    struct rdma_cm_event *event);

	unsigned int (*stats_info_copy)(struct rds_info_iterator *iter,
					unsigned int avail);
	void (*exit)(void);
	void *(*get_mr)(struct scatterlist *sg, unsigned long nr_sg,
			struct rds_sock *rs, u32 *key_ret,
			struct rds_connection *conn);
	void (*sync_mr)(void *trans_private, int direction);
	void (*free_mr)(void *trans_private, int invalidate);
	void (*flush_mrs)(void);
	bool (*t_unloading)(struct rds_connection *conn);
};
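
/* Illustrative note, not part of the original header: a transport (for
 * example TCP or IB) fills in one static instance of this ops table and
 * hands it to rds_trans_register() at module init, unregistering it
 * again with rds_trans_unregister() on exit.
 */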

/* Bind hash table key length.  It is the sum of the size of a struct
 * in6_addr, a scope_id and a port.
 */
#define RDS_BOUND_KEY_LEN \
	(sizeof(struct in6_addr) + sizeof(__u32) + sizeof(__be16))

struct rds_sock {
	struct sock		rs_sk;

	u64			rs_user_addr;
	u64			rs_user_bytes;

	/*
	 * bound_addr used for both incoming and outgoing, no INADDR_ANY
	 * support.
	 */
	struct rhash_head	rs_bound_node;
	u8			rs_bound_key[RDS_BOUND_KEY_LEN];
	struct sockaddr_in6	rs_bound_sin6;
#define rs_bound_addr		rs_bound_sin6.sin6_addr
#define rs_bound_addr_v4	rs_bound_sin6.sin6_addr.s6_addr32[3]
#define rs_bound_port		rs_bound_sin6.sin6_port
#define rs_bound_scope_id	rs_bound_sin6.sin6_scope_id
	struct in6_addr		rs_conn_addr;
#define rs_conn_addr_v4		rs_conn_addr.s6_addr32[3]
	__be16			rs_conn_port;
	struct rds_transport	*rs_transport;

	/*
	 * rds_sendmsg caches the conn it used the last time around.
	 * This helps avoid costly lookups.
	 */
	struct rds_connection	*rs_conn;

	/* flag indicating we were congested or not */
	int			rs_congested;
	/* seen congestion (ENOBUFS) when sending? */
	int			rs_seen_congestion;

	/* rs_lock protects all these adjacent members before the newline */
	spinlock_t		rs_lock;
	struct list_head	rs_send_queue;
	u32			rs_snd_bytes;
	int			rs_rcv_bytes;
	struct list_head	rs_notify_queue;	/* currently used for failed RDMAs */

	/* Congestion wake_up. If rs_cong_monitor is set, we use cong_mask
	 * to decide whether the application should be woken up.
	 * If not set, we use rs_cong_track to find out whether a cong map
	 * update arrived.
	 */
	uint64_t		rs_cong_mask;
	uint64_t		rs_cong_notify;
	struct list_head	rs_cong_list;
	unsigned long		rs_cong_track;

	/*
	 * rs_recv_lock protects the receive queue, and is
	 * used to serialize with rds_release.
	 */
	rwlock_t		rs_recv_lock;
	struct list_head	rs_recv_queue;

	/* just for stats reporting */
	struct list_head	rs_item;

	/* these have their own lock */
	spinlock_t		rs_rdma_lock;
	struct rb_root		rs_rdma_keys;

	/* Socket options - in case there will be more */
	unsigned char		rs_recverr,
				rs_cong_monitor;
	u32			rs_hash_initval;

	/* Socket receive path trace points */
	u8			rs_rx_traces;
	u8			rs_rx_trace[RDS_MSG_RX_DGRAM_TRACE_MAX];
	struct rds_msg_zcopy_queue rs_zcookie_queue;
};

static inline struct rds_sock *rds_sk_to_rs(const struct sock *sk)
{
	return container_of(sk, struct rds_sock, rs_sk);
}

static inline struct sock *rds_rs_to_sk(struct rds_sock *rs)
{
	return &rs->rs_sk;
}

/*
 * The stack assigns sk_sndbuf and sk_rcvbuf to twice the specified value
 * to account for overhead.  We don't account for overhead, we just apply
 * the number of payload bytes to the specified value.
 */
static inline int rds_sk_sndbuf(struct rds_sock *rs)
{
	return rds_rs_to_sk(rs)->sk_sndbuf / 2;
}

static inline int rds_sk_rcvbuf(struct rds_sock *rs)
{
	return rds_rs_to_sk(rs)->sk_rcvbuf / 2;
}
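
/* Illustrative example, not part of the original header: with the usual
 * doubling done by the core socket code (and absent sysctl caps), a
 * setsockopt(SO_SNDBUF, 128K) ends up as sk_sndbuf == 256K, and
 * rds_sk_sndbuf() reports the 128K of payload the caller asked for.
 */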

struct rds_statistics {
	uint64_t	s_conn_reset;
	uint64_t	s_recv_drop_bad_checksum;
	uint64_t	s_recv_drop_old_seq;
	uint64_t	s_recv_drop_no_sock;
	uint64_t	s_recv_drop_dead_sock;
	uint64_t	s_recv_deliver_raced;
	uint64_t	s_recv_delivered;
	uint64_t	s_recv_queued;
	uint64_t	s_recv_immediate_retry;
	uint64_t	s_recv_delayed_retry;
	uint64_t	s_recv_ack_required;
	uint64_t	s_recv_rdma_bytes;
	uint64_t	s_recv_ping;
	uint64_t	s_send_queue_empty;
	uint64_t	s_send_queue_full;
	uint64_t	s_send_lock_contention;
	uint64_t	s_send_lock_queue_raced;
	uint64_t	s_send_immediate_retry;
	uint64_t	s_send_delayed_retry;
	uint64_t	s_send_drop_acked;
	uint64_t	s_send_ack_required;
	uint64_t	s_send_queued;
	uint64_t	s_send_rdma;
	uint64_t	s_send_rdma_bytes;
	uint64_t	s_send_pong;
	uint64_t	s_page_remainder_hit;
	uint64_t	s_page_remainder_miss;
	uint64_t	s_copy_to_user;
	uint64_t	s_copy_from_user;
	uint64_t	s_cong_update_queued;
	uint64_t	s_cong_update_received;
	uint64_t	s_cong_send_error;
	uint64_t	s_cong_send_blocked;
	uint64_t	s_recv_bytes_added_to_socket;
	uint64_t	s_recv_bytes_removed_from_socket;
};

/* af_rds.c */
void rds_sock_addref(struct rds_sock *rs);
void rds_sock_put(struct rds_sock *rs);
void rds_wake_sk_sleep(struct rds_sock *rs);
static inline void __rds_wake_sk_sleep(struct sock *sk)
{
	wait_queue_head_t *waitq = sk_sleep(sk);

	if (!sock_flag(sk, SOCK_DEAD) && waitq)
		wake_up(waitq);
}
extern wait_queue_head_t rds_poll_waitq;

/* bind.c */
int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len);
void rds_remove_bound(struct rds_sock *rs);
struct rds_sock *rds_find_bound(const struct in6_addr *addr, __be16 port,
				__u32 scope_id);
int rds_bind_lock_init(void);
void rds_bind_lock_destroy(void);

/* cong.c */
int rds_cong_get_maps(struct rds_connection *conn);
void rds_cong_add_conn(struct rds_connection *conn);
void rds_cong_remove_conn(struct rds_connection *conn);
void rds_cong_set_bit(struct rds_cong_map *map, __be16 port);
void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port);
int rds_cong_wait(struct rds_cong_map *map, __be16 port, int nonblock, struct rds_sock *rs);
void rds_cong_queue_updates(struct rds_cong_map *map);
void rds_cong_map_updated(struct rds_cong_map *map, uint64_t);
int rds_cong_updated_since(unsigned long *recent);
void rds_cong_add_socket(struct rds_sock *);
void rds_cong_remove_socket(struct rds_sock *);
void rds_cong_exit(void);
struct rds_message *rds_cong_update_alloc(struct rds_connection *conn);

/* connection.c */
extern u32 rds_gen_num;
int rds_conn_init(void);
void rds_conn_exit(void);
struct rds_connection *rds_conn_create(struct net *net,
				       const struct in6_addr *laddr,
				       const struct in6_addr *faddr,
				       struct rds_transport *trans, gfp_t gfp,
				       int dev_if);
struct rds_connection *rds_conn_create_outgoing(struct net *net,
						const struct in6_addr *laddr,
						const struct in6_addr *faddr,
						struct rds_transport *trans,
						gfp_t gfp, int dev_if);
void rds_conn_shutdown(struct rds_conn_path *cpath);
void rds_conn_destroy(struct rds_connection *conn);
void rds_conn_drop(struct rds_connection *conn);
void rds_conn_path_drop(struct rds_conn_path *cpath, bool destroy);
void rds_conn_connect_if_down(struct rds_connection *conn);
void rds_conn_path_connect_if_down(struct rds_conn_path *cp);
void rds_for_each_conn_info(struct socket *sock, unsigned int len,
			    struct rds_info_iterator *iter,
			    struct rds_info_lengths *lens,
			    int (*visitor)(struct rds_connection *, void *),
			    u64 *buffer,
			    size_t item_len);

__printf(2, 3)
void __rds_conn_path_error(struct rds_conn_path *cp, const char *, ...);
#define rds_conn_path_error(cp, fmt...) \
	__rds_conn_path_error(cp, KERN_WARNING "RDS: " fmt)

static inline int
rds_conn_path_transition(struct rds_conn_path *cp, int old, int new)
{
	return atomic_cmpxchg(&cp->cp_state, old, new) == old;
}
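
/* Illustrative example, not part of the original header: state changes
 * are lock-free compare-and-swap operations, so a connect worker can
 * claim a path with
 *
 *	if (rds_conn_path_transition(cp, RDS_CONN_DOWN, RDS_CONN_CONNECTING))
 *		... this caller owns the connect attempt ...
 *
 * and a concurrent caller whose cmpxchg fails simply backs off.
 */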

static inline int
rds_conn_transition(struct rds_connection *conn, int old, int new)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	return rds_conn_path_transition(&conn->c_path[0], old, new);
}

static inline int
rds_conn_path_state(struct rds_conn_path *cp)
{
	return atomic_read(&cp->cp_state);
}

static inline int
rds_conn_state(struct rds_connection *conn)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	return rds_conn_path_state(&conn->c_path[0]);
}

static inline int
rds_conn_path_up(struct rds_conn_path *cp)
{
	return atomic_read(&cp->cp_state) == RDS_CONN_UP;
}

static inline int
rds_conn_up(struct rds_connection *conn)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	return rds_conn_path_up(&conn->c_path[0]);
}

static inline int
rds_conn_path_connecting(struct rds_conn_path *cp)
{
	return atomic_read(&cp->cp_state) == RDS_CONN_CONNECTING;
}

static inline int
rds_conn_connecting(struct rds_connection *conn)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	return rds_conn_path_connecting(&conn->c_path[0]);
}

/* message.c */
struct rds_message *rds_message_alloc(unsigned int nents, gfp_t gfp);
struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents);
int rds_message_copy_from_user(struct rds_message *rm, struct iov_iter *from,
			       bool zcopy);
struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned int total_len);
void rds_message_populate_header(struct rds_header *hdr, __be16 sport,
				 __be16 dport, u64 seq);
int rds_message_add_extension(struct rds_header *hdr,
			      unsigned int type, const void *data, unsigned int len);
int rds_message_next_extension(struct rds_header *hdr,
			       unsigned int *pos, void *buf, unsigned int *buflen);
int rds_message_add_rdma_dest_extension(struct rds_header *hdr, u32 r_key, u32 offset);
int rds_message_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to);
void rds_message_inc_free(struct rds_incoming *inc);
void rds_message_addref(struct rds_message *rm);
void rds_message_put(struct rds_message *rm);
void rds_message_wait(struct rds_message *rm);
void rds_message_unmapped(struct rds_message *rm);
void rds_notify_msg_zcopy_purge(struct rds_msg_zcopy_queue *info);

static inline void rds_message_make_checksum(struct rds_header *hdr)
{
	hdr->h_csum = 0;
	hdr->h_csum = ip_fast_csum((void *) hdr, sizeof(*hdr) >> 2);
}

static inline int rds_message_verify_checksum(const struct rds_header *hdr)
{
	return !hdr->h_csum || ip_fast_csum((void *) hdr, sizeof(*hdr) >> 2) == 0;
}
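
/* Illustrative note, not part of the original header: the checksum is an
 * Internet checksum over the whole 48-byte wire header.  h_csum is
 * zeroed before summing, so verification just re-sums and expects 0,
 * and a zero h_csum is treated as "no checksum".  A receive path would
 * typically do
 *
 *	if (!rds_message_verify_checksum(&inc->i_hdr)) {
 *		rds_stats_inc(s_recv_drop_bad_checksum);
 *		goto drop;
 *	}
 */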

/* page.c */
int rds_page_remainder_alloc(struct scatterlist *scat, unsigned long bytes,
			     gfp_t gfp);
void rds_page_exit(void);

/* recv.c */
void rds_inc_init(struct rds_incoming *inc, struct rds_connection *conn,
		  struct in6_addr *saddr);
void rds_inc_path_init(struct rds_incoming *inc, struct rds_conn_path *conn,
		       struct in6_addr *saddr);
void rds_inc_put(struct rds_incoming *inc);
void rds_recv_incoming(struct rds_connection *conn, struct in6_addr *saddr,
		       struct in6_addr *daddr,
		       struct rds_incoming *inc, gfp_t gfp);
int rds_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
		int msg_flags);
void rds_clear_recv_queue(struct rds_sock *rs);
int rds_notify_queue_get(struct rds_sock *rs, struct msghdr *msg);
void rds_inc_info_copy(struct rds_incoming *inc,
		       struct rds_info_iterator *iter,
		       __be32 saddr, __be32 daddr, int flip);
void rds6_inc_info_copy(struct rds_incoming *inc,
			struct rds_info_iterator *iter,
			struct in6_addr *saddr, struct in6_addr *daddr,
			int flip);

/* send.c */
int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len);
void rds_send_path_reset(struct rds_conn_path *conn);
int rds_send_xmit(struct rds_conn_path *cp);
struct sockaddr_in;
void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in6 *dest);
typedef int (*is_acked_func)(struct rds_message *rm, uint64_t ack);
void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
			 is_acked_func is_acked);
void rds_send_path_drop_acked(struct rds_conn_path *cp, u64 ack,
			      is_acked_func is_acked);
void rds_send_ping(struct rds_connection *conn, int cp_index);
int rds_send_pong(struct rds_conn_path *cp, __be16 dport);

/* rdma.c */
void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force);
int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen);
int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen);
int rds_free_mr(struct rds_sock *rs, char __user *optval, int optlen);
void rds_rdma_drop_keys(struct rds_sock *rs);
int rds_rdma_extra_size(struct rds_rdma_args *args);
int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
		       struct cmsghdr *cmsg);
int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
		       struct cmsghdr *cmsg);
int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
		      struct cmsghdr *cmsg);
void rds_rdma_free_op(struct rm_rdma_op *ro);
void rds_atomic_free_op(struct rm_atomic_op *ao);
void rds_rdma_send_complete(struct rds_message *rm, int wc_status);
void rds_atomic_send_complete(struct rds_message *rm, int wc_status);
int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm,
		    struct cmsghdr *cmsg);

void __rds_put_mr_final(struct rds_mr *mr);
static inline void rds_mr_put(struct rds_mr *mr)
{
	if (refcount_dec_and_test(&mr->r_refcount))
		__rds_put_mr_final(mr);
}

static inline bool rds_destroy_pending(struct rds_connection *conn)
{
	return !check_net(rds_conn_net(conn)) ||
		(conn->c_trans->t_unloading && conn->c_trans->t_unloading(conn));
}
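
/* Illustrative note, not part of the original header: this is the "is it
 * still worth doing work?" check used before queueing workers, e.g.
 *
 *	rcu_read_lock();
 *	if (!rds_destroy_pending(cp->cp_conn))
 *		queue_delayed_work(rds_wq, &cp->cp_send_w, 0);
 *	rcu_read_unlock();
 *
 * so a dying network namespace or an unloading transport stops
 * generating new work.
 */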

/* stats.c */
DECLARE_PER_CPU_SHARED_ALIGNED(struct rds_statistics, rds_stats);
#define rds_stats_inc_which(which, member) do {		\
	per_cpu(which, get_cpu()).member++;		\
	put_cpu();					\
} while (0)
#define rds_stats_inc(member) rds_stats_inc_which(rds_stats, member)
#define rds_stats_add_which(which, member, count) do {		\
	per_cpu(which, get_cpu()).member += count;	\
	put_cpu();					\
} while (0)
#define rds_stats_add(member, count) rds_stats_add_which(rds_stats, member, count)
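
/* Illustrative example, not part of the original header: counters live in
 * per-CPU copies of struct rds_statistics and are only summed when they
 * are reported, so hot paths bump them without shared cachelines or
 * locks:
 *
 *	rds_stats_inc(s_send_queued);
 *	rds_stats_add(s_copy_from_user, ret);
 */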
int rds_stats_init(void);
void rds_stats_exit(void);
void rds_stats_info_copy(struct rds_info_iterator *iter,
			 uint64_t *values, const char *const *names,
			 size_t nr);

/* sysctl.c */
int rds_sysctl_init(void);
void rds_sysctl_exit(void);
extern unsigned long rds_sysctl_sndbuf_min;
extern unsigned long rds_sysctl_sndbuf_default;
extern unsigned long rds_sysctl_sndbuf_max;
extern unsigned long rds_sysctl_reconnect_min_jiffies;
extern unsigned long rds_sysctl_reconnect_max_jiffies;
extern unsigned int  rds_sysctl_max_unacked_packets;
extern unsigned int  rds_sysctl_max_unacked_bytes;
extern unsigned int  rds_sysctl_ping_enable;
extern unsigned long rds_sysctl_trace_flags;
extern unsigned int  rds_sysctl_trace_level;

/* threads.c */
int rds_threads_init(void);
void rds_threads_exit(void);
extern struct workqueue_struct *rds_wq;
void rds_queue_reconnect(struct rds_conn_path *cp);
void rds_connect_worker(struct work_struct *);
void rds_shutdown_worker(struct work_struct *);
void rds_send_worker(struct work_struct *);
void rds_recv_worker(struct work_struct *);
void rds_connect_path_complete(struct rds_conn_path *conn, int curr);
void rds_connect_complete(struct rds_connection *conn);
int rds_addr_cmp(const struct in6_addr *a1, const struct in6_addr *a2);

/* transport.c */
void rds_trans_register(struct rds_transport *trans);
void rds_trans_unregister(struct rds_transport *trans);
struct rds_transport *rds_trans_get_preferred(struct net *net,
					      const struct in6_addr *addr,
					      __u32 scope_id);
void rds_trans_put(struct rds_transport *trans);
unsigned int rds_trans_stats_info_copy(struct rds_info_iterator *iter,
				       unsigned int avail);
struct rds_transport *rds_trans_get(int t_type);
int rds_trans_init(void);
void rds_trans_exit(void);

#endif