fs/cifs/smbdirect.h
/*
 *   Copyright (C) 2017, Microsoft Corporation.
 *
 *   Author(s): Long Li <longli@microsoft.com>
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
 *   the GNU General Public License for more details.
 */
#ifndef _SMBDIRECT_H
#define _SMBDIRECT_H

#ifdef CONFIG_CIFS_SMB_DIRECT
#define cifs_rdma_enabled(server)	((server)->rdma)

#include "cifsglob.h"
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/mempool.h>

extern int rdma_readwrite_threshold;
extern int smbd_max_frmr_depth;
extern int smbd_keep_alive_interval;
extern int smbd_max_receive_size;
extern int smbd_max_fragmented_recv_size;
extern int smbd_max_send_size;
extern int smbd_send_credit_target;
extern int smbd_receive_credit_max;

enum keep_alive_status {
        KEEP_ALIVE_NONE,
        KEEP_ALIVE_PENDING,
        KEEP_ALIVE_SENT,
};

enum smbd_connection_status {
        SMBD_CREATED,
        SMBD_CONNECTING,
        SMBD_CONNECTED,
        SMBD_NEGOTIATE_FAILED,
        SMBD_DISCONNECTING,
        SMBD_DISCONNECTED,
        SMBD_DESTROYED
};

/*
 * The context for the SMBDirect transport
 * Everything related to the transport is here. It has several logical parts
 * 1. RDMA related structures
 * 2. SMBDirect connection parameters
 * 3. Memory registrations
 * 4. Receive and reassembly queues for data receive path
 * 5. mempools for allocating packets
 */
struct smbd_connection {
        enum smbd_connection_status transport_status;

        /* RDMA related */
        struct rdma_cm_id *id;
        struct ib_qp_init_attr qp_attr;
        struct ib_pd *pd;
        struct ib_cq *send_cq, *recv_cq;
        struct ib_device_attr dev_attr;
        int ri_rc;
        struct completion ri_done;
        wait_queue_head_t conn_wait;
        wait_queue_head_t wait_destroy;

        struct completion negotiate_completion;
        bool negotiate_done;

        struct work_struct destroy_work;
        struct work_struct disconnect_work;
        struct work_struct recv_done_work;
        struct work_struct post_send_credits_work;

        spinlock_t lock_new_credits_offered;
        int new_credits_offered;

        /* Connection parameters defined in [MS-SMBD] 3.1.1.1 */
        int receive_credit_max;
        int send_credit_target;
        int max_send_size;
        int max_fragmented_recv_size;
        int max_fragmented_send_size;
        int max_receive_size;
        int keep_alive_interval;
        int max_readwrite_size;
        enum keep_alive_status keep_alive_requested;
        int protocol;
        atomic_t send_credits;
        atomic_t receive_credits;
        int receive_credit_target;
        int fragment_reassembly_remaining;

        /* Memory registrations */
        /* Maximum number of RDMA read/write outstanding on this connection */
        int responder_resources;
        /* Maximum number of SGEs in a RDMA write/read */
        int max_frmr_depth;
        /*
         * If payload is less than or equal to the threshold,
         * use RDMA send/recv to send upper layer I/O.
         * If payload is more than the threshold,
         * use RDMA read/write through memory registration for I/O.
         */
        int rdma_readwrite_threshold;
        enum ib_mr_type mr_type;
        struct list_head mr_list;
        spinlock_t mr_list_lock;
        /* The number of available MRs ready for memory registration */
        atomic_t mr_ready_count;
        atomic_t mr_used_count;
        wait_queue_head_t wait_mr;
        struct work_struct mr_recovery_work;
        /* Used by transport to wait until all MRs are returned */
        wait_queue_head_t wait_for_mr_cleanup;

        /* Activity accounting */
        /* Pending requests issued from the upper layer */
        int smbd_send_pending;
        wait_queue_head_t wait_smbd_send_pending;

        int smbd_recv_pending;
        wait_queue_head_t wait_smbd_recv_pending;

        atomic_t send_pending;
        wait_queue_head_t wait_send_pending;
        atomic_t send_payload_pending;
        wait_queue_head_t wait_send_payload_pending;

        /* Receive queue */
        struct list_head receive_queue;
        int count_receive_queue;
        spinlock_t receive_queue_lock;

        struct list_head empty_packet_queue;
        int count_empty_packet_queue;
        spinlock_t empty_packet_queue_lock;

        wait_queue_head_t wait_receive_queues;

        /* Reassembly queue */
        struct list_head reassembly_queue;
        spinlock_t reassembly_queue_lock;
        wait_queue_head_t wait_reassembly_queue;

        /* total data length of reassembly queue */
        int reassembly_data_length;
        int reassembly_queue_length;
        /* the offset to first buffer in reassembly queue */
        int first_entry_offset;

        bool send_immediate;

        wait_queue_head_t wait_send_queue;

        /*
         * Indicate if we have received a full packet on the connection
         * This is used to identify the first SMBD packet of an assembled
         * payload (SMB packet) in the reassembly queue, so we can return an
         * RFC1002 length to the upper layer to indicate the length of the
         * SMB packet received
         */
        bool full_packet_received;

        struct workqueue_struct *workqueue;
        struct delayed_work idle_timer_work;
        struct delayed_work send_immediate_work;

        /* Memory pool for preallocating buffers */
        /* request pool for RDMA send */
        struct kmem_cache *request_cache;
        mempool_t *request_mempool;

        /* response pool for RDMA receive */
        struct kmem_cache *response_cache;
        mempool_t *response_mempool;

        /* for debug purposes */
        unsigned int count_get_receive_buffer;
        unsigned int count_put_receive_buffer;
        unsigned int count_reassembly_queue;
        unsigned int count_enqueue_reassembly_queue;
        unsigned int count_dequeue_reassembly_queue;
        unsigned int count_send_empty;
};

enum smbd_message_type {
        SMBD_NEGOTIATE_RESP,
        SMBD_TRANSFER_DATA,
};

#define SMB_DIRECT_RESPONSE_REQUESTED 0x0001

/* SMBD negotiation request packet [MS-SMBD] 2.2.1 */
struct smbd_negotiate_req {
        __le16 min_version;
        __le16 max_version;
        __le16 reserved;
        __le16 credits_requested;
        __le32 preferred_send_size;
        __le32 max_receive_size;
        __le32 max_fragmented_size;
} __packed;

/* SMBD negotiation response packet [MS-SMBD] 2.2.2 */
struct smbd_negotiate_resp {
        __le16 min_version;
        __le16 max_version;
        __le16 negotiated_version;
        __le16 reserved;
        __le16 credits_requested;
        __le16 credits_granted;
        __le32 status;
        __le32 max_readwrite_size;
        __le32 preferred_send_size;
        __le32 max_receive_size;
        __le32 max_fragmented_size;
} __packed;
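
/*
 * Illustrative sketch (not a copy of the code in smbdirect.c): the client
 * fills the negotiate request from its configured connection parameters and
 * only proceeds if the response reports success and a supported version.
 * The assignments below are an assumption-level example of that mapping;
 * "req" and "info" stand for a request buffer and a struct smbd_connection.
 *
 *      req->min_version = cpu_to_le16(0x0100);   (SMB Direct 1.0 per [MS-SMBD])
 *      req->max_version = cpu_to_le16(0x0100);
 *      req->credits_requested = cpu_to_le16(info->send_credit_target);
 *      req->preferred_send_size = cpu_to_le32(info->max_send_size);
 *      req->max_receive_size = cpu_to_le32(info->max_receive_size);
 *      req->max_fragmented_size = cpu_to_le32(info->max_fragmented_recv_size);
 *
 * On receipt of smbd_negotiate_resp, the negotiated limits are copied back
 * into the corresponding struct smbd_connection fields.
 */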

/* SMBD data transfer packet with payload [MS-SMBD] 2.2.3 */
struct smbd_data_transfer {
        __le16 credits_requested;
        __le16 credits_granted;
        __le16 flags;
        __le16 reserved;
        __le32 remaining_data_length;
        __le32 data_offset;
        __le32 data_length;
        __le32 padding;
        __u8 buffer[];
} __packed;
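
/*
 * Worked example of the fragmentation fields above (illustrative numbers):
 * sending a 16384-byte upper layer payload when at most 8192 bytes of
 * payload fit in one SMBD message produces two data transfer packets. The
 * 24-byte header above (including padding) precedes the data, so:
 *
 *      fragment 1: data_offset = 24, data_length = 8192,
 *                  remaining_data_length = 8192
 *      fragment 2: data_offset = 24, data_length = 8192,
 *                  remaining_data_length = 0
 *
 * The receiver queues fragments on the reassembly queue and hands the
 * payload to the upper layer once remaining_data_length reaches zero.
 */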

/* The packet fields for a registered RDMA buffer */
struct smbd_buffer_descriptor_v1 {
        __le64 offset;
        __le32 token;
        __le32 length;
} __packed;
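
/*
 * Sketch of how this descriptor is typically filled: the SMB2 layer
 * registers the I/O pages with smbd_register_mr() (declared below) and
 * advertises the resulting memory region to the server, roughly:
 *
 *      desc->offset = cpu_to_le64(smbdirect_mr->mr->iova);
 *      desc->token  = cpu_to_le32(smbdirect_mr->mr->rkey);
 *      desc->length = cpu_to_le32(smbdirect_mr->mr->length);
 *
 * "smbdirect_mr" here is a struct smbd_mr returned by smbd_register_mr();
 * the actual call site is in the SMB2 request construction code, not here.
 */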

/* Default maximum number of SGEs in a RDMA send/recv */
#define SMBDIRECT_MAX_SGE	16
/* The context for a SMBD request */
struct smbd_request {
        struct smbd_connection *info;
        struct ib_cqe cqe;

        /* true if this request carries upper layer payload */
        bool has_payload;

        /* the SGE entries for this packet */
        struct ib_sge sge[SMBDIRECT_MAX_SGE];
        int num_sge;

        /* SMBD packet header follows this structure */
        u8 packet[];
};

/* The context for a SMBD response */
struct smbd_response {
        struct smbd_connection *info;
        struct ib_cqe cqe;
        struct ib_sge sge;

        enum smbd_message_type type;

        /* Link to receive queue or reassembly queue */
        struct list_head list;

        /* Indicate if this is the 1st packet of a payload */
        bool first_segment;

        /* SMBD packet header and payload follows this structure */
        u8 packet[];
};

/* Create a SMBDirect session */
struct smbd_connection *smbd_get_connection(
        struct TCP_Server_Info *server, struct sockaddr *dstaddr);

/* Reconnect SMBDirect session */
int smbd_reconnect(struct TCP_Server_Info *server);
/* Destroy SMBDirect session */
void smbd_destroy(struct smbd_connection *info);

/* Interface for carrying upper layer I/O through send/recv */
int smbd_recv(struct smbd_connection *info, struct msghdr *msg);
int smbd_send(struct TCP_Server_Info *server, struct smb_rqst *rqst);
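
/*
 * Rough usage sketch for the transport interface above (error handling,
 * credit accounting and locking omitted; "server", "dstaddr", "rqst" and
 * "msg" are assumed to be set up by the caller):
 *
 *      struct smbd_connection *conn;
 *
 *      conn = smbd_get_connection(server, dstaddr);
 *      if (!conn)
 *              (handle failure: fall back to TCP or fail the mount)
 *      rc = smbd_send(server, rqst);   (request goes out via RDMA send)
 *      rc = smbd_recv(conn, &msg);     (response copied into the msghdr)
 *      ...
 *      smbd_reconnect(server);         (on transport failure)
 *      smbd_destroy(conn);             (at session teardown)
 */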

enum mr_state {
        MR_READY,
        MR_REGISTERED,
        MR_INVALIDATED,
        MR_ERROR
};

struct smbd_mr {
        struct smbd_connection *conn;
        struct list_head list;
        enum mr_state state;
        struct ib_mr *mr;
        struct scatterlist *sgl;
        int sgl_count;
        enum dma_data_direction dir;
        union {
                struct ib_reg_wr wr;
                struct ib_send_wr inv_wr;
        };
        struct ib_cqe cqe;
        bool need_invalidate;
        struct completion invalidate_done;
};

/* Interfaces to register and deregister MR for RDMA read/write */
struct smbd_mr *smbd_register_mr(
        struct smbd_connection *info, struct page *pages[], int num_pages,
        int offset, int tailsz, bool writing, bool need_invalidate);
int smbd_deregister_mr(struct smbd_mr *mr);
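
/*
 * Sketch of the intended use of the MR interface for large I/O (payload
 * above rdma_readwrite_threshold); names other than the two functions
 * declared above are illustrative only:
 *
 *      smbdirect_mr = smbd_register_mr(info, pages, num_pages, offset,
 *                                      tailsz, writing, need_invalidate);
 *      if (!smbdirect_mr)
 *              (fall back to the send/recv path)
 *      (describe the region with struct smbd_buffer_descriptor_v1 and issue
 *       the SMB2 READ/WRITE; once the server's response arrives:)
 *      smbd_deregister_mr(smbdirect_mr);
 */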

#else
#define cifs_rdma_enabled(server)	0
struct smbd_connection {};
static inline void *smbd_get_connection(
        struct TCP_Server_Info *server, struct sockaddr *dstaddr) { return NULL; }
static inline int smbd_reconnect(struct TCP_Server_Info *server) { return -1; }
static inline void smbd_destroy(struct smbd_connection *info) {}
static inline int smbd_recv(struct smbd_connection *info, struct msghdr *msg) { return -1; }
static inline int smbd_send(struct TCP_Server_Info *server, struct smb_rqst *rqst) { return -1; }
#endif /* CONFIG_CIFS_SMB_DIRECT */

#endif /* _SMBDIRECT_H */