/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */

/* Authors: Bernard Metzler <bmt@zurich.ibm.com> */
/* Copyright (c) 2008-2019, IBM Corporation */

#include <rdma/ib_verbs.h>
#include <rdma/restrack.h>
#include <linux/socket.h>
#include <linux/skbuff.h>
#include <crypto/hash.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>

#include <rdma/siw-abi.h>
#define SIW_VENDOR_ID 0x626d74 /* ascii 'bmt' for now */
#define SIW_VENDORT_PART_ID 0
#define SIW_MAX_QP (1024 * 100)
#define SIW_MAX_QP_WR (1024 * 32)
#define SIW_MAX_ORD_QP 128
#define SIW_MAX_IRD_QP 128
#define SIW_MAX_SGE_PBL 256 /* max num sge's for PBL */
#define SIW_MAX_SGE_RD 1 /* iwarp limitation. we could relax */
#define SIW_MAX_CQ (1024 * 100)
#define SIW_MAX_CQE (SIW_MAX_QP_WR * 100)
#define SIW_MAX_MR (SIW_MAX_QP * 10)
#define SIW_MAX_PD SIW_MAX_QP
#define SIW_MAX_MW 0 /* to be set if MW's are supported */
#define SIW_MAX_SRQ SIW_MAX_QP
#define SIW_MAX_SRQ_WR (SIW_MAX_QP_WR * 10)
#define SIW_MAX_CONTEXT SIW_MAX_PD
/* Min number of bytes for using zero copy transmit */
#define SENDPAGE_THRESH PAGE_SIZE
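/*
 * Illustrative sketch (not taken from this file): a transmit path can
 * use SENDPAGE_THRESH to decide between zero-copy page sending and a
 * plain copy. The helper name and 'length' parameter are hypothetical.
 */
static inline bool siw_use_sendpage_example(unsigned int length)
{
	/* Zero copy only pays off once per-byte savings outweigh the
	 * fixed page-referencing overhead of the sendpage path.
	 */
	return length >= SENDPAGE_THRESH;
}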
/* Maximum number of frames which can be sent in one SQ processing */
#define SQ_USER_MAXBURST 100
/* Maximum number of consecutive IRQ elements which get served
 * if SQ has pending work. Prevents starving local SQ processing
 * by serving peer Read Requests.
 */
#define SIW_IRQ_MAXBURST_SQ_ACTIVE 4
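/*
 * Illustrative sketch (hypothetical code, not part of this header): a
 * fair scheduler can bound back-to-back IRQ (inbound read queue)
 * service with this limit, so peer RDMA Read Requests cannot starve
 * local SQ work. The 'irq_burst' counter and helper are assumptions.
 *
 *	if (qp->irq_burst++ >= SIW_IRQ_MAXBURST_SQ_ACTIVE &&
 *	    !siw_sq_empty(qp)) {
 *		qp->irq_burst = 0;	// give the local SQ a turn
 *		serve_sq_wqe(qp);	// hypothetical helper
 *	}
 */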
struct siw_dev_cap {
	int max_ord; /* max. outbound read queue depth */
	int max_ird; /* max. inbound read queue depth */
};
struct siw_device {
	struct ib_device base_dev;
	struct net_device *netdev;
	struct siw_dev_cap attrs;

	/* physical port state (only one port per device) */
	enum ib_port_state state;

	struct xarray qp_xa;

	struct list_head cep_list;
	struct list_head qp_list;

	/* active objects statistics to enforce limits */

	struct work_struct netdev_down;
};
struct siw_ucontext {
	struct ib_ucontext base_ucontext;
	struct siw_device *sdev;
};
/*
 * The RDMA core does not define LOCAL_READ access, which is always
 * enabled implicitly.
 */
#define IWARP_ACCESS_MASK					\
	(IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE |	\
	 IB_ACCESS_REMOTE_READ)
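/*
 * Illustrative example (hypothetical helper, not part of the driver):
 * rejecting registration requests which ask for rights outside what
 * iWARP supports. LOCAL_READ needs no flag, since it is implied.
 */
static inline int siw_check_perms_example(int ib_access_flags)
{
	if (ib_access_flags & ~IWARP_ACCESS_MASK)
		return -EINVAL;	/* e.g. IB_ACCESS_MW_BIND */
	return 0;
}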
/*
 * siw presentation of user memory registered as source
 * or target of RDMA operations.
 */

struct siw_page_chunk {
	struct page **plist;
};

struct siw_umem {
	struct siw_page_chunk *page_chunk;
	u64 fp_addr; /* First page base address */
	struct mm_struct *owning_mm;
};

struct siw_pble {
	dma_addr_t addr; /* Address of assigned buffer */
	unsigned int size; /* Size of this entry */
	unsigned long pbl_off; /* Total offset from start of PBL */
};

struct siw_pbl {
	unsigned int num_buf;
	unsigned int max_buf;
	struct siw_pble pbe[];
};
/*
 * Generic memory representation for registered siw memory.
 * Memory lookup is always via the upper 24 bits of the STag
 * (the STag index); see the sketch below.
 */
struct siw_mem {
	struct siw_device *sdev;
	u64 va; /* VA of memory */
	u64 len; /* length of the memory buffer in bytes */
	u32 stag; /* iWarp memory access steering tag */
	u8 stag_valid; /* VALID or INVALID */
	u8 is_pbl; /* PBL or user space mem */
	u8 is_mw; /* Memory Region or Memory Window */
	enum ib_access_flags perms; /* local/remote READ & WRITE */
	struct siw_umem *umem;
};
struct siw_mr {
	struct ib_mr base_mr;
};
/*
 * Error codes for local or remote
 * access to registered memory
 */
enum siw_access_state {
	E_ACCESS_OK,
	E_ACCESS_PERM,
	E_BASE_BOUNDS,
	E_ACCESS_INVALID,
	E_PD_MISMATCH,
	E_STAG_INVALID
};

enum siw_wr_state {
	SIW_WR_IDLE,
	SIW_WR_QUEUED, /* processing has not started yet */
	SIW_WR_INPROGRESS /* initiated processing of the WR */
};
/* The WQE currently being processed (RX or TX) */
struct siw_wqe {
	/* Copy of the application's SQE or RQE */
	union {
		struct siw_sqe sqe;
		struct siw_rqe rqe;
	};
	struct siw_mem *mem[SIW_MAX_SGE]; /* per sge's resolved mem */
	enum siw_wr_state wr_status;
	enum siw_wc_status wc_status;
	u32 bytes; /* total bytes to process */
	u32 processed; /* bytes processed */
};
struct siw_cq {
	struct ib_cq base_cq;
	struct siw_cq_ctrl *notify;
	struct siw_cqe *queue;
	struct rdma_user_mmap_entry *cq_entry; /* mmap info for CQE array */
	u32 id; /* For debugging only */
};
enum siw_qp_state {
	SIW_QP_STATE_IDLE,
	SIW_QP_STATE_RTR,
	SIW_QP_STATE_RTS,
	SIW_QP_STATE_CLOSING,
	SIW_QP_STATE_TERMINATE,
	SIW_QP_STATE_ERROR,
	SIW_QP_STATE_COUNT
};

enum siw_qp_flags {
	SIW_RDMA_BIND_ENABLED = (1 << 0),
	SIW_RDMA_WRITE_ENABLED = (1 << 1),
	SIW_RDMA_READ_ENABLED = (1 << 2),
	SIW_SIGNAL_ALL_WR = (1 << 3),
	SIW_MPA_CRC = (1 << 4),
	SIW_QP_IN_DESTROY = (1 << 5)
};
enum siw_qp_attr_mask {
	SIW_QP_ATTR_STATE = (1 << 0),
	SIW_QP_ATTR_ACCESS_FLAGS = (1 << 1),
	SIW_QP_ATTR_LLP_HANDLE = (1 << 2),
	SIW_QP_ATTR_ORD = (1 << 3),
	SIW_QP_ATTR_IRD = (1 << 4),
	SIW_QP_ATTR_SQ_SIZE = (1 << 5),
	SIW_QP_ATTR_RQ_SIZE = (1 << 6),
	SIW_QP_ATTR_MPA = (1 << 7)
};
struct siw_srq {
	struct ib_srq base_srq;
	u32 limit; /* low watermark for async event */
	struct siw_rqe *recvq;
	u32 num_rqe; /* max # of wqe's allowed */
	struct rdma_user_mmap_entry *srq_entry; /* mmap info for SRQ array */
	bool armed:1; /* inform user if limit hit */
	bool is_kernel_res:1; /* true if kernel client */
};
struct siw_qp_attrs {
	enum siw_qp_state state;
	u32 sq_size;
	u32 rq_size;
	u32 orq_size;
	u32 irq_size;
	enum siw_qp_flags flags;
};
enum siw_tx_ctx {
	SIW_SEND_HDR, /* start or continue sending HDR */
	SIW_SEND_DATA, /* start or continue sending DDP payload */
	SIW_SEND_TRAILER, /* start or continue sending TRAILER */
	SIW_SEND_SHORT_FPDU /* send whole FPDU hdr|data|trailer at once */
};

enum siw_rx_state {
	SIW_GET_HDR, /* await new hdr or within hdr */
	SIW_GET_DATA_START, /* start of inbound DDP payload */
	SIW_GET_DATA_MORE, /* continuation of (misaligned) DDP payload */
	SIW_GET_TRAILER /* await new trailer or within trailer */
};
struct siw_rx_stream {
	struct sk_buff *skb;
	int skb_new; /* pending unread bytes in skb */
	int skb_offset; /* offset in skb */
	int skb_copied; /* processed bytes in skb */

	struct mpa_trailer trailer;

	enum siw_rx_state state;

	/*
	 * For each FPDU, the main RX loop runs through 3 stages:
	 * receiving protocol headers, placing DDP payload and receiving
	 * trailer information (CRC + possibly padding).
	 * The next two variables keep state on the receive status of the
	 * current FPDU part (hdr, data, trailer); see the sketch below.
	 */
	int fpdu_part_rcvd; /* bytes in pkt part copied */
	int fpdu_part_rem; /* bytes in pkt part not seen */

	/*
	 * Next expected DDP MSN for each QN +
	 * expected steering tag +
	 * expected DDP tagged offset (all HBO)
	 */
	u32 ddp_msn[RDMAP_UNTAGGED_QN_COUNT];
	u32 inval_stag; /* Stag to be invalidated */

	struct shash_desc *mpa_crc_hd;

	u8 pad : 2; /* # of pad bytes expected */
	u8 rdmap_op : 4; /* opcode of current frame */
};
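/*
 * Illustrative sketch (simplified, with hypothetical helpers) of the
 * three-stage FPDU receive loop described above: header bytes are
 * consumed first, then DDP payload is placed, then the trailer
 * (CRC + padding) is checked. fpdu_part_rcvd/fpdu_part_rem track
 * progress within the current stage.
 *
 *	while (srx->skb_new > 0) {
 *		int rv = min(srx->fpdu_part_rem, srx->skb_new);
 *
 *		consume(srx, rv);		// hypothetical: copy/place rv bytes
 *		srx->fpdu_part_rcvd += rv;
 *		srx->fpdu_part_rem -= rv;
 *		if (srx->fpdu_part_rem == 0)
 *			advance_state(srx);	// HDR -> DATA -> TRAILER
 *	}
 */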
struct siw_rx_fpdu {
	/*
	 * Local destination memory of inbound RDMA operation.
	 * Valid, according to wqe->wr_status
	 */
	struct siw_wqe wqe_active;

	unsigned int pbl_idx; /* Index into current PBL */
	unsigned int sge_idx; /* current sge in rx */
	unsigned int sge_off; /* already rcvd in curr. sge */

	char first_ddp_seg; /* this is the first DDP seg */
	char more_ddp_segs; /* more DDP segs expected */
	u8 prev_rdmap_op : 4; /* opcode of prev frame */
};
/*
 * Shorthands for short packets w/o payload
 * to be transmitted more efficiently.
 */
struct siw_send_pkt {
	struct iwarp_send send;
	__be32 crc;
};

struct siw_write_pkt {
	struct iwarp_rdma_write write;
	__be32 crc;
};

struct siw_rreq_pkt {
	struct iwarp_rdma_rreq rreq;
	__be32 crc;
};

struct siw_rresp_pkt {
	struct iwarp_rdma_rresp rresp;
	__be32 crc;
};
struct siw_iwarp_tx {
	union {
		/* Generic part of FPDU header */
		struct iwarp_ctrl ctrl;
		struct iwarp_ctrl_untagged c_untagged;
		struct iwarp_ctrl_tagged c_tagged;

		struct iwarp_rdma_write rwrite;
		struct iwarp_rdma_rreq rreq;
		struct iwarp_rdma_rresp rresp;
		struct iwarp_terminate terminate;
		struct iwarp_send send;
		struct iwarp_send_inv send_inv;

		/* complete short FPDUs */
		struct siw_send_pkt send_pkt;
		struct siw_write_pkt write_pkt;
		struct siw_rreq_pkt rreq_pkt;
		struct siw_rresp_pkt rresp_pkt;
	} pkt;

	struct mpa_trailer trailer;
	/* DDP MSN for untagged messages */
	u32 ddp_msn[RDMAP_UNTAGGED_QN_COUNT];

	enum siw_tx_ctx state;
	u16 ctrl_len; /* ddp+rdmap hdr */
	int bytes_unsent; /* ddp payload bytes */

	struct shash_desc *mpa_crc_hd;

	u8 do_crc : 1; /* do crc for segment */
	u8 use_sendpage : 1; /* send w/o copy */
	u8 tx_suspend : 1; /* stop sending DDP segs. */
	u8 pad : 2; /* # pad in current fpdu */
	u8 orq_fence : 1; /* ORQ full or Send fenced */
	u8 in_syscall : 1; /* TX out of user context */
	u8 zcopy_tx : 1; /* Use TCP_SENDPAGE if possible */
	u8 gso_seg_limit; /* Maximum segments for GSO, 0 = unbound */

	u16 fpdu_len; /* len of FPDU to tx */
	unsigned int tcp_seglen; /* remaining tcp seg space */

	struct siw_wqe wqe_active;

	int pbl_idx; /* Index into current PBL */
	int sge_idx; /* current sge in tx */
	u32 sge_off; /* already sent in curr. sge */
};
struct siw_qp {
	struct ib_qp base_qp;
	struct siw_device *sdev;
	struct kref ref;
	struct list_head devq;
	struct siw_qp_attrs attrs;

	struct rw_semaphore state_lock;

	struct siw_iwarp_tx tx_ctx; /* Transmit context */
	struct siw_sqe *sendq; /* send queue element array */
	uint32_t sq_get; /* consumer index into sq array */
	uint32_t sq_put; /* kernel prod. index into sq array */
	struct llist_node tx_list;

	struct siw_sqe *orq; /* outbound read queue element array */
	uint32_t orq_get; /* consumer index into orq array */
	uint32_t orq_put; /* shared producer index for ORQ */

	struct siw_rx_stream rx_stream;
	struct siw_rx_fpdu *rx_fpdu;
	struct siw_rx_fpdu rx_tagged;
	struct siw_rx_fpdu rx_untagged;

	struct siw_rqe *recvq; /* recv queue element array */
	uint32_t rq_get; /* consumer index into rq array */
	uint32_t rq_put; /* kernel prod. index into rq array */

	struct siw_sqe *irq; /* inbound read queue element array */
	uint32_t irq_get; /* consumer index into irq array */
	uint32_t irq_put; /* producer index into irq array */

	struct { /* information to be carried in TERMINATE pkt, if valid */
		u8 layer : 4, etype : 4;
	} term_info;

	struct rdma_user_mmap_entry *sq_entry; /* mmap info for SQE array */
	struct rdma_user_mmap_entry *rq_entry; /* mmap info for RQE array */
};
#define rx_qp(rx) container_of(rx, struct siw_qp, rx_stream)
#define tx_qp(tx) container_of(tx, struct siw_qp, tx_ctx)
#define tx_wqe(qp) (&(qp)->tx_ctx.wqe_active)
#define rx_wqe(rctx) (&(rctx)->wqe_active)
#define rx_mem(rctx) ((rctx)->wqe_active.mem[0])
#define tx_type(wqe) ((wqe)->sqe.opcode)
#define rx_type(wqe) ((wqe)->rqe.opcode)
#define tx_flags(wqe) ((wqe)->sqe.flags)
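/*
 * Example (hypothetical snippet, assuming SIW_OP_READ and SIW_WQE_VALID
 * from <rdma/siw-abi.h>): these shorthands keep QP processing code
 * terse.
 *
 *	struct siw_wqe *wqe = tx_wqe(qp);
 *
 *	if (tx_type(wqe) == SIW_OP_READ && (tx_flags(wqe) & SIW_WQE_VALID))
 *		process_read(tx_qp(&qp->tx_ctx));	// hypothetical helper
 */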
struct iwarp_msg_info {
	int hdr_len;
	struct iwarp_ctrl ctrl;
	int (*rx_data)(struct siw_qp *qp);
};

struct siw_user_mmap_entry {
	struct rdma_user_mmap_entry rdma_entry;
	void *address;
};
/* Global siw parameters. Currently set in siw_main.c */
extern const bool zcopy_tx;
extern const bool try_gso;
extern const bool loopback_enabled;
extern const bool mpa_crc_required;
extern const bool mpa_crc_strict;
extern const bool siw_tcp_nagle;
extern u_char mpa_version;
extern const bool peer_to_peer;
extern struct task_struct *siw_tx_thread[];

extern struct crypto_shash *siw_crypto_shash;
extern struct iwarp_msg_info iwarp_pktinfo[RDMAP_TERMINATE + 1];
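/*
 * Illustrative sketch: iwarp_pktinfo is indexed by RDMAP opcode, so
 * inbound processing can dispatch on the opcode of the current frame.
 * A receive loop might do (simplified, hypothetical):
 *
 *	u8 opcode = qp->rx_stream.rdmap_op;
 *
 *	set_rx_fpdu_context(qp, opcode);
 *	rv = iwarp_pktinfo[opcode].rx_data(qp);
 */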
/* QP general functions */
int siw_qp_modify(struct siw_qp *qp, struct siw_qp_attrs *attr,
		  enum siw_qp_attr_mask mask);
int siw_qp_mpa_rts(struct siw_qp *qp, enum mpa_v2_ctrl ctrl);
void siw_qp_llp_close(struct siw_qp *qp);
void siw_qp_cm_drop(struct siw_qp *qp, int schedule);
void siw_send_terminate(struct siw_qp *qp);

void siw_qp_get_ref(struct ib_qp *qp);
void siw_qp_put_ref(struct ib_qp *qp);
int siw_qp_add(struct siw_device *sdev, struct siw_qp *qp);
void siw_free_qp(struct kref *ref);

void siw_init_terminate(struct siw_qp *qp, enum term_elayer layer,
			u8 etype, u8 ecode, int in_tx);
enum ddp_ecode siw_tagged_error(enum siw_access_state state);
enum rdmap_ecode siw_rdmap_error(enum siw_access_state state);

void siw_read_to_orq(struct siw_sqe *rreq, struct siw_sqe *sqe);
int siw_sqe_complete(struct siw_qp *qp, struct siw_sqe *sqe, u32 bytes,
		     enum siw_wc_status status);
int siw_rqe_complete(struct siw_qp *qp, struct siw_rqe *rqe, u32 bytes,
		     u32 inval_stag, enum siw_wc_status status);
void siw_qp_llp_data_ready(struct sock *sk);
void siw_qp_llp_write_space(struct sock *sk);
/* QP TX path functions */
int siw_run_sq(void *arg);
int siw_qp_sq_process(struct siw_qp *qp);
int siw_sq_start(struct siw_qp *qp);
int siw_activate_tx(struct siw_qp *qp);
void siw_stop_tx_thread(int nr_cpu);
int siw_get_tx_cpu(struct siw_device *sdev);
void siw_put_tx_cpu(int cpu);
/* QP RX path functions */
int siw_proc_send(struct siw_qp *qp);
int siw_proc_rreq(struct siw_qp *qp);
int siw_proc_rresp(struct siw_qp *qp);
int siw_proc_write(struct siw_qp *qp);
int siw_proc_terminate(struct siw_qp *qp);

int siw_tcp_rx_data(read_descriptor_t *rd_desc, struct sk_buff *skb,
		    unsigned int off, size_t len);
static inline void set_rx_fpdu_context(struct siw_qp *qp, u8 opcode)
{
	if (opcode == RDMAP_RDMA_WRITE || opcode == RDMAP_RDMA_READ_RESP)
		qp->rx_fpdu = &qp->rx_tagged;
	else
		qp->rx_fpdu = &qp->rx_untagged;

	qp->rx_stream.rdmap_op = opcode;
}
static inline struct siw_ucontext *to_siw_ctx(struct ib_ucontext *base_ctx)
{
	return container_of(base_ctx, struct siw_ucontext, base_ucontext);
}

static inline struct siw_qp *to_siw_qp(struct ib_qp *base_qp)
{
	return container_of(base_qp, struct siw_qp, base_qp);
}

static inline struct siw_cq *to_siw_cq(struct ib_cq *base_cq)
{
	return container_of(base_cq, struct siw_cq, base_cq);
}

static inline struct siw_srq *to_siw_srq(struct ib_srq *base_srq)
{
	return container_of(base_srq, struct siw_srq, base_srq);
}

static inline struct siw_device *to_siw_dev(struct ib_device *base_dev)
{
	return container_of(base_dev, struct siw_device, base_dev);
}

static inline struct siw_mr *to_siw_mr(struct ib_mr *base_mr)
{
	return container_of(base_mr, struct siw_mr, base_mr);
}

static inline struct siw_user_mmap_entry *
to_siw_mmap_entry(struct rdma_user_mmap_entry *rdma_mmap)
{
	return container_of(rdma_mmap, struct siw_user_mmap_entry, rdma_entry);
}
static inline struct siw_qp *siw_qp_id2obj(struct siw_device *sdev, int id)
{
	struct siw_qp *qp;

	rcu_read_lock();
	qp = xa_load(&sdev->qp_xa, id);
	if (likely(qp && kref_get_unless_zero(&qp->ref))) {
		rcu_read_unlock();
		return qp;
	}
	rcu_read_unlock();
	return NULL;
}
static inline u32 qp_id(struct siw_qp *qp)
{
	return qp->base_qp.qp_num;
}
static inline void siw_qp_get(struct siw_qp *qp)
{
	kref_get(&qp->ref);
}

static inline void siw_qp_put(struct siw_qp *qp)
{
	kref_put(&qp->ref, siw_free_qp);
}
static inline int siw_sq_empty(struct siw_qp *qp)
{
	struct siw_sqe *sqe = &qp->sendq[qp->sq_get % qp->attrs.sq_size];

	return READ_ONCE(sqe->flags) == 0;
}
static inline struct siw_sqe *sq_get_next(struct siw_qp *qp)
{
	struct siw_sqe *sqe = &qp->sendq[qp->sq_get % qp->attrs.sq_size];

	if (READ_ONCE(sqe->flags) & SIW_WQE_VALID)
		return sqe;

	return NULL;
}
static inline struct siw_sqe *orq_get_current(struct siw_qp *qp)
{
	return &qp->orq[qp->orq_get % qp->attrs.orq_size];
}

static inline struct siw_sqe *orq_get_tail(struct siw_qp *qp)
{
	return &qp->orq[qp->orq_put % qp->attrs.orq_size];
}

static inline struct siw_sqe *orq_get_free(struct siw_qp *qp)
{
	struct siw_sqe *orq_e = orq_get_tail(qp);

	if (orq_e && READ_ONCE(orq_e->flags) == 0)
		return orq_e;

	return NULL;
}

static inline int siw_orq_empty(struct siw_qp *qp)
{
	return qp->orq[qp->orq_get % qp->attrs.orq_size].flags == 0 ? 1 : 0;
}

static inline struct siw_sqe *irq_alloc_free(struct siw_qp *qp)
{
	struct siw_sqe *irq_e = &qp->irq[qp->irq_put % qp->attrs.irq_size];

	if (READ_ONCE(irq_e->flags) == 0) {
		qp->irq_put++;
		return irq_e;
	}
	return NULL;
}
static inline __wsum siw_csum_update(const void *buff, int len, __wsum sum)
{
	return (__force __wsum)crc32c((__force __u32)sum, buff, len);
}

static inline __wsum siw_csum_combine(__wsum csum, __wsum csum2, int offset,
				      int len)
{
	return (__force __wsum)__crc32c_le_combine((__force __u32)csum,
						   (__force __u32)csum2, len);
}
static inline void siw_crc_skb(struct siw_rx_stream *srx, unsigned int len)
{
	const struct skb_checksum_ops siw_cs_ops = {
		.update = siw_csum_update,
		.combine = siw_csum_combine,
	};
	__wsum crc = *(u32 *)shash_desc_ctx(srx->mpa_crc_hd);

	crc = __skb_checksum(srx->skb, srx->skb_offset, len, crc,
			     &siw_cs_ops);
	*(u32 *)shash_desc_ctx(srx->mpa_crc_hd) = crc;
}
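/*
 * Note: __skb_checksum() walks the linear data and all paged fragments
 * of the skb, folding them via the ops above, so the MPA CRC32C is
 * computed in one pass without linearizing the skb. A direct use of
 * the update step alone (illustrative only, 'buf'/'buf_len'/'crc' are
 * hypothetical) would be:
 *
 *	crc = siw_csum_update(buf, buf_len, crc);
 */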
#define siw_dbg(ibdev, fmt, ...)                                               \
	ibdev_dbg(ibdev, "%s: " fmt, __func__, ##__VA_ARGS__)

#define siw_dbg_qp(qp, fmt, ...)                                               \
	ibdev_dbg(&qp->sdev->base_dev, "QP[%u] %s: " fmt, qp_id(qp), __func__, \
		  ##__VA_ARGS__)

#define siw_dbg_cq(cq, fmt, ...)                                               \
	ibdev_dbg(cq->base_cq.device, "CQ[%u] %s: " fmt, cq->id, __func__,     \
		  ##__VA_ARGS__)

#define siw_dbg_pd(pd, fmt, ...)                                               \
	ibdev_dbg(pd->device, "PD[%u] %s: " fmt, pd->res.id, __func__,         \
		  ##__VA_ARGS__)

#define siw_dbg_mem(mem, fmt, ...)                                             \
	ibdev_dbg(&mem->sdev->base_dev,                                        \
		  "MEM[0x%08x] %s: " fmt, mem->stag, __func__, ##__VA_ARGS__)

#define siw_dbg_cep(cep, fmt, ...)                                             \
	ibdev_dbg(&cep->sdev->base_dev, "CEP[0x%pK] %s: " fmt,                 \
		  cep, __func__, ##__VA_ARGS__)
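/*
 * Example (hypothetical call site): each siw_dbg_*() variant tags the
 * dynamic-debug output with the object identity and calling function:
 *
 *	siw_dbg_qp(qp, "new state %d\n", attrs->state);
 *
 * which might appear in the log as "QP[7] siw_qp_modify: new state 2".
 */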
void siw_cq_flush(struct siw_cq *cq);
void siw_sq_flush(struct siw_qp *qp);
void siw_rq_flush(struct siw_qp *qp);
int siw_reap_cqe(struct siw_cq *cq, struct ib_wc *wc);