/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2020, Mellanox Technologies inc. All rights reserved.
 */

#ifndef _MLX5_IB_WR_H
#define _MLX5_IB_WR_H

#include "mlx5_ib.h"

enum {
	MLX5_IB_SQ_UMR_INLINE_THRESHOLD = 64,
};

struct mlx5_wqe_eth_pad {
	u8 rsvd0[16];
};

/* get_sq_edge - Get the next nearby edge.
 *
 * An 'edge' is defined as the first address that follows the end of the
 * fragment or the SQ. Accordingly, during WQE construction, which
 * repeatedly advances the pointer used to write the next piece of data,
 * the writer simply has to check whether it has reached an edge.
 *
 * @sq - SQ buffer.
 * @idx - Stride index in the SQ buffer.
 *
 * Return:
 *	The new edge.
 */
static inline void *get_sq_edge(struct mlx5_ib_wq *sq, u32 idx)
{
	void *fragment_end;

	fragment_end = mlx5_frag_buf_get_wqe
		(&sq->fbc,
		 mlx5_frag_buf_get_idx_last_contig_stride(&sq->fbc, idx));

	return fragment_end + MLX5_SEND_WQE_BB;
}

/* handle_post_send_edge - Check whether we have reached the SQ edge. If so,
 * update to the next nearby edge and get a new address translation for the
 * current WQE position.
 * @sq: SQ buffer.
 * @seg: Current WQE position (16B aligned).
 * @wqe_sz: Total current WQE size [16B].
 * @cur_edge: Updated current edge.
 */
static inline void handle_post_send_edge(struct mlx5_ib_wq *sq, void **seg,
					 u32 wqe_sz, void **cur_edge)
{
	u32 idx;

	if (likely(*seg != *cur_edge))
		return;

	idx = (sq->cur_post + (wqe_sz >> 2)) & (sq->wqe_cnt - 1);
	*cur_edge = get_sq_edge(sq, idx);

	*seg = mlx5_frag_buf_get_wqe(&sq->fbc, idx);
}
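
/*
 * Illustrative sketch (not part of this header's API): the typical pattern a
 * WQE builder follows when writing segments one at a time. "seg", "size" and
 * "cur_edge" are assumed to be the caller's bookkeeping variables, with
 * "size" counted in 16B units; the data-segment fill itself is elided.
 *
 *	struct mlx5_wqe_data_seg *dseg = *seg;
 *
 *	// ... fill in *dseg (byte_count, lkey, addr) ...
 *	*seg += sizeof(*dseg);			// advance the write pointer
 *	*size += sizeof(*dseg) / 16;		// track WQE size in 16B units
 *	handle_post_send_edge(sq, seg, *size, cur_edge);
 */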

/* mlx5r_memcpy_send_wqe - copy data from src to the WQE and update the
 * relevant WQ's pointers. At the end @seg is aligned to 16B regardless of the
 * copied size.
 * @sq: SQ buffer.
 * @cur_edge: Updated current edge.
 * @seg: Current WQE position (16B aligned).
 * @wqe_sz: Total current WQE size [16B].
 * @src: Pointer to copy from.
 * @n: Number of bytes to copy.
 */
static inline void mlx5r_memcpy_send_wqe(struct mlx5_ib_wq *sq, void **cur_edge,
					 void **seg, u32 *wqe_sz,
					 const void *src, size_t n)
{
	while (likely(n)) {
		size_t leftlen = *cur_edge - *seg;
		size_t copysz = min_t(size_t, leftlen, n);
		size_t stride;

		memcpy(*seg, src, copysz);

		n -= copysz;
		src += copysz;
		stride = !n ? ALIGN(copysz, 16) : copysz;
		*seg += stride;
		*wqe_sz += stride >> 4;
		handle_post_send_edge(sq, seg, *wqe_sz, cur_edge);
	}
}
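
/*
 * Illustrative sketch (assumes the caller's qp/seg/size/cur_edge bookkeeping
 * set up by mlx5r_begin_wqe(); "payload" and "len" are hypothetical): copying
 * an inline payload into the WQE. The copy may cross one or more edges;
 * mlx5r_memcpy_send_wqe() splits it as needed and leaves @seg 16B aligned.
 *
 *	mlx5r_memcpy_send_wqe(&qp->sq, &cur_edge, &seg, &size, payload, len);
 */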

int mlx5r_wq_overflow(struct mlx5_ib_wq *wq, int nreq, struct ib_cq *ib_cq);
int mlx5r_begin_wqe(struct mlx5_ib_qp *qp, void **seg,
		    struct mlx5_wqe_ctrl_seg **ctrl, unsigned int *idx,
		    int *size, void **cur_edge, int nreq, __be32 general_id,
		    bool send_signaled, bool solicited);
void mlx5r_finish_wqe(struct mlx5_ib_qp *qp, struct mlx5_wqe_ctrl_seg *ctrl,
		      void *seg, u8 size, void *cur_edge, unsigned int idx,
		      u64 wr_id, int nreq, u8 fence, u32 mlx5_opcode);
void mlx5r_ring_db(struct mlx5_ib_qp *qp, unsigned int nreq,
		   struct mlx5_wqe_ctrl_seg *ctrl);
int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
		      const struct ib_send_wr **bad_wr, bool drain);
int mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
		      const struct ib_recv_wr **bad_wr, bool drain);
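
/*
 * Sketch of how the helpers above compose in a post-send path (simplified;
 * error handling, opcode selection and the per-opcode segments are omitted,
 * and the local variables are assumed to match the prototypes above):
 *
 *	err = mlx5r_begin_wqe(qp, &seg, &ctrl, &idx, &size, &cur_edge, nreq,
 *			      general_id, send_signaled, solicited);
 *	if (err)
 *		goto out;
 *	// ... write the opcode-specific segments, calling
 *	//     handle_post_send_edge()/mlx5r_memcpy_send_wqe() as the WQE grows ...
 *	mlx5r_finish_wqe(qp, ctrl, seg, size, cur_edge, idx, wr_id, nreq,
 *			 fence, mlx5_opcode);
 *	// after the loop over all work requests:
 *	mlx5r_ring_db(qp, nreq, ctrl);
 */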

static inline int mlx5_ib_post_send_nodrain(struct ib_qp *ibqp,
					    const struct ib_send_wr *wr,
					    const struct ib_send_wr **bad_wr)
{
	return mlx5_ib_post_send(ibqp, wr, bad_wr, false);
}

static inline int mlx5_ib_post_send_drain(struct ib_qp *ibqp,
					  const struct ib_send_wr *wr,
					  const struct ib_send_wr **bad_wr)
{
	return mlx5_ib_post_send(ibqp, wr, bad_wr, true);
}

static inline int mlx5_ib_post_recv_nodrain(struct ib_qp *ibqp,
					    const struct ib_recv_wr *wr,
					    const struct ib_recv_wr **bad_wr)
{
	return mlx5_ib_post_recv(ibqp, wr, bad_wr, false);
}

static inline int mlx5_ib_post_recv_drain(struct ib_qp *ibqp,
					  const struct ib_recv_wr *wr,
					  const struct ib_recv_wr **bad_wr)
{
	return mlx5_ib_post_recv(ibqp, wr, bad_wr, true);
}
#endif /* _MLX5_IB_WR_H */