/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
/*
 * Copyright(c) 2018 Intel Corporation.
 *
 */

#ifndef HFI1_RC_H
#define HFI1_RC_H

/* cut down ridiculously long IB macro names */
#define OP(x) IB_OPCODE_RC_##x
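/* e.g. OP(ACKNOWLEDGE) pastes to IB_OPCODE_RC_ACKNOWLEDGE */

/*
 * Advance the responder ACK queue past entry @n: move both
 * s_tail_ack_queue and s_acked_ack_queue to the next slot, wrapping to 0
 * at the end of the atomic/RDMA READ response queue, and reset the ACK
 * send state to OP(ACKNOWLEDGE).
 */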
static inline void update_ack_queue(struct rvt_qp *qp, unsigned int n)
{
        unsigned int next;

        next = n + 1;
        if (next > rvt_size_atomic(ib_to_rvt(qp->ibqp.device)))
                next = 0;
        qp->s_tail_ack_queue = next;
        qp->s_acked_ack_queue = next;
        qp->s_ack_state = OP(ACKNOWLEDGE);
}
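
/*
 * Defer the ACK/NAK response for a packet: mark the QP as owing a
 * responder-side NAK/ACK and, if it is not already queued, take a QP
 * reference and put it on the receive context's wait list so the
 * response is generated later.
 */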
static inline void rc_defered_ack(struct hfi1_ctxtdata *rcd,
                                  struct rvt_qp *qp)
{
        if (list_empty(&qp->rspwait)) {
                qp->r_flags |= RVT_R_RSP_NAK;
                rvt_get_qp(qp);
                list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
        }
}
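
/*
 * Reposition @ss at the point in @wqe's data that corresponds to @psn,
 * i.e. delta_psn(psn, wqe->psn) full packets of @pmtu bytes into the
 * request, and return the number of bytes remaining to be sent.
 */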
static inline u32 restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe,
                              u32 psn, u32 pmtu)
{
        u32 len;

        len = delta_psn(psn, wqe->psn) * pmtu;
        return rvt_restart_sge(ss, wqe, len);
}
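
/*
 * Drop the memory region reference held by an ACK queue entry's RDMA SGE
 * and clear the pointer.
 */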
static inline void release_rdma_sge_mr(struct rvt_ack_entry *e)
{
        rvt_put_mr(e->rdma_sge.mr);
        e->rdma_sge.mr = NULL;
}
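
/* RC protocol helpers defined in rc.c */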
struct rvt_ack_entry *find_prev_entry(struct rvt_qp *qp, u32 psn, u8 *prev,
                                      u8 *prev_ack, bool *scheduled);
int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode, u64 val,
              struct hfi1_ctxtdata *rcd);
struct rvt_swqe *do_rc_completion(struct rvt_qp *qp, struct rvt_swqe *wqe,
                                  struct hfi1_ibport *ibp);

#endif /* HFI1_RC_H */