include/standard-headers/rdma/vmw_pvrdma-abi.h
/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-2-Clause) */
/*
 * Copyright (c) 2012-2016 VMware, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of EITHER the GNU General Public License
 * version 2 as published by the Free Software Foundation or the BSD
 * 2-Clause License. This program is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
 * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License version 2 for more details at
 * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program available in the file COPYING in the main
 * directory of this source tree.
 *
 * The BSD 2-Clause License
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef __VMW_PVRDMA_ABI_H__
#define __VMW_PVRDMA_ABI_H__

#include "standard-headers/linux/types.h"

#define PVRDMA_UVERBS_ABI_VERSION 3 /* ABI Version. */
#define PVRDMA_UAR_HANDLE_MASK 0x00FFFFFF /* Bottom 24 bits. */
#define PVRDMA_UAR_QP_OFFSET 0 /* QP doorbell. */
#define PVRDMA_UAR_QP_SEND (1 << 30) /* Send bit. */
#define PVRDMA_UAR_QP_RECV (1 << 31) /* Recv bit. */
#define PVRDMA_UAR_CQ_OFFSET 4 /* CQ doorbell. */
#define PVRDMA_UAR_CQ_ARM_SOL (1 << 29) /* Arm solicited bit. */
#define PVRDMA_UAR_CQ_ARM (1 << 30) /* Arm bit. */
#define PVRDMA_UAR_CQ_POLL (1 << 31) /* Poll bit. */
#define PVRDMA_UAR_SRQ_OFFSET 8 /* SRQ doorbell. */
#define PVRDMA_UAR_SRQ_RECV (1 << 30) /* Recv bit. */
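
/*
 * Illustrative sketch, not part of the original ABI header: the doorbell
 * encoding above suggests that ringing a QP's send queue means writing the
 * QP handle (masked to the bottom 24 bits with PVRDMA_UAR_HANDLE_MASK)
 * OR'ed with PVRDMA_UAR_QP_SEND to the mapped UAR page at byte offset
 * PVRDMA_UAR_QP_OFFSET. The function name and the 'uar_va'/'qp_handle'
 * parameters are hypothetical.
 */
static inline void example_pvrdma_ring_sq_doorbell(void *uar_va,
                                                   uint32_t qp_handle)
{
        uint32_t val = (qp_handle & PVRDMA_UAR_HANDLE_MASK) | PVRDMA_UAR_QP_SEND;

        /* Store the doorbell value at the QP doorbell offset of the UAR. */
        *(volatile uint32_t *)((char *)uar_va + PVRDMA_UAR_QP_OFFSET) = val;
}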

enum pvrdma_wr_opcode {
        PVRDMA_WR_RDMA_WRITE,
        PVRDMA_WR_RDMA_WRITE_WITH_IMM,
        PVRDMA_WR_SEND,
        PVRDMA_WR_SEND_WITH_IMM,
        PVRDMA_WR_RDMA_READ,
        PVRDMA_WR_ATOMIC_CMP_AND_SWP,
        PVRDMA_WR_ATOMIC_FETCH_AND_ADD,
        PVRDMA_WR_LSO,
        PVRDMA_WR_SEND_WITH_INV,
        PVRDMA_WR_RDMA_READ_WITH_INV,
        PVRDMA_WR_LOCAL_INV,
        PVRDMA_WR_FAST_REG_MR,
        PVRDMA_WR_MASKED_ATOMIC_CMP_AND_SWP,
        PVRDMA_WR_MASKED_ATOMIC_FETCH_AND_ADD,
        PVRDMA_WR_BIND_MW,
        PVRDMA_WR_REG_SIG_MR,
        PVRDMA_WR_ERROR,
};

enum pvrdma_wc_status {
        PVRDMA_WC_SUCCESS,
        PVRDMA_WC_LOC_LEN_ERR,
        PVRDMA_WC_LOC_QP_OP_ERR,
        PVRDMA_WC_LOC_EEC_OP_ERR,
        PVRDMA_WC_LOC_PROT_ERR,
        PVRDMA_WC_WR_FLUSH_ERR,
        PVRDMA_WC_MW_BIND_ERR,
        PVRDMA_WC_BAD_RESP_ERR,
        PVRDMA_WC_LOC_ACCESS_ERR,
        PVRDMA_WC_REM_INV_REQ_ERR,
        PVRDMA_WC_REM_ACCESS_ERR,
        PVRDMA_WC_REM_OP_ERR,
        PVRDMA_WC_RETRY_EXC_ERR,
        PVRDMA_WC_RNR_RETRY_EXC_ERR,
        PVRDMA_WC_LOC_RDD_VIOL_ERR,
        PVRDMA_WC_REM_INV_RD_REQ_ERR,
        PVRDMA_WC_REM_ABORT_ERR,
        PVRDMA_WC_INV_EECN_ERR,
        PVRDMA_WC_INV_EEC_STATE_ERR,
        PVRDMA_WC_FATAL_ERR,
        PVRDMA_WC_RESP_TIMEOUT_ERR,
        PVRDMA_WC_GENERAL_ERR,
};

enum pvrdma_wc_opcode {
        PVRDMA_WC_SEND,
        PVRDMA_WC_RDMA_WRITE,
        PVRDMA_WC_RDMA_READ,
        PVRDMA_WC_COMP_SWAP,
        PVRDMA_WC_FETCH_ADD,
        PVRDMA_WC_BIND_MW,
        PVRDMA_WC_LSO,
        PVRDMA_WC_LOCAL_INV,
        PVRDMA_WC_FAST_REG_MR,
        PVRDMA_WC_MASKED_COMP_SWAP,
        PVRDMA_WC_MASKED_FETCH_ADD,
        PVRDMA_WC_RECV = 1 << 7,
        PVRDMA_WC_RECV_RDMA_WITH_IMM,
};

enum pvrdma_wc_flags {
        PVRDMA_WC_GRH = 1 << 0,
        PVRDMA_WC_WITH_IMM = 1 << 1,
        PVRDMA_WC_WITH_INVALIDATE = 1 << 2,
        PVRDMA_WC_IP_CSUM_OK = 1 << 3,
        PVRDMA_WC_WITH_SMAC = 1 << 4,
        PVRDMA_WC_WITH_VLAN = 1 << 5,
        PVRDMA_WC_WITH_NETWORK_HDR_TYPE = 1 << 6,
        PVRDMA_WC_FLAGS_MAX = PVRDMA_WC_WITH_NETWORK_HDR_TYPE,
};

struct pvrdma_alloc_ucontext_resp {
        uint32_t qp_tab_size;
        uint32_t reserved;
};

struct pvrdma_alloc_pd_resp {
        uint32_t pdn;
        uint32_t reserved;
};

struct pvrdma_create_cq {
        uint64_t __attribute__((aligned(8))) buf_addr;
        uint32_t buf_size;
        uint32_t reserved;
};

struct pvrdma_create_cq_resp {
        uint32_t cqn;
        uint32_t reserved;
};

struct pvrdma_resize_cq {
        uint64_t __attribute__((aligned(8))) buf_addr;
        uint32_t buf_size;
        uint32_t reserved;
};

struct pvrdma_create_srq {
        uint64_t __attribute__((aligned(8))) buf_addr;
        uint32_t buf_size;
        uint32_t reserved;
};

struct pvrdma_create_srq_resp {
        uint32_t srqn;
        uint32_t reserved;
};

struct pvrdma_create_qp {
        uint64_t __attribute__((aligned(8))) rbuf_addr;
        uint64_t __attribute__((aligned(8))) sbuf_addr;
        uint32_t rbuf_size;
        uint32_t sbuf_size;
        uint64_t __attribute__((aligned(8))) qp_addr;
};

/* PVRDMA masked atomic compare and swap */
struct pvrdma_ex_cmp_swap {
        uint64_t __attribute__((aligned(8))) swap_val;
        uint64_t __attribute__((aligned(8))) compare_val;
        uint64_t __attribute__((aligned(8))) swap_mask;
        uint64_t __attribute__((aligned(8))) compare_mask;
};

/* PVRDMA masked atomic fetch and add */
struct pvrdma_ex_fetch_add {
        uint64_t __attribute__((aligned(8))) add_val;
        uint64_t __attribute__((aligned(8))) field_boundary;
};

/* PVRDMA address vector. */
struct pvrdma_av {
        uint32_t port_pd;
        uint32_t sl_tclass_flowlabel;
        uint8_t dgid[16];
        uint8_t src_path_bits;
        uint8_t gid_index;
        uint8_t stat_rate;
        uint8_t hop_limit;
        uint8_t dmac[6];
        uint8_t reserved[6];
};

/* PVRDMA scatter/gather entry */
struct pvrdma_sge {
        uint64_t __attribute__((aligned(8))) addr;
        uint32_t length;
        uint32_t lkey;
};

/* PVRDMA receive queue work request */
struct pvrdma_rq_wqe_hdr {
        uint64_t __attribute__((aligned(8))) wr_id; /* wr id */
        uint32_t num_sge; /* size of s/g array */
        uint32_t total_len; /* reserved */
};
/* Use pvrdma_sge (ib_sge) for receive queue s/g array elements. */
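
/*
 * Illustrative sketch (assumption, not dictated by this header): a posted
 * receive WQE is assumed to be laid out as a pvrdma_rq_wqe_hdr immediately
 * followed by num_sge pvrdma_sge entries, so its occupied size can be
 * computed as below. The helper name is hypothetical.
 */
static inline uint32_t example_pvrdma_rq_wqe_size(uint32_t num_sge)
{
        return sizeof(struct pvrdma_rq_wqe_hdr) +
               num_sge * (uint32_t)sizeof(struct pvrdma_sge);
}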

/* PVRDMA send queue work request */
struct pvrdma_sq_wqe_hdr {
        uint64_t __attribute__((aligned(8))) wr_id; /* wr id */
        uint32_t num_sge; /* size of s/g array */
        uint32_t total_len; /* reserved */
        uint32_t opcode; /* operation type */
        uint32_t send_flags; /* wr flags */
        union {
                uint32_t imm_data;
                uint32_t invalidate_rkey;
        } ex;
        uint32_t reserved;
        union {
                struct {
                        uint64_t __attribute__((aligned(8))) remote_addr;
                        uint32_t rkey;
                        uint8_t reserved[4];
                } rdma;
                struct {
                        uint64_t __attribute__((aligned(8))) remote_addr;
                        uint64_t __attribute__((aligned(8))) compare_add;
                        uint64_t __attribute__((aligned(8))) swap;
                        uint32_t rkey;
                        uint32_t reserved;
                } atomic;
                struct {
                        uint64_t __attribute__((aligned(8))) remote_addr;
                        uint32_t log_arg_sz;
                        uint32_t rkey;
                        union {
                                struct pvrdma_ex_cmp_swap cmp_swap;
                                struct pvrdma_ex_fetch_add fetch_add;
                        } wr_data;
                } masked_atomics;
                struct {
                        uint64_t __attribute__((aligned(8))) iova_start;
                        uint64_t __attribute__((aligned(8))) pl_pdir_dma;
                        uint32_t page_shift;
                        uint32_t page_list_len;
                        uint32_t length;
                        uint32_t access_flags;
                        uint32_t rkey;
                        uint32_t reserved;
                } fast_reg;
                struct {
                        uint32_t remote_qpn;
                        uint32_t remote_qkey;
                        struct pvrdma_av av;
                } ud;
        } wr;
};
/* Use pvrdma_sge (ib_sge) for send queue s/g array elements. */
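
/*
 * Illustrative sketch (assumption, not dictated by this header): a send WQE
 * is assumed to consist of a pvrdma_sq_wqe_hdr followed by num_sge
 * pvrdma_sge entries; for an RDMA write, the rdma member of the wr union
 * carries the remote address and rkey. The function name and parameters are
 * hypothetical, and only the fields relevant to the example are filled in.
 */
static inline void example_pvrdma_build_rdma_write_hdr(struct pvrdma_sq_wqe_hdr *hdr,
                                                       uint64_t wr_id,
                                                       uint32_t num_sge,
                                                       uint64_t remote_addr,
                                                       uint32_t rkey)
{
        hdr->wr_id = wr_id;
        hdr->num_sge = num_sge;
        hdr->opcode = PVRDMA_WR_RDMA_WRITE;
        hdr->wr.rdma.remote_addr = remote_addr;
        hdr->wr.rdma.rkey = rkey;
}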

/* Completion queue element. */
struct pvrdma_cqe {
        uint64_t __attribute__((aligned(8))) wr_id;
        uint64_t __attribute__((aligned(8))) qp;
        uint32_t opcode;
        uint32_t status;
        uint32_t byte_len;
        uint32_t imm_data;
        uint32_t src_qp;
        uint32_t wc_flags;
        uint32_t vendor_err;
        uint16_t pkey_index;
        uint16_t slid;
        uint8_t sl;
        uint8_t dlid_path_bits;
        uint8_t port_num;
        uint8_t smac[6];
        uint8_t network_hdr_type;
        uint8_t reserved2[6]; /* Pad to next power of 2 (64). */
};
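
/*
 * Sanity check, added for illustration and not part of the original ABI
 * header: per the padding comment above, the CQE fields sum to exactly
 * 64 bytes (8 + 8 + 7 * 4 + 2 * 2 + 3 * 1 + 6 + 1 + 6 = 64), the next
 * power of two. The negative-array trick below would fail to compile if
 * the layout ever drifted from that size.
 */
typedef char example_pvrdma_cqe_size_check[sizeof(struct pvrdma_cqe) == 64 ? 1 : -1];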

#endif /* __VMW_PVRDMA_ABI_H__ */