/* drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h */

/*
 * Copyright (c) 2012-2016 VMware, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of EITHER the GNU General Public License
 * version 2 as published by the Free Software Foundation or the BSD
 * 2-Clause License. This program is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
 * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License version 2 for more details at
 * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program available in the file COPYING in the main
 * directory of this source tree.
 *
 * The BSD 2-Clause License
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef __PVRDMA_DEV_API_H__
#define __PVRDMA_DEV_API_H__

#include <linux/types.h>

#include "pvrdma_verbs.h"

/*
 * PVRDMA version macros. Some new features require updates to PVRDMA_VERSION.
 * These macros allow us to check for different features if necessary.
 */

#define PVRDMA_ROCEV1_VERSION 17
#define PVRDMA_ROCEV2_VERSION 18
#define PVRDMA_VERSION PVRDMA_ROCEV2_VERSION

#define PVRDMA_BOARD_ID 1
#define PVRDMA_REV_ID 1

/*
 * Masks and accessors for page directory, which is a two-level lookup:
 * page directory -> page table -> page. Only one directory for now, but we
 * could expand that easily. 9 bits for tables, 9 bits for pages, gives one
 * gigabyte for memory regions and so forth.
 */

#define PVRDMA_PDIR_SHIFT 18
#define PVRDMA_PTABLE_SHIFT 9
#define PVRDMA_PAGE_DIR_DIR(x) (((x) >> PVRDMA_PDIR_SHIFT) & 0x1)
#define PVRDMA_PAGE_DIR_TABLE(x) (((x) >> PVRDMA_PTABLE_SHIFT) & 0x1ff)
#define PVRDMA_PAGE_DIR_PAGE(x) ((x) & 0x1ff)
#define PVRDMA_PAGE_DIR_MAX_PAGES (1 * 512 * 512)
#define PVRDMA_MAX_FAST_REG_PAGES 128

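/*
 * Illustrative sketch (not part of the device API): with 4 KiB pages the
 * two-level layout above covers 1 * 512 * 512 * 4 KiB = 1 GiB. A flat page
 * index such as 0x250 decomposes with the accessors as:
 *
 *	PVRDMA_PAGE_DIR_DIR(0x250)   == 0	(bits 18 and up)
 *	PVRDMA_PAGE_DIR_TABLE(0x250) == 1	(bits 9-17)
 *	PVRDMA_PAGE_DIR_PAGE(0x250)  == 0x50	(bits 0-8)
 */
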
/*
 * Max MSI-X vectors.
 */

#define PVRDMA_MAX_INTERRUPTS 3

/* Register offsets within PCI resource on BAR1. */
#define PVRDMA_REG_VERSION 0x00 /* R: Version of device. */
#define PVRDMA_REG_DSRLOW 0x04 /* W: Device shared region low PA. */
#define PVRDMA_REG_DSRHIGH 0x08 /* W: Device shared region high PA. */
#define PVRDMA_REG_CTL 0x0c /* W: PVRDMA_DEVICE_CTL */
#define PVRDMA_REG_REQUEST 0x10 /* W: Indicate device request. */
#define PVRDMA_REG_ERR 0x14 /* R: Device error. */
#define PVRDMA_REG_ICR 0x18 /* R: Interrupt cause. */
#define PVRDMA_REG_IMR 0x1c /* R/W: Interrupt mask. */
#define PVRDMA_REG_MACL 0x20 /* R/W: MAC address low. */
#define PVRDMA_REG_MACH 0x24 /* R/W: MAC address high. */

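/*
 * Illustrative sketch (not part of this header): a driver that has mapped
 * BAR1 to a pointer named "regs" (placeholder name) would use the usual MMIO
 * accessors on these offsets, e.g.:
 *
 *	u32 version = readl(regs + PVRDMA_REG_VERSION);
 *
 *	writel(PVRDMA_DEVICE_CTL_RESET, regs + PVRDMA_REG_CTL);
 */
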
/* Object flags. */
#define PVRDMA_CQ_FLAG_ARMED_SOL BIT(0) /* Armed for solicited-only. */
#define PVRDMA_CQ_FLAG_ARMED BIT(1) /* Armed. */
#define PVRDMA_MR_FLAG_DMA BIT(0) /* DMA region. */
#define PVRDMA_MR_FLAG_FRMR BIT(1) /* Fast reg memory region. */

/*
 * Atomic operation capability (masked versions are extended atomic
 * operations).
 */

#define PVRDMA_ATOMIC_OP_COMP_SWAP BIT(0) /* Compare and swap. */
#define PVRDMA_ATOMIC_OP_FETCH_ADD BIT(1) /* Fetch and add. */
#define PVRDMA_ATOMIC_OP_MASK_COMP_SWAP BIT(2) /* Masked compare and swap. */
#define PVRDMA_ATOMIC_OP_MASK_FETCH_ADD BIT(3) /* Masked fetch and add. */

/*
 * Base Memory Management Extension flags to support Fast Reg Memory Regions
 * and Fast Reg Work Requests. Each flag represents a verb operation and we
 * must support all of them to qualify for the BMME device cap.
 */

#define PVRDMA_BMME_FLAG_LOCAL_INV BIT(0) /* Local Invalidate. */
#define PVRDMA_BMME_FLAG_REMOTE_INV BIT(1) /* Remote Invalidate. */
#define PVRDMA_BMME_FLAG_FAST_REG_WR BIT(2) /* Fast Reg Work Request. */

/*
 * GID types. The interpretation of the gid_types bit field in the device
 * capabilities will depend on the device mode. For now, the device only
 * supports RoCE as mode, so only the different GID types for RoCE are
 * defined.
 */

#define PVRDMA_GID_TYPE_FLAG_ROCE_V1 BIT(0)
#define PVRDMA_GID_TYPE_FLAG_ROCE_V2 BIT(1)

/*
 * Version checks. This checks whether each version supports specific
 * capabilities from the device.
 */

#define PVRDMA_IS_VERSION17(_dev) \
	(_dev->dsr_version == PVRDMA_ROCEV1_VERSION && \
	 _dev->dsr->caps.gid_types == PVRDMA_GID_TYPE_FLAG_ROCE_V1)

#define PVRDMA_IS_VERSION18(_dev) \
	(_dev->dsr_version >= PVRDMA_ROCEV2_VERSION && \
	 (_dev->dsr->caps.gid_types == PVRDMA_GID_TYPE_FLAG_ROCE_V1 || \
	  _dev->dsr->caps.gid_types == PVRDMA_GID_TYPE_FLAG_ROCE_V2))

#define PVRDMA_SUPPORTED(_dev) \
	((_dev->dsr->caps.mode == PVRDMA_DEVICE_MODE_ROCE) && \
	 (PVRDMA_IS_VERSION17(_dev) || PVRDMA_IS_VERSION18(_dev)))

/*
 * Get capability values based on device version.
 */

#define PVRDMA_GET_CAP(_dev, _old_val, _val) \
	((PVRDMA_IS_VERSION18(_dev)) ? _val : _old_val)

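/*
 * Illustrative use (not an API defined in this header): callers can fall
 * back to a pre-RoCEv2 capability field on older devices; "max_srq_sge"
 * below is a placeholder variable:
 *
 *	max_srq_sge = PVRDMA_GET_CAP(dev, dev->dsr->caps.max_sge,
 *				     dev->dsr->caps.max_srq_sge);
 */
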
enum pvrdma_pci_resource {
	PVRDMA_PCI_RESOURCE_MSIX, /* BAR0: MSI-X, MMIO. */
	PVRDMA_PCI_RESOURCE_REG, /* BAR1: Registers, MMIO. */
	PVRDMA_PCI_RESOURCE_UAR, /* BAR2: UAR pages, MMIO, 64-bit. */
	PVRDMA_PCI_RESOURCE_LAST, /* Last. */
};

enum pvrdma_device_ctl {
	PVRDMA_DEVICE_CTL_ACTIVATE, /* Activate device. */
	PVRDMA_DEVICE_CTL_UNQUIESCE, /* Unquiesce device. */
	PVRDMA_DEVICE_CTL_RESET, /* Reset device. */
};

enum pvrdma_intr_vector {
	PVRDMA_INTR_VECTOR_RESPONSE, /* Command response. */
	PVRDMA_INTR_VECTOR_ASYNC, /* Async events. */
	PVRDMA_INTR_VECTOR_CQ, /* CQ notification. */
	/* Additional CQ notification vectors. */
};

enum pvrdma_intr_cause {
	PVRDMA_INTR_CAUSE_RESPONSE = (1 << PVRDMA_INTR_VECTOR_RESPONSE),
	PVRDMA_INTR_CAUSE_ASYNC = (1 << PVRDMA_INTR_VECTOR_ASYNC),
	PVRDMA_INTR_CAUSE_CQ = (1 << PVRDMA_INTR_VECTOR_CQ),
};

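/*
 * Illustrative sketch (not part of the device API): when per-vector MSI-X is
 * not in use, an interrupt handler might read the cause register and test
 * these bits; "regs" and handle_async_events() are placeholder names:
 *
 *	u32 icr = readl(regs + PVRDMA_REG_ICR);
 *
 *	if (icr & PVRDMA_INTR_CAUSE_ASYNC)
 *		handle_async_events();
 */
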
enum pvrdma_gos_bits {
	PVRDMA_GOS_BITS_UNK, /* Unknown. */
	PVRDMA_GOS_BITS_32, /* 32-bit. */
	PVRDMA_GOS_BITS_64, /* 64-bit. */
};

enum pvrdma_gos_type {
	PVRDMA_GOS_TYPE_UNK, /* Unknown. */
	PVRDMA_GOS_TYPE_LINUX, /* Linux. */
};

enum pvrdma_device_mode {
	PVRDMA_DEVICE_MODE_ROCE, /* RoCE. */
	PVRDMA_DEVICE_MODE_IWARP, /* iWarp. */
	PVRDMA_DEVICE_MODE_IB, /* InfiniBand. */
};

struct pvrdma_gos_info {
	u32 gos_bits:2; /* W: PVRDMA_GOS_BITS_ */
	u32 gos_type:4; /* W: PVRDMA_GOS_TYPE_ */
	u32 gos_ver:16; /* W: Guest OS version. */
	u32 gos_misc:10; /* W: Other. */
	u32 pad; /* Pad to 8-byte alignment. */
};

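/*
 * Illustrative sketch (not part of the device API): a Linux guest would
 * describe itself roughly as follows before publishing the shared region;
 * "gos_info" here is a local variable, not a symbol defined in this header:
 *
 *	gos_info.gos_bits = sizeof(void *) == 8 ? PVRDMA_GOS_BITS_64 :
 *						  PVRDMA_GOS_BITS_32;
 *	gos_info.gos_type = PVRDMA_GOS_TYPE_LINUX;
 *	gos_info.gos_ver = 1;
 */
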
struct pvrdma_device_caps {
	u64 fw_ver; /* R: Query device. */
	__be64 node_guid;
	__be64 sys_image_guid;
	u64 max_mr_size;
	u64 page_size_cap;
	u64 atomic_arg_sizes; /* EX verbs. */
	u32 ex_comp_mask; /* EX verbs. */
	u32 device_cap_flags2; /* EX verbs. */
	u32 max_fa_bit_boundary; /* EX verbs. */
	u32 log_max_atomic_inline_arg; /* EX verbs. */
	u32 vendor_id;
	u32 vendor_part_id;
	u32 hw_ver;
	u32 max_qp;
	u32 max_qp_wr;
	u32 device_cap_flags;
	u32 max_sge;
	u32 max_sge_rd;
	u32 max_cq;
	u32 max_cqe;
	u32 max_mr;
	u32 max_pd;
	u32 max_qp_rd_atom;
	u32 max_ee_rd_atom;
	u32 max_res_rd_atom;
	u32 max_qp_init_rd_atom;
	u32 max_ee_init_rd_atom;
	u32 max_ee;
	u32 max_rdd;
	u32 max_mw;
	u32 max_raw_ipv6_qp;
	u32 max_raw_ethy_qp;
	u32 max_mcast_grp;
	u32 max_mcast_qp_attach;
	u32 max_total_mcast_qp_attach;
	u32 max_ah;
	u32 max_fmr;
	u32 max_map_per_fmr;
	u32 max_srq;
	u32 max_srq_wr;
	u32 max_srq_sge;
	u32 max_uar;
	u32 gid_tbl_len;
	u16 max_pkeys;
	u8 local_ca_ack_delay;
	u8 phys_port_cnt;
	u8 mode; /* PVRDMA_DEVICE_MODE_ */
	u8 atomic_ops; /* PVRDMA_ATOMIC_OP_* bits */
	u8 bmme_flags; /* FRWR Mem Mgmt Extensions */
	u8 gid_types; /* PVRDMA_GID_TYPE_FLAG_ */
	u32 max_fast_reg_page_list_len;
};

struct pvrdma_ring_page_info {
	u32 num_pages; /* Num pages incl. header. */
	u32 reserved; /* Reserved. */
	u64 pdir_dma; /* Page directory PA. */
};

#pragma pack(push, 1)

struct pvrdma_device_shared_region {
	u32 driver_version; /* W: Driver version. */
	u32 pad; /* Pad to 8-byte align. */
	struct pvrdma_gos_info gos_info; /* W: Guest OS information. */
	u64 cmd_slot_dma; /* W: Command slot address. */
	u64 resp_slot_dma; /* W: Response slot address. */
	struct pvrdma_ring_page_info async_ring_pages;
					/* W: Async ring page info. */
	struct pvrdma_ring_page_info cq_ring_pages;
					/* W: CQ ring page info. */
	u32 uar_pfn; /* W: UAR pageframe. */
	u32 pad2; /* Pad to 8-byte align. */
	struct pvrdma_device_caps caps; /* R: Device capabilities. */
};

#pragma pack(pop)

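/*
 * Illustrative sketch (not part of the device API): the guest publishes the
 * shared region by writing its DMA address, split into 32-bit halves, into
 * the BAR1 registers defined above; "regs" and "dsr_dma" are placeholders:
 *
 *	writel((u32)dsr_dma, regs + PVRDMA_REG_DSRLOW);
 *	writel((u32)(dsr_dma >> 32), regs + PVRDMA_REG_DSRHIGH);
 */
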
/* Event types. Currently a 1:1 mapping with enum ib_event. */
enum pvrdma_eqe_type {
	PVRDMA_EVENT_CQ_ERR,
	PVRDMA_EVENT_QP_FATAL,
	PVRDMA_EVENT_QP_REQ_ERR,
	PVRDMA_EVENT_QP_ACCESS_ERR,
	PVRDMA_EVENT_COMM_EST,
	PVRDMA_EVENT_SQ_DRAINED,
	PVRDMA_EVENT_PATH_MIG,
	PVRDMA_EVENT_PATH_MIG_ERR,
	PVRDMA_EVENT_DEVICE_FATAL,
	PVRDMA_EVENT_PORT_ACTIVE,
	PVRDMA_EVENT_PORT_ERR,
	PVRDMA_EVENT_LID_CHANGE,
	PVRDMA_EVENT_PKEY_CHANGE,
	PVRDMA_EVENT_SM_CHANGE,
	PVRDMA_EVENT_SRQ_ERR,
	PVRDMA_EVENT_SRQ_LIMIT_REACHED,
	PVRDMA_EVENT_QP_LAST_WQE_REACHED,
	PVRDMA_EVENT_CLIENT_REREGISTER,
	PVRDMA_EVENT_GID_CHANGE,
};

/* Event queue element. */
struct pvrdma_eqe {
	u32 type; /* Event type. */
	u32 info; /* Handle, other. */
};

/* CQ notification queue element. */
struct pvrdma_cqne {
	u32 info; /* Handle */
};

enum {
	PVRDMA_CMD_FIRST,
	PVRDMA_CMD_QUERY_PORT = PVRDMA_CMD_FIRST,
	PVRDMA_CMD_QUERY_PKEY,
	PVRDMA_CMD_CREATE_PD,
	PVRDMA_CMD_DESTROY_PD,
	PVRDMA_CMD_CREATE_MR,
	PVRDMA_CMD_DESTROY_MR,
	PVRDMA_CMD_CREATE_CQ,
	PVRDMA_CMD_RESIZE_CQ,
	PVRDMA_CMD_DESTROY_CQ,
	PVRDMA_CMD_CREATE_QP,
	PVRDMA_CMD_MODIFY_QP,
	PVRDMA_CMD_QUERY_QP,
	PVRDMA_CMD_DESTROY_QP,
	PVRDMA_CMD_CREATE_UC,
	PVRDMA_CMD_DESTROY_UC,
	PVRDMA_CMD_CREATE_BIND,
	PVRDMA_CMD_DESTROY_BIND,
	PVRDMA_CMD_CREATE_SRQ,
	PVRDMA_CMD_MODIFY_SRQ,
	PVRDMA_CMD_QUERY_SRQ,
	PVRDMA_CMD_DESTROY_SRQ,
	PVRDMA_CMD_MAX,
};

enum {
	PVRDMA_CMD_FIRST_RESP = (1 << 31),
	PVRDMA_CMD_QUERY_PORT_RESP = PVRDMA_CMD_FIRST_RESP,
	PVRDMA_CMD_QUERY_PKEY_RESP,
	PVRDMA_CMD_CREATE_PD_RESP,
	PVRDMA_CMD_DESTROY_PD_RESP_NOOP,
	PVRDMA_CMD_CREATE_MR_RESP,
	PVRDMA_CMD_DESTROY_MR_RESP_NOOP,
	PVRDMA_CMD_CREATE_CQ_RESP,
	PVRDMA_CMD_RESIZE_CQ_RESP,
	PVRDMA_CMD_DESTROY_CQ_RESP_NOOP,
	PVRDMA_CMD_CREATE_QP_RESP,
	PVRDMA_CMD_MODIFY_QP_RESP,
	PVRDMA_CMD_QUERY_QP_RESP,
	PVRDMA_CMD_DESTROY_QP_RESP,
	PVRDMA_CMD_CREATE_UC_RESP,
	PVRDMA_CMD_DESTROY_UC_RESP_NOOP,
	PVRDMA_CMD_CREATE_BIND_RESP_NOOP,
	PVRDMA_CMD_DESTROY_BIND_RESP_NOOP,
	PVRDMA_CMD_CREATE_SRQ_RESP,
	PVRDMA_CMD_MODIFY_SRQ_RESP,
	PVRDMA_CMD_QUERY_SRQ_RESP,
	PVRDMA_CMD_DESTROY_SRQ_RESP,
	PVRDMA_CMD_MAX_RESP,
};

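/*
 * Note: the two enums above are kept in the same order, so each response
 * code is simply the corresponding command code with bit 31 set, e.g.
 * PVRDMA_CMD_CREATE_PD_RESP == (PVRDMA_CMD_CREATE_PD | PVRDMA_CMD_FIRST_RESP).
 */
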
struct pvrdma_cmd_hdr {
	u64 response; /* Key for response lookup. */
	u32 cmd; /* PVRDMA_CMD_ */
	u32 reserved; /* Reserved. */
};

struct pvrdma_cmd_resp_hdr {
	u64 response; /* From cmd hdr. */
	u32 ack; /* PVRDMA_CMD_XXX_RESP */
	u8 err; /* Error. */
	u8 reserved[3]; /* Reserved. */
};

struct pvrdma_cmd_query_port {
	struct pvrdma_cmd_hdr hdr;
	u8 port_num;
	u8 reserved[7];
};

struct pvrdma_cmd_query_port_resp {
	struct pvrdma_cmd_resp_hdr hdr;
	struct pvrdma_port_attr attrs;
};

struct pvrdma_cmd_query_pkey {
	struct pvrdma_cmd_hdr hdr;
	u8 port_num;
	u8 index;
	u8 reserved[6];
};

struct pvrdma_cmd_query_pkey_resp {
	struct pvrdma_cmd_resp_hdr hdr;
	u16 pkey;
	u8 reserved[6];
};

struct pvrdma_cmd_create_uc {
	struct pvrdma_cmd_hdr hdr;
	u32 pfn; /* UAR page frame number */
	u8 reserved[4];
};

struct pvrdma_cmd_create_uc_resp {
	struct pvrdma_cmd_resp_hdr hdr;
	u32 ctx_handle;
	u8 reserved[4];
};

struct pvrdma_cmd_destroy_uc {
	struct pvrdma_cmd_hdr hdr;
	u32 ctx_handle;
	u8 reserved[4];
};

struct pvrdma_cmd_create_pd {
	struct pvrdma_cmd_hdr hdr;
	u32 ctx_handle;
	u8 reserved[4];
};

struct pvrdma_cmd_create_pd_resp {
	struct pvrdma_cmd_resp_hdr hdr;
	u32 pd_handle;
	u8 reserved[4];
};

struct pvrdma_cmd_destroy_pd {
	struct pvrdma_cmd_hdr hdr;
	u32 pd_handle;
	u8 reserved[4];
};

struct pvrdma_cmd_create_mr {
	struct pvrdma_cmd_hdr hdr;
	u64 start;
	u64 length;
	u64 pdir_dma;
	u32 pd_handle;
	u32 access_flags;
	u32 flags;
	u32 nchunks;
};

struct pvrdma_cmd_create_mr_resp {
	struct pvrdma_cmd_resp_hdr hdr;
	u32 mr_handle;
	u32 lkey;
	u32 rkey;
	u8 reserved[4];
};

struct pvrdma_cmd_destroy_mr {
	struct pvrdma_cmd_hdr hdr;
	u32 mr_handle;
	u8 reserved[4];
};

struct pvrdma_cmd_create_cq {
	struct pvrdma_cmd_hdr hdr;
	u64 pdir_dma;
	u32 ctx_handle;
	u32 cqe;
	u32 nchunks;
	u8 reserved[4];
};

struct pvrdma_cmd_create_cq_resp {
	struct pvrdma_cmd_resp_hdr hdr;
	u32 cq_handle;
	u32 cqe;
};

struct pvrdma_cmd_resize_cq {
	struct pvrdma_cmd_hdr hdr;
	u32 cq_handle;
	u32 cqe;
};

struct pvrdma_cmd_resize_cq_resp {
	struct pvrdma_cmd_resp_hdr hdr;
	u32 cqe;
	u8 reserved[4];
};

struct pvrdma_cmd_destroy_cq {
	struct pvrdma_cmd_hdr hdr;
	u32 cq_handle;
	u8 reserved[4];
};

struct pvrdma_cmd_create_srq {
	struct pvrdma_cmd_hdr hdr;
	u64 pdir_dma;
	u32 pd_handle;
	u32 nchunks;
	struct pvrdma_srq_attr attrs;
	u8 srq_type;
	u8 reserved[7];
};

struct pvrdma_cmd_create_srq_resp {
	struct pvrdma_cmd_resp_hdr hdr;
	u32 srqn;
	u8 reserved[4];
};

struct pvrdma_cmd_modify_srq {
	struct pvrdma_cmd_hdr hdr;
	u32 srq_handle;
	u32 attr_mask;
	struct pvrdma_srq_attr attrs;
};

struct pvrdma_cmd_query_srq {
	struct pvrdma_cmd_hdr hdr;
	u32 srq_handle;
	u8 reserved[4];
};

struct pvrdma_cmd_query_srq_resp {
	struct pvrdma_cmd_resp_hdr hdr;
	struct pvrdma_srq_attr attrs;
};

struct pvrdma_cmd_destroy_srq {
	struct pvrdma_cmd_hdr hdr;
	u32 srq_handle;
	u8 reserved[4];
};

struct pvrdma_cmd_create_qp {
	struct pvrdma_cmd_hdr hdr;
	u64 pdir_dma;
	u32 pd_handle;
	u32 send_cq_handle;
	u32 recv_cq_handle;
	u32 srq_handle;
	u32 max_send_wr;
	u32 max_recv_wr;
	u32 max_send_sge;
	u32 max_recv_sge;
	u32 max_inline_data;
	u32 lkey;
	u32 access_flags;
	u16 total_chunks;
	u16 send_chunks;
	u16 max_atomic_arg;
	u8 sq_sig_all;
	u8 qp_type;
	u8 is_srq;
	u8 reserved[3];
};

struct pvrdma_cmd_create_qp_resp {
	struct pvrdma_cmd_resp_hdr hdr;
	u32 qpn;
	u32 max_send_wr;
	u32 max_recv_wr;
	u32 max_send_sge;
	u32 max_recv_sge;
	u32 max_inline_data;
};

struct pvrdma_cmd_modify_qp {
	struct pvrdma_cmd_hdr hdr;
	u32 qp_handle;
	u32 attr_mask;
	struct pvrdma_qp_attr attrs;
};

struct pvrdma_cmd_query_qp {
	struct pvrdma_cmd_hdr hdr;
	u32 qp_handle;
	u32 attr_mask;
};

struct pvrdma_cmd_query_qp_resp {
	struct pvrdma_cmd_resp_hdr hdr;
	struct pvrdma_qp_attr attrs;
};

struct pvrdma_cmd_destroy_qp {
	struct pvrdma_cmd_hdr hdr;
	u32 qp_handle;
	u8 reserved[4];
};

struct pvrdma_cmd_destroy_qp_resp {
	struct pvrdma_cmd_resp_hdr hdr;
	u32 events_reported;
	u8 reserved[4];
};

struct pvrdma_cmd_create_bind {
	struct pvrdma_cmd_hdr hdr;
	u32 mtu;
	u32 vlan;
	u32 index;
	u8 new_gid[16];
	u8 gid_type;
	u8 reserved[3];
};

struct pvrdma_cmd_destroy_bind {
	struct pvrdma_cmd_hdr hdr;
	u32 index;
	u8 dest_gid[16];
	u8 reserved[4];
};

union pvrdma_cmd_req {
	struct pvrdma_cmd_hdr hdr;
	struct pvrdma_cmd_query_port query_port;
	struct pvrdma_cmd_query_pkey query_pkey;
	struct pvrdma_cmd_create_uc create_uc;
	struct pvrdma_cmd_destroy_uc destroy_uc;
	struct pvrdma_cmd_create_pd create_pd;
	struct pvrdma_cmd_destroy_pd destroy_pd;
	struct pvrdma_cmd_create_mr create_mr;
	struct pvrdma_cmd_destroy_mr destroy_mr;
	struct pvrdma_cmd_create_cq create_cq;
	struct pvrdma_cmd_resize_cq resize_cq;
	struct pvrdma_cmd_destroy_cq destroy_cq;
	struct pvrdma_cmd_create_qp create_qp;
	struct pvrdma_cmd_modify_qp modify_qp;
	struct pvrdma_cmd_query_qp query_qp;
	struct pvrdma_cmd_destroy_qp destroy_qp;
	struct pvrdma_cmd_create_bind create_bind;
	struct pvrdma_cmd_destroy_bind destroy_bind;
	struct pvrdma_cmd_create_srq create_srq;
	struct pvrdma_cmd_modify_srq modify_srq;
	struct pvrdma_cmd_query_srq query_srq;
	struct pvrdma_cmd_destroy_srq destroy_srq;
};

union pvrdma_cmd_resp {
	struct pvrdma_cmd_resp_hdr hdr;
	struct pvrdma_cmd_query_port_resp query_port_resp;
	struct pvrdma_cmd_query_pkey_resp query_pkey_resp;
	struct pvrdma_cmd_create_uc_resp create_uc_resp;
	struct pvrdma_cmd_create_pd_resp create_pd_resp;
	struct pvrdma_cmd_create_mr_resp create_mr_resp;
	struct pvrdma_cmd_create_cq_resp create_cq_resp;
	struct pvrdma_cmd_resize_cq_resp resize_cq_resp;
	struct pvrdma_cmd_create_qp_resp create_qp_resp;
	struct pvrdma_cmd_query_qp_resp query_qp_resp;
	struct pvrdma_cmd_destroy_qp_resp destroy_qp_resp;
	struct pvrdma_cmd_create_srq_resp create_srq_resp;
	struct pvrdma_cmd_query_srq_resp query_srq_resp;
};

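/*
 * Illustrative sketch (not part of this header), assuming a command-posting
 * helper along the lines of the driver's pvrdma_cmd_post(); "dev",
 * "ctx_handle", "pd_handle" and "ret" are placeholders:
 *
 *	union pvrdma_cmd_req req = {};
 *	union pvrdma_cmd_resp rsp;
 *	struct pvrdma_cmd_create_pd *cmd = &req.create_pd;
 *
 *	cmd->hdr.cmd = PVRDMA_CMD_CREATE_PD;
 *	cmd->ctx_handle = ctx_handle;
 *	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_PD_RESP);
 *	if (!ret)
 *		pd_handle = rsp.create_pd_resp.pd_handle;
 */
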
#endif /* __PVRDMA_DEV_API_H__ */