/*
 * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#define CCWR_MAGIC		0xb07700b0

#define C2_QP_NO_ATTR_CHANGE 0xFFFFFFFF

/*
 * Maximum allowed size in bytes of the private_data exchanged
 * during connection setup.
 */
#define C2_MAX_PRIVATE_DATA_SIZE 200
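
/*
 * Illustrative sketch, not part of the original header: a consumer would
 * typically validate caller-supplied private data against the limit above
 * before building a connect or accept WR.  The parameter names here are
 * assumptions made only for this example.
 *
 *	if (private_data_len > C2_MAX_PRIVATE_DATA_SIZE)
 *		return -EINVAL;
 */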

/*
 * These types are shared among the adapter, host, and CCIL consumer.
 */
enum c2_cq_notification_type {
	C2_CQ_NOTIFICATION_TYPE_NONE = 1,
	C2_CQ_NOTIFICATION_TYPE_NEXT,
	C2_CQ_NOTIFICATION_TYPE_NEXT_SE

enum c2_setconfig_cmd {

enum c2_getconfig_cmd {
	C2_GETCONFIG_ROUTES = 1,

/*
 * CCIL Work Request Identifiers
 */
	CCWR_EP_LISTEN_CREATE,
	CCWR_EP_LISTEN_DESTROY,

	CCWR_INIT,		/* WARNING: Don't move this ever again! */

	/* Add new IDs here */

	/*
	 * WARNING: CCWR_LAST must always be the last verbs id defined!
	 * All the preceding IDs are fixed, and must not change.
	 * You can add new IDs, but must not remove or reorder
	 * any IDs.  If you do, YOU will ruin any hope of
	 * compatibility between versions.
	 */

/*
 * Start over at 1 so that arrays indexed by user wr IDs
 * begin at 1.  This is OK since the verbs and user wr IDs
 * are always used on disjoint sets of queues.
 */
	/*
	 * The order of the CCWR_SEND_XX verbs must
	 * match the order of the RDMA_OPs
	 */
	CCWR_STAG_INVALIDATE,

	/* WARNING: This must always be the last user wr id defined! */

#define RDMA_SEND_OPCODE_FROM_WR_ID(x) (x+2)
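
/*
 * Illustrative note, not part of the original header: given the ordering
 * guarantee above, this macro turns a user send WR id into its RDMA opcode
 * by adding a fixed offset.  The variable names are assumptions made only
 * for this example.
 *
 *	u8 rdma_opcode = RDMA_SEND_OPCODE_FROM_WR_ID(c2_wr_get_id(wr));
 */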

/*
 * SQ/RQ Work Request Types
 */
	C2_WR_TYPE_SEND = CCWR_SEND,
	C2_WR_TYPE_SEND_SE = CCWR_SEND_SE,
	C2_WR_TYPE_SEND_INV = CCWR_SEND_INV,
	C2_WR_TYPE_SEND_SE_INV = CCWR_SEND_SE_INV,
	C2_WR_TYPE_RDMA_WRITE = CCWR_RDMA_WRITE,
	C2_WR_TYPE_RDMA_READ = CCWR_RDMA_READ,
	C2_WR_TYPE_RDMA_READ_INV_STAG = CCWR_RDMA_READ_INV,
	C2_WR_TYPE_BIND_MW = CCWR_MW_BIND,
	C2_WR_TYPE_FASTREG_NSMR = CCWR_NSMR_FASTREG,
	C2_WR_TYPE_INV_STAG = CCWR_STAG_INVALIDATE,
	C2_WR_TYPE_RECV = CCWR_RECV,
	C2_WR_TYPE_NOP = CCWR_NOP,
	u32 ip_addr;		/* 0 indicates the default route */
	u32 netmask;		/* netmask associated with dst */

	u32 ipaddr;		/* address of the nexthop interface */

/*
 * A Scatter Gather Entry.
 */
struct c2_data_addr {

/*
 * MR and MW flags used by the consumer, RI, and RNIC.
 */
	MEM_REMOTE = 0x0001,		/* allow mw binds with remote access. */
	MEM_VA_BASED = 0x0002,		/* Not Zero-based */
	MEM_PBL_COMPLETE = 0x0004,	/* PBL array is complete in this msg */
	MEM_LOCAL_READ = 0x0008,	/* allow local reads */
	MEM_LOCAL_WRITE = 0x0010,	/* allow local writes */
	MEM_REMOTE_READ = 0x0020,	/* allow remote reads */
	MEM_REMOTE_WRITE = 0x0040,	/* allow remote writes */
	MEM_WINDOW_BIND = 0x0080,	/* binds allowed */
	MEM_SHARED = 0x0100,		/* set if MR is shared */
	MEM_STAG_VALID = 0x0200		/* set if STAG is in valid state */

/*
 * CCIL API ACF flags defined in terms of the low level mem flags.
 * This minimizes translation needed in the user API.
 */
	C2_ACF_LOCAL_READ = MEM_LOCAL_READ,
	C2_ACF_LOCAL_WRITE = MEM_LOCAL_WRITE,
	C2_ACF_REMOTE_READ = MEM_REMOTE_READ,
	C2_ACF_REMOTE_WRITE = MEM_REMOTE_WRITE,
	C2_ACF_WINDOW_BIND = MEM_WINDOW_BIND

/*
 * Image types of objects written to flash
 */
#define C2_FLASH_IMG_BITFILE 1
#define C2_FLASH_IMG_OPTION_ROM 2
#define C2_FLASH_IMG_VPD 3

/*
 * To fix bug 1815 we define the max size allowable of the
 * terminate message (per the IETF spec).  Refer to the IETF
 * protocol specification, section 12.1.6, page 64.
 * The message is prefixed by 20 bytes of DDP info.
 *
 * Then the message has 6 bytes for the terminate control
 * and DDP segment length info plus a DDP header (either
 * 14 or 18 bytes) plus 28 bytes for the RDMA header.
 * Thus the max size is:
 *	20 + (6 + 18 + 28) = 72
 */
#define C2_MAX_TERMINATE_MESSAGE_SIZE (72)

/*
 * Build String Length.  It must be the same as C2_BUILD_STR_LEN in ccil_api.h
 */
#define WR_BUILD_STR_LEN 64

/*
 * WARNING: All of these structs need to align any 64bit types on
 * 64 bit boundaries!  64bit types include u64 and __be64.
 */

/*
 * Clustercore Work Request Header.  Be sensitive to field layout.
 */
	/* wqe_count is part of the cqe.  It is put here so the
	 * adapter can write to it while the wr is pending without
	 * clobbering part of the wr.  This word need not be dma'd
	 * from the host to adapter by libccil, but we copy it anyway
	 * to make the memcpy to the adapter better aligned.
	 */

	/* Put these fields next so that later 32- and 64-bit
	 * quantities are naturally aligned.
	 */
	u8 result;		/* adapter -> host */
	u8 sge_count;		/* host -> adapter */
	u8 flags;		/* host -> adapter */
} __attribute__((packed));

/*
 *------------------------ RNIC ------------------------
 */

/*
 * Flags for the RNIC WRs
 */
	RNIC_IRD_STATIC = 0x0001,
	RNIC_ORD_STATIC = 0x0002,
	RNIC_QP_STATIC = 0x0004,
	RNIC_SRQ_SUPPORTED = 0x0008,
	RNIC_PBL_BLOCK_MODE = 0x0010,
	RNIC_SRQ_MODEL_ARRIVAL = 0x0020,
	RNIC_CQ_OVF_DETECTED = 0x0040,
	RNIC_PRIV_MODE = 0x0080

struct c2wr_rnic_open_req {
	__be16 flags;		/* See enum c2_rnic_flags */
} __attribute__((packed));

struct c2wr_rnic_open_rep {
} __attribute__((packed));

union c2wr_rnic_open {
	struct c2wr_rnic_open_req req;
	struct c2wr_rnic_open_rep rep;
} __attribute__((packed));

struct c2wr_rnic_query_req {
} __attribute__((packed));

struct c2wr_rnic_query_rep {
	char fw_ver_build_str[WR_BUILD_STR_LEN];
	u32 max_send_sgl_depth;
	u32 max_rdma_sgl_depth;
	u32 max_cq_event_handlers;
	__be32 max_global_ird;
} __attribute__((packed));

union c2wr_rnic_query {
	struct c2wr_rnic_query_req req;
	struct c2wr_rnic_query_rep rep;
} __attribute__((packed));

struct c2wr_rnic_getconfig_req {
	u32 option;		/* see c2_getconfig_cmd_t */
} __attribute__((packed));

struct c2wr_rnic_getconfig_rep {
	u32 option;		/* see c2_getconfig_cmd_t */
	u32 count_len;		/* length of the number of addresses configured */
} __attribute__((packed));

union c2wr_rnic_getconfig {
	struct c2wr_rnic_getconfig_req req;
	struct c2wr_rnic_getconfig_rep rep;
} __attribute__((packed));

struct c2wr_rnic_setconfig_req {
	__be32 option;		/* See c2_setconfig_cmd_t */
	/* variable data and pad.  See c2_netaddr and c2_route */
} __attribute__((packed));

struct c2wr_rnic_setconfig_rep {
} __attribute__((packed));

union c2wr_rnic_setconfig {
	struct c2wr_rnic_setconfig_req req;
	struct c2wr_rnic_setconfig_rep rep;
} __attribute__((packed));

struct c2wr_rnic_close_req {
} __attribute__((packed));

struct c2wr_rnic_close_rep {
} __attribute__((packed));

union c2wr_rnic_close {
	struct c2wr_rnic_close_req req;
	struct c2wr_rnic_close_rep rep;
} __attribute__((packed));

/*
 *------------------------ CQ ------------------------
 */
struct c2wr_cq_create_req {
} __attribute__((packed));

struct c2wr_cq_create_rep {
	__be32 adapter_shared;
} __attribute__((packed));

union c2wr_cq_create {
	struct c2wr_cq_create_req req;
	struct c2wr_cq_create_rep rep;
} __attribute__((packed));

struct c2wr_cq_modify_req {
} __attribute__((packed));

struct c2wr_cq_modify_rep {
} __attribute__((packed));

union c2wr_cq_modify {
	struct c2wr_cq_modify_req req;
	struct c2wr_cq_modify_rep rep;
} __attribute__((packed));

struct c2wr_cq_destroy_req {
} __attribute__((packed));

struct c2wr_cq_destroy_rep {
} __attribute__((packed));

union c2wr_cq_destroy {
	struct c2wr_cq_destroy_req req;
	struct c2wr_cq_destroy_rep rep;
} __attribute__((packed));

/*
 *------------------------ PD ------------------------
 */
struct c2wr_pd_alloc_req {
} __attribute__((packed));

struct c2wr_pd_alloc_rep {
} __attribute__((packed));

union c2wr_pd_alloc {
	struct c2wr_pd_alloc_req req;
	struct c2wr_pd_alloc_rep rep;
} __attribute__((packed));

struct c2wr_pd_dealloc_req {
} __attribute__((packed));

struct c2wr_pd_dealloc_rep {
} __attribute__((packed));

union c2wr_pd_dealloc {
	struct c2wr_pd_dealloc_req req;
	struct c2wr_pd_dealloc_rep rep;
} __attribute__((packed));

/*
 *------------------------ SRQ ------------------------
 */
struct c2wr_srq_create_req {
} __attribute__((packed));

struct c2wr_srq_create_rep {
} __attribute__((packed));

union c2wr_srq_create {
	struct c2wr_srq_create_req req;
	struct c2wr_srq_create_rep rep;
} __attribute__((packed));

struct c2wr_srq_destroy_req {
} __attribute__((packed));

struct c2wr_srq_destroy_rep {
} __attribute__((packed));

union c2wr_srq_destroy {
	struct c2wr_srq_destroy_req req;
	struct c2wr_srq_destroy_rep rep;
} __attribute__((packed));

/*
 *------------------------ QP ------------------------
 */
	QP_RDMA_READ = 0x00000001,		/* RDMA read enabled? */
	QP_RDMA_WRITE = 0x00000002,		/* RDMA write enabled? */
	QP_MW_BIND = 0x00000004,		/* MWs enabled */
	QP_ZERO_STAG = 0x00000008,		/* enabled? */
	QP_REMOTE_TERMINATION = 0x00000010,	/* remote end terminated */
	QP_RDMA_READ_RESPONSE = 0x00000020	/* Remote RDMA read */

struct c2wr_qp_create_req {
	__be32 flags;		/* see enum c2wr_qp_flags */
	__be32 send_sgl_depth;
	__be32 recv_sgl_depth;
	__be32 rdma_write_sgl_depth;
} __attribute__((packed));

struct c2wr_qp_create_rep {
	u32 rdma_write_sgl_depth;
} __attribute__((packed));

union c2wr_qp_create {
	struct c2wr_qp_create_req req;
	struct c2wr_qp_create_rep rep;
} __attribute__((packed));

struct c2wr_qp_query_req {
} __attribute__((packed));

struct c2wr_qp_query_rep {
	u32 rdma_write_sgl_depth;
	u16 flags;			/* see c2wr_qp_flags_t */
	u32 terminate_msg_length;	/* 0 if not present */
	/* Terminate Message in-line here. */
} __attribute__((packed));

union c2wr_qp_query {
	struct c2wr_qp_query_req req;
	struct c2wr_qp_query_rep rep;
} __attribute__((packed));

struct c2wr_qp_modify_req {
	u32 stream_msg_length;
	__be32 next_qp_state;
} __attribute__((packed));

struct c2wr_qp_modify_rep {
} __attribute__((packed));

union c2wr_qp_modify {
	struct c2wr_qp_modify_req req;
	struct c2wr_qp_modify_rep rep;
} __attribute__((packed));

struct c2wr_qp_destroy_req {
} __attribute__((packed));

struct c2wr_qp_destroy_rep {
} __attribute__((packed));

union c2wr_qp_destroy {
	struct c2wr_qp_destroy_req req;
	struct c2wr_qp_destroy_rep rep;
} __attribute__((packed));

/*
 * The CCWR_QP_CONNECT msg is posted on the verbs request queue.  It can
 * only be posted when a QP is in IDLE state.  After the connect request is
 * submitted to the LLP, the adapter moves the QP to CONNECT_PENDING state.
 * No synchronous reply from adapter to this WR.  The results of the
 * connection are passed back in an async event CCAE_ACTIVE_CONNECT_RESULTS
 * (see c2wr_ae_active_connect_results_t).
 */
struct c2wr_qp_connect_req {
	__be32 private_data_length;
	u8 private_data[0];	/* Private data in-line. */
} __attribute__((packed));

struct c2wr_qp_connect {
	struct c2wr_qp_connect_req req;
	/* no synchronous reply. */
} __attribute__((packed));
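
/*
 * Illustrative sketch, not part of the original header: sizing and filling
 * a connect WR with in-line private data.  The allocation and the variable
 * names (plen, pdata, pdata_len) are assumptions made only for this
 * example; the result arrives later as a CCAE_ACTIVE_CONNECT_RESULTS
 * async event, not as a synchronous reply.
 *
 *	struct c2wr_qp_connect_req *wr;
 *	u32 plen = min_t(u32, pdata_len, C2_MAX_PRIVATE_DATA_SIZE);
 *
 *	wr = kzalloc(sizeof(*wr) + plen, GFP_KERNEL);
 *	wr->private_data_length = cpu_to_be32(plen);
 *	memcpy(wr->private_data, pdata, plen);
 */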

/*
 *------------------------ MM ------------------------
 */

struct c2wr_nsmr_stag_alloc_req {
} __attribute__((packed));

struct c2wr_nsmr_stag_alloc_rep {
} __attribute__((packed));

union c2wr_nsmr_stag_alloc {
	struct c2wr_nsmr_stag_alloc_req req;
	struct c2wr_nsmr_stag_alloc_rep rep;
} __attribute__((packed));

struct c2wr_nsmr_register_req {
	/* array of paddrs (must be aligned on a 64bit boundary) */
} __attribute__((packed));

struct c2wr_nsmr_register_rep {
} __attribute__((packed));

union c2wr_nsmr_register {
	struct c2wr_nsmr_register_req req;
	struct c2wr_nsmr_register_rep rep;
} __attribute__((packed));

struct c2wr_nsmr_pbl_req {
	/* array of paddrs (must be aligned on a 64bit boundary) */
} __attribute__((packed));

struct c2wr_nsmr_pbl_rep {
} __attribute__((packed));

union c2wr_nsmr_pbl {
	struct c2wr_nsmr_pbl_req req;
	struct c2wr_nsmr_pbl_rep rep;
} __attribute__((packed));

struct c2wr_mr_query_req {
} __attribute__((packed));

struct c2wr_mr_query_rep {
} __attribute__((packed));

union c2wr_mr_query {
	struct c2wr_mr_query_req req;
	struct c2wr_mr_query_rep rep;
} __attribute__((packed));

struct c2wr_mw_query_req {
} __attribute__((packed));

struct c2wr_mw_query_rep {
} __attribute__((packed));

union c2wr_mw_query {
	struct c2wr_mw_query_req req;
	struct c2wr_mw_query_rep rep;
} __attribute__((packed));

struct c2wr_stag_dealloc_req {
} __attribute__((packed));

struct c2wr_stag_dealloc_rep {
} __attribute__((packed));

union c2wr_stag_dealloc {
	struct c2wr_stag_dealloc_req req;
	struct c2wr_stag_dealloc_rep rep;
} __attribute__((packed));

struct c2wr_nsmr_reregister_req {
	/* array of paddrs (must be aligned on a 64bit boundary) */
} __attribute__((packed));

struct c2wr_nsmr_reregister_rep {
} __attribute__((packed));

union c2wr_nsmr_reregister {
	struct c2wr_nsmr_reregister_req req;
	struct c2wr_nsmr_reregister_rep rep;
} __attribute__((packed));

struct c2wr_smr_register_req {
} __attribute__((packed));

struct c2wr_smr_register_rep {
} __attribute__((packed));

union c2wr_smr_register {
	struct c2wr_smr_register_req req;
	struct c2wr_smr_register_rep rep;
} __attribute__((packed));

struct c2wr_mw_alloc_req {
} __attribute__((packed));

struct c2wr_mw_alloc_rep {
} __attribute__((packed));

union c2wr_mw_alloc {
	struct c2wr_mw_alloc_req req;
	struct c2wr_mw_alloc_rep rep;
} __attribute__((packed));

/*
 *------------------------ WRs -----------------------
 */

struct c2wr_user_hdr {
	struct c2wr_hdr hdr;		/* Has status and WR Type */
} __attribute__((packed));

	C2_QP_STATE_IDLE = 0x01,
	C2_QP_STATE_CONNECTING = 0x02,
	C2_QP_STATE_RTS = 0x04,
	C2_QP_STATE_CLOSING = 0x08,
	C2_QP_STATE_TERMINATE = 0x10,
	C2_QP_STATE_ERROR = 0x20,

/* Completion queue entry. */
	struct c2wr_hdr hdr;		/* Has status and WR Type */
	u64 qp_user_context;		/* c2_user_qp_t * */
	u32 qp_state;			/* Current QP State */
	u32 handle;			/* QPID or EP Handle */
	__be32 bytes_rcvd;		/* valid for RECV WCs */
} __attribute__((packed));
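
/*
 * Illustrative sketch, not part of the original header: host code that
 * consumes the completion entry above would typically read the packed
 * header through the accessors defined at the end of this file.  The
 * pointer name (ce) is an assumption made only for this example; C2_OK
 * and CCWR_RECV are referenced elsewhere in this header.
 *
 *	if (c2_wr_get_result(ce) == C2_OK && c2_wr_get_id(ce) == CCWR_RECV)
 *		bytes = be32_to_cpu(ce->bytes_rcvd);
 */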

/*
 * Flags used for all post-sq WRs.  These must fit in the flags
 * field of the struct c2wr_hdr (eight bits).
 */
	SQ_READ_FENCE = 0x02,

/*
 * Common fields for all post-sq WRs.  Namely the standard header and a
 * secondary header with fields common to all post-sq WRs.
 */
	struct c2wr_user_hdr user_hdr;
} __attribute__((packed));

/*
 * Same as above but for post-rq WRs.
 */
	struct c2wr_user_hdr user_hdr;
} __attribute__((packed));

/*
 * use the same struct for all sends.
 */
struct c2wr_send_req {
	struct c2_sq_hdr sq_hdr;
	u8 data[0];		/* SGE array */
} __attribute__((packed));

	struct c2wr_send_req req;
} __attribute__((packed));

struct c2wr_rdma_write_req {
	struct c2_sq_hdr sq_hdr;
	u8 data[0];		/* SGE array */
} __attribute__((packed));

union c2wr_rdma_write {
	struct c2wr_rdma_write_req req;
} __attribute__((packed));

struct c2wr_rdma_read_req {
	struct c2_sq_hdr sq_hdr;
} __attribute__((packed));

union c2wr_rdma_read {
	struct c2wr_rdma_read_req req;
} __attribute__((packed));

struct c2wr_mw_bind_req {
	struct c2_sq_hdr sq_hdr;
} __attribute__((packed));

union c2wr_mw_bind {
	struct c2wr_mw_bind_req req;
} __attribute__((packed));

struct c2wr_nsmr_fastreg_req {
	struct c2_sq_hdr sq_hdr;
	/* array of paddrs (must be aligned on a 64bit boundary) */
} __attribute__((packed));

union c2wr_nsmr_fastreg {
	struct c2wr_nsmr_fastreg_req req;
} __attribute__((packed));

struct c2wr_stag_invalidate_req {
	struct c2_sq_hdr sq_hdr;
} __attribute__((packed));

union c2wr_stag_invalidate {
	struct c2wr_stag_invalidate_req req;
} __attribute__((packed));

	struct c2_sq_hdr sq_hdr;
	struct c2wr_send_req send;
	struct c2wr_send_req send_se;
	struct c2wr_send_req send_inv;
	struct c2wr_send_req send_se_inv;
	struct c2wr_rdma_write_req rdma_write;
	struct c2wr_rdma_read_req rdma_read;
	struct c2wr_mw_bind_req mw_bind;
	struct c2wr_nsmr_fastreg_req nsmr_fastreg;
	struct c2wr_stag_invalidate_req stag_inv;
} __attribute__((packed));

	struct c2_rq_hdr rq_hdr;
	u8 data[0];		/* array of SGEs */
} __attribute__((packed));

	struct c2wr_rqwr req;
} __attribute__((packed));

/*
 * All AEs start with this header.  Most AEs only need to convey the
 * information in the header.  Some, like LLP connection events, need
 * more info.  The union typedef c2wr_ae_t has all the possible AEs.
 *
 * hdr.context is the user_context from the rnic_open WR.  NULL if this
 * is not affiliated with an rnic.
 *
 * hdr.id is the AE identifier (e.g. CCAE_REMOTE_SHUTDOWN,
 * CCAE_LLP_CLOSE_COMPLETE).
 *
 * resource_type is one of:  C2_RES_IND_QP, C2_RES_IND_CQ, C2_RES_IND_SRQ.
 *
 * user_context is the context passed down when the host created the resource.
 */
struct c2wr_ae_hdr {
	struct c2wr_hdr hdr;
	u64 user_context;	/* user context for this res. */
	__be32 resource_type;	/* see enum c2_resource_indicator */
	__be32 resource;	/* handle for resource */
	__be32 qp_state;	/* current QP State */
} __attribute__((packed));
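
/*
 * Illustrative sketch, not part of the original header: an AE handler on
 * the host would typically dispatch on the resource type carried in this
 * header.  The ae pointer name and the c2_qp type are assumptions made
 * only for this example.
 *
 *	switch (be32_to_cpu(ae->resource_type)) {
 *	case C2_RES_IND_QP:
 *		qp = (struct c2_qp *) (unsigned long) ae->user_context;
 *		break;
 *	case C2_RES_IND_CQ:
 *	case C2_RES_IND_SRQ:
 *	default:
 *		break;
 *	}
 */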

/*
 * After submitting the CCAE_ACTIVE_CONNECT_RESULTS message on the AEQ,
 * the adapter moves the QP into RTS state.
 */
struct c2wr_ae_active_connect_results {
	struct c2wr_ae_hdr ae_hdr;
	__be32 private_data_length;
	u8 private_data[0];	/* data is in-line in the msg. */
} __attribute__((packed));

/*
 * When connections are established by the stack (and the private data
 * MPA frame is received), the adapter will generate an event to the host.
 * The details of the connection, any private data, and the new connection
 * request handle are passed up via the CCAE_CONNECTION_REQUEST msg on the
 * AEQ.
 */
struct c2wr_ae_connection_request {
	struct c2wr_ae_hdr ae_hdr;
	u32 cr_handle;		/* connreq handle (sock ptr) */
	__be32 private_data_length;
	u8 private_data[0];	/* data is in-line in the msg. */
} __attribute__((packed));

	struct c2wr_ae_hdr ae_generic;
	struct c2wr_ae_active_connect_results ae_active_connect_results;
	struct c2wr_ae_connection_request ae_connection_request;
} __attribute__((packed));

struct c2wr_init_req {
	struct c2wr_hdr hdr;
	__be64 q0_host_shared;
	__be64 q1_host_shared;
	__be64 q1_host_msg_pool;
	__be64 q2_host_shared;
	__be64 q2_host_msg_pool;
} __attribute__((packed));

struct c2wr_init_rep {
	struct c2wr_hdr hdr;
} __attribute__((packed));

	struct c2wr_init_req req;
	struct c2wr_init_rep rep;
} __attribute__((packed));

/*
 * For upgrading flash.
 */

struct c2wr_flash_init_req {
	struct c2wr_hdr hdr;
} __attribute__((packed));

struct c2wr_flash_init_rep {
	struct c2wr_hdr hdr;
	u32 adapter_flash_buf_offset;
	u32 adapter_flash_len;
} __attribute__((packed));

union c2wr_flash_init {
	struct c2wr_flash_init_req req;
	struct c2wr_flash_init_rep rep;
} __attribute__((packed));

struct c2wr_flash_req {
	struct c2wr_hdr hdr;
} __attribute__((packed));

struct c2wr_flash_rep {
	struct c2wr_hdr hdr;
} __attribute__((packed));

	struct c2wr_flash_req req;
	struct c2wr_flash_rep rep;
} __attribute__((packed));

struct c2wr_buf_alloc_req {
	struct c2wr_hdr hdr;
} __attribute__((packed));

struct c2wr_buf_alloc_rep {
	struct c2wr_hdr hdr;
	u32 offset;		/* 0 if mem not available */
	u32 size;		/* 0 if mem not available */
} __attribute__((packed));

union c2wr_buf_alloc {
	struct c2wr_buf_alloc_req req;
	struct c2wr_buf_alloc_rep rep;
} __attribute__((packed));

struct c2wr_buf_free_req {
	struct c2wr_hdr hdr;
	u32 offset;		/* Must match value from alloc */
	u32 size;		/* Must match value from alloc */
} __attribute__((packed));

struct c2wr_buf_free_rep {
	struct c2wr_hdr hdr;
} __attribute__((packed));

union c2wr_buf_free {
	struct c2wr_buf_free_req req;
} __attribute__((packed));

struct c2wr_flash_write_req {
	struct c2wr_hdr hdr;
} __attribute__((packed));

struct c2wr_flash_write_rep {
	struct c2wr_hdr hdr;
} __attribute__((packed));

union c2wr_flash_write {
	struct c2wr_flash_write_req req;
	struct c2wr_flash_write_rep rep;
} __attribute__((packed));

/*
 * Messages for LLP connection setup.
 */

/*
 * Listen Request.  This allocates a listening endpoint to allow passive
 * connection setup.  Newly established LLP connections are passed up
 * via an AE.  See c2wr_ae_connection_request_t.
 */
struct c2wr_ep_listen_create_req {
	struct c2wr_hdr hdr;
	u64 user_context;	/* returned in AEs. */
	__be32 local_addr;	/* local addr, or 0 */
	__be16 local_port;	/* 0 means "pick one" */
	__be32 backlog;		/* traditional tcp listen bl */
} __attribute__((packed));

struct c2wr_ep_listen_create_rep {
	struct c2wr_hdr hdr;
	u32 ep_handle;		/* handle to new listening ep */
	u16 local_port;		/* resulting port... */
} __attribute__((packed));

union c2wr_ep_listen_create {
	struct c2wr_ep_listen_create_req req;
	struct c2wr_ep_listen_create_rep rep;
} __attribute__((packed));
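
/*
 * Illustrative sketch, not part of the original header: setting up a
 * passive-side listen by filling the request above.  The CCWR id for the
 * header is elided from this excerpt, so it is not set here; the listener
 * cookie, port, and backlog values are assumptions made only for this
 * example.
 *
 *	struct c2wr_ep_listen_create_req wr = {0};
 *
 *	wr.user_context = (unsigned long) my_listener;
 *	wr.local_addr = cpu_to_be32(0);
 *	wr.local_port = cpu_to_be16(4000);
 *	wr.backlog = cpu_to_be32(16);
 */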

struct c2wr_ep_listen_destroy_req {
	struct c2wr_hdr hdr;
} __attribute__((packed));

struct c2wr_ep_listen_destroy_rep {
	struct c2wr_hdr hdr;
} __attribute__((packed));

union c2wr_ep_listen_destroy {
	struct c2wr_ep_listen_destroy_req req;
	struct c2wr_ep_listen_destroy_rep rep;
} __attribute__((packed));

struct c2wr_ep_query_req {
	struct c2wr_hdr hdr;
} __attribute__((packed));

struct c2wr_ep_query_rep {
	struct c2wr_hdr hdr;
} __attribute__((packed));

union c2wr_ep_query {
	struct c2wr_ep_query_req req;
	struct c2wr_ep_query_rep rep;
} __attribute__((packed));

/*
 * The host passes this down to indicate acceptance of a pending iWARP
 * connection.  The cr_handle was obtained from the CONNECTION_REQUEST
 * AE passed up by the adapter.  See c2wr_ae_connection_request_t.
 */
struct c2wr_cr_accept_req {
	struct c2wr_hdr hdr;
	u32 qp_handle;		/* QP to bind to this LLP conn */
	u32 ep_handle;		/* LLP handle to accept */
	__be32 private_data_length;
	u8 private_data[0];	/* data in-line in msg. */
} __attribute__((packed));

/*
 * The adapter sends a reply when the private data is successfully
 * submitted to the LLP.
 */
struct c2wr_cr_accept_rep {
	struct c2wr_hdr hdr;
} __attribute__((packed));

union c2wr_cr_accept {
	struct c2wr_cr_accept_req req;
	struct c2wr_cr_accept_rep rep;
} __attribute__((packed));
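
/*
 * Illustrative sketch, not part of the original header: accepting a
 * pending connection reported by a CONNECTION_REQUEST AE.  This sketch
 * assumes the cr_handle carried in that event is what gets supplied as
 * the LLP handle to accept; the wr/req_event/qp_handle names and the
 * private data handling are assumptions made only for this example, and
 * setting the CCWR id on the header is omitted because that id is elided
 * from this excerpt.
 *
 *	wr->qp_handle = qp_handle;
 *	wr->ep_handle = req_event->cr_handle;
 *	wr->private_data_length = cpu_to_be32(plen);
 *	memcpy(wr->private_data, pdata, plen);
 */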

/*
 * The host sends this down if a given iWARP connection request was
 * rejected by the consumer.  The cr_handle was obtained from a
 * previous c2wr_ae_connection_request_t AE sent by the adapter.
 */
struct c2wr_cr_reject_req {
	struct c2wr_hdr hdr;
	u32 ep_handle;		/* LLP handle to reject */
} __attribute__((packed));

/*
 * Dunno if this is needed, but we'll add it for now.  The adapter will
 * send the reject_reply after the LLP endpoint has been destroyed.
 */
struct c2wr_cr_reject_rep {
	struct c2wr_hdr hdr;
} __attribute__((packed));

union c2wr_cr_reject {
	struct c2wr_cr_reject_req req;
	struct c2wr_cr_reject_rep rep;
} __attribute__((packed));

/*
 * Console command.  Used to implement a debug console over the verbs
 * request and reply queues.
 */

/*
 * Console request message.  It contains:
 *	- message hdr with id = CCWR_CONSOLE
 *	- the physaddr/len of host memory to be used for the reply.
 *	- the command string.  eg:  "netstat -s" or "zoneinfo"
 */
struct c2wr_console_req {
	struct c2wr_hdr hdr;		/* id = CCWR_CONSOLE */
	u64 reply_buf;			/* pinned host buf for reply */
	u32 reply_buf_len;		/* length of reply buffer */
	u8 command[0];			/* NUL terminated ascii string */
					/* containing the command req */
} __attribute__((packed));
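
/*
 * Illustrative sketch, not part of the original header: issuing the
 * "netstat -s" debug command mentioned above.  The reply buffer variables
 * (reply_dma_addr, reply_len) and the allocation are assumptions made
 * only for this example.
 *
 *	const char *cmd = "netstat -s";
 *	struct c2wr_console_req *wr;
 *
 *	wr = kzalloc(sizeof(*wr) + strlen(cmd) + 1, GFP_KERNEL);
 *	c2_wr_set_id(wr, CCWR_CONSOLE);
 *	wr->reply_buf = reply_dma_addr;
 *	wr->reply_buf_len = reply_len;
 *	strcpy((char *) wr->command, cmd);
 */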

/*
 * Flags used in the console reply.
 */
enum c2_console_flags {
	CONS_REPLY_TRUNCATED = 0x00000001	/* reply was truncated */
} __attribute__((packed));

/*
 * Console reply message.
 * hdr.result contains the c2_status_t error if the reply was _not_ generated,
 * or C2_OK if the reply was generated.
 */
struct c2wr_console_rep {
	struct c2wr_hdr hdr;		/* id = CCWR_CONSOLE */
} __attribute__((packed));

union c2wr_console {
	struct c2wr_console_req req;
	struct c2wr_console_rep rep;
} __attribute__((packed));

/*
 * Giant union with all WRs.  Makes life easier...
 */
	struct c2wr_hdr hdr;
	struct c2wr_user_hdr user_hdr;
	union c2wr_rnic_open rnic_open;
	union c2wr_rnic_query rnic_query;
	union c2wr_rnic_getconfig rnic_getconfig;
	union c2wr_rnic_setconfig rnic_setconfig;
	union c2wr_rnic_close rnic_close;
	union c2wr_cq_create cq_create;
	union c2wr_cq_modify cq_modify;
	union c2wr_cq_destroy cq_destroy;
	union c2wr_pd_alloc pd_alloc;
	union c2wr_pd_dealloc pd_dealloc;
	union c2wr_srq_create srq_create;
	union c2wr_srq_destroy srq_destroy;
	union c2wr_qp_create qp_create;
	union c2wr_qp_query qp_query;
	union c2wr_qp_modify qp_modify;
	union c2wr_qp_destroy qp_destroy;
	struct c2wr_qp_connect qp_connect;
	union c2wr_nsmr_stag_alloc nsmr_stag_alloc;
	union c2wr_nsmr_register nsmr_register;
	union c2wr_nsmr_pbl nsmr_pbl;
	union c2wr_mr_query mr_query;
	union c2wr_mw_query mw_query;
	union c2wr_stag_dealloc stag_dealloc;
	union c2wr_sqwr sqwr;
	struct c2wr_rqwr rqwr;

	union c2wr_init init;
	union c2wr_ep_listen_create ep_listen_create;
	union c2wr_ep_listen_destroy ep_listen_destroy;
	union c2wr_cr_accept cr_accept;
	union c2wr_cr_reject cr_reject;
	union c2wr_console console;
	union c2wr_flash_init flash_init;
	union c2wr_flash flash;
	union c2wr_buf_alloc buf_alloc;
	union c2wr_buf_free buf_free;
	union c2wr_flash_write flash_write;
} __attribute__((packed));

/*
 * Accessors for the wr fields that are packed together tightly to
 * reduce the wr message size.  The wr arguments are void* so that
 * either a struct c2wr*, a struct c2wr_hdr*, or a pointer to any of the
 * types in the struct c2wr union can be passed in.
 */
static __inline__ u8 c2_wr_get_id(void *wr)
{
	return ((struct c2wr_hdr *) wr)->id;
}

static __inline__ void c2_wr_set_id(void *wr, u8 id)
{
	((struct c2wr_hdr *) wr)->id = id;
}

static __inline__ u8 c2_wr_get_result(void *wr)
{
	return ((struct c2wr_hdr *) wr)->result;
}

static __inline__ void c2_wr_set_result(void *wr, u8 result)
{
	((struct c2wr_hdr *) wr)->result = result;
}

static __inline__ u8 c2_wr_get_flags(void *wr)
{
	return ((struct c2wr_hdr *) wr)->flags;
}

static __inline__ void c2_wr_set_flags(void *wr, u8 flags)
{
	((struct c2wr_hdr *) wr)->flags = flags;
}

static __inline__ u8 c2_wr_get_sge_count(void *wr)
{
	return ((struct c2wr_hdr *) wr)->sge_count;
}

static __inline__ void c2_wr_set_sge_count(void *wr, u8 sge_count)
{
	((struct c2wr_hdr *) wr)->sge_count = sge_count;
}

static __inline__ __be32 c2_wr_get_wqe_count(void *wr)
{
	return ((struct c2wr_hdr *) wr)->wqe_count;
}

static __inline__ void c2_wr_set_wqe_count(void *wr, u32 wqe_count)
{
	((struct c2wr_hdr *) wr)->wqe_count = wqe_count;
}
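
/*
 * Illustrative usage, not part of the original header: initializing the
 * packed header fields of a WR before posting it.  The wr variable and
 * the chosen id/flags values are only an example; CCWR_NOP and
 * SQ_READ_FENCE are taken from definitions earlier in this file.
 *
 *	struct c2wr *wr = ...;
 *
 *	c2_wr_set_id(wr, CCWR_NOP);
 *	c2_wr_set_flags(wr, SQ_READ_FENCE);
 *	c2_wr_set_sge_count(wr, 0);
 *	c2_wr_set_result(wr, 0);
 */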

#endif				/* _C2_WR_H_ */