/*
 * Copyright (c) 2004, 2011 Intel Corporation. All rights reserved.
 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#if !defined(CM_MSGS_H)
#define CM_MSGS_H

#include <rdma/ib_mad.h>
#include <rdma/ib_cm.h>
/*
 * Parameters to routines below should be in network-byte order, and values
 * are returned in network-byte order.
 */
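/*
 * Illustrative usage sketch (not part of this header): callers convert
 * host-order values themselves, e.g. with a hypothetical host-order QPN
 * my_qpn:
 *
 *	cm_req_set_local_qpn(req_msg, cpu_to_be32(my_qpn));
 *	my_qpn = be32_to_cpu(cm_req_get_local_qpn(req_msg));
 *
 * where cm_req_{get,set}_local_qpn() are defined later in this file.
 */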
#define IB_CM_CLASS_VERSION	2 /* IB specification 1.2 */

#define CM_REQ_ATTR_ID		cpu_to_be16(0x0010)
#define CM_MRA_ATTR_ID		cpu_to_be16(0x0011)
#define CM_REJ_ATTR_ID		cpu_to_be16(0x0012)
#define CM_REP_ATTR_ID		cpu_to_be16(0x0013)
#define CM_RTU_ATTR_ID		cpu_to_be16(0x0014)
#define CM_DREQ_ATTR_ID		cpu_to_be16(0x0015)
#define CM_DREP_ATTR_ID		cpu_to_be16(0x0016)
#define CM_SIDR_REQ_ATTR_ID	cpu_to_be16(0x0017)
#define CM_SIDR_REP_ATTR_ID	cpu_to_be16(0x0018)
#define CM_LAP_ATTR_ID		cpu_to_be16(0x0019)
#define CM_APR_ATTR_ID		cpu_to_be16(0x001A)
enum cm_msg_sequence {
	CM_MSG_SEQUENCE_REQ,
	CM_MSG_SEQUENCE_LAP,
	CM_MSG_SEQUENCE_DREQ,
	CM_MSG_SEQUENCE_SIDR
};
struct cm_req_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 rsvd4;
	__be64 service_id;
	__be64 local_ca_guid;
	__be32 rsvd24;
	__be32 local_qkey;
	/* local QPN:24, responder resources:8 */
	__be32 offset32;
	/* local EECN:24, initiator depth:8 */
	__be32 offset36;
	/*
	 * remote EECN:24, remote CM response timeout:5,
	 * transport service type:2, end-to-end flow control:1
	 */
	__be32 offset40;
	/* starting PSN:24, local CM response timeout:5, retry count:3 */
	__be32 offset44;
	__be16 pkey;
	/* path MTU:4, RDC exists:1, RNR retry count:3. */
	u8 offset50;
	/* max CM Retries:4, SRQ:1, extended transport type:3 */
	u8 offset51;

	__be16 primary_local_lid;
	__be16 primary_remote_lid;
	union ib_gid primary_local_gid;
	union ib_gid primary_remote_gid;
	/* flow label:20, rsvd:6, packet rate:6 */
	__be32 primary_offset88;
	u8 primary_traffic_class;
	u8 primary_hop_limit;
	/* SL:4, subnet local:1, rsvd:3 */
	u8 primary_offset94;
	/* local ACK timeout:5, rsvd:3 */
	u8 primary_offset95;

	__be16 alt_local_lid;
	__be16 alt_remote_lid;
	union ib_gid alt_local_gid;
	union ib_gid alt_remote_gid;
	/* flow label:20, rsvd:6, packet rate:6 */
	__be32 alt_offset132;
	u8 alt_traffic_class;
	u8 alt_hop_limit;
	/* SL:4, subnet local:1, rsvd:3 */
	u8 alt_offset138;
	/* local ACK timeout:5, rsvd:3 */
	u8 alt_offset139;

	u8 private_data[IB_CM_REQ_PRIVATE_DATA_SIZE];

} __attribute__ ((packed));
static inline __be32 cm_req_get_local_qpn(struct cm_req_msg *req_msg)
{
	return cpu_to_be32(be32_to_cpu(req_msg->offset32) >> 8);
}

static inline void cm_req_set_local_qpn(struct cm_req_msg *req_msg, __be32 qpn)
{
	req_msg->offset32 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
					(be32_to_cpu(req_msg->offset32) &
					 0x000000FF));
}
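/*
 * Worked example (illustrative): offset32 packs the local QPN into its upper
 * 24 bits and responder resources into its lower 8 bits.  With responder
 * resources already set to 4, cm_req_set_local_qpn(req_msg, cpu_to_be32(0x123))
 * leaves offset32 == cpu_to_be32(0x00012304), and cm_req_get_local_qpn()
 * then returns cpu_to_be32(0x123) again.
 */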
static inline u8 cm_req_get_resp_res(struct cm_req_msg *req_msg)
{
	return (u8) be32_to_cpu(req_msg->offset32);
}

static inline void cm_req_set_resp_res(struct cm_req_msg *req_msg, u8 resp_res)
{
	req_msg->offset32 = cpu_to_be32(resp_res |
					(be32_to_cpu(req_msg->offset32) &
					 0xFFFFFF00));
}

static inline u8 cm_req_get_init_depth(struct cm_req_msg *req_msg)
{
	return (u8) be32_to_cpu(req_msg->offset36);
}

static inline void cm_req_set_init_depth(struct cm_req_msg *req_msg,
					 u8 init_depth)
{
	req_msg->offset36 = cpu_to_be32(init_depth |
					(be32_to_cpu(req_msg->offset36) &
					 0xFFFFFF00));
}

static inline u8 cm_req_get_remote_resp_timeout(struct cm_req_msg *req_msg)
{
	return (u8) ((be32_to_cpu(req_msg->offset40) & 0xF8) >> 3);
}

static inline void cm_req_set_remote_resp_timeout(struct cm_req_msg *req_msg,
						  u8 resp_timeout)
{
	req_msg->offset40 = cpu_to_be32((resp_timeout << 3) |
					(be32_to_cpu(req_msg->offset40) &
					 0xFFFFFF07));
}
static inline enum ib_qp_type cm_req_get_qp_type(struct cm_req_msg *req_msg)
{
	u8 transport_type = (u8) (be32_to_cpu(req_msg->offset40) & 0x06) >> 1;
	switch(transport_type) {
	case 0: return IB_QPT_RC;
	case 1: return IB_QPT_UC;
	case 3:
		switch (req_msg->offset51 & 0x7) {
		case 1: return IB_QPT_XRC_TGT;
		default: return 0;
		}
	default: return 0;
	}
}

static inline void cm_req_set_qp_type(struct cm_req_msg *req_msg,
				      enum ib_qp_type qp_type)
{
	switch(qp_type) {
	case IB_QPT_UC:
		req_msg->offset40 = cpu_to_be32((be32_to_cpu(
						  req_msg->offset40) &
						   0xFFFFFFF9) | 0x2);
		break;
	case IB_QPT_XRC_INI:
		req_msg->offset40 = cpu_to_be32((be32_to_cpu(
						 req_msg->offset40) &
						   0xFFFFFFF9) | 0x6);
		req_msg->offset51 = (req_msg->offset51 & 0xF8) | 1;
		break;
	default:
		req_msg->offset40 = cpu_to_be32(be32_to_cpu(
						 req_msg->offset40) &
						  0xFFFFFFF9);
	}
}
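/*
 * Transport service type encoding used by the two helpers above (bits 2:1
 * of offset40): 0 = RC, 1 = UC, 3 = XRC.  For XRC, the extended transport
 * type in the low three bits of offset51 is set to 1, which the receiver
 * maps back to IB_QPT_XRC_TGT in cm_req_get_qp_type().
 */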
static inline u8 cm_req_get_flow_ctrl(struct cm_req_msg *req_msg)
{
	return be32_to_cpu(req_msg->offset40) & 0x1;
}

static inline void cm_req_set_flow_ctrl(struct cm_req_msg *req_msg,
					u8 flow_ctrl)
{
	req_msg->offset40 = cpu_to_be32((flow_ctrl & 0x1) |
					(be32_to_cpu(req_msg->offset40) &
					 0xFFFFFFFE));
}

static inline __be32 cm_req_get_starting_psn(struct cm_req_msg *req_msg)
{
	return cpu_to_be32(be32_to_cpu(req_msg->offset44) >> 8);
}

static inline void cm_req_set_starting_psn(struct cm_req_msg *req_msg,
					   __be32 starting_psn)
{
	req_msg->offset44 = cpu_to_be32((be32_to_cpu(starting_psn) << 8) |
			    (be32_to_cpu(req_msg->offset44) & 0x000000FF));
}

static inline u8 cm_req_get_local_resp_timeout(struct cm_req_msg *req_msg)
{
	return (u8) ((be32_to_cpu(req_msg->offset44) & 0xF8) >> 3);
}

static inline void cm_req_set_local_resp_timeout(struct cm_req_msg *req_msg,
						 u8 resp_timeout)
{
	req_msg->offset44 = cpu_to_be32((resp_timeout << 3) |
			    (be32_to_cpu(req_msg->offset44) & 0xFFFFFF07));
}

static inline u8 cm_req_get_retry_count(struct cm_req_msg *req_msg)
{
	return (u8) (be32_to_cpu(req_msg->offset44) & 0x7);
}

static inline void cm_req_set_retry_count(struct cm_req_msg *req_msg,
					  u8 retry_count)
{
	req_msg->offset44 = cpu_to_be32((retry_count & 0x7) |
			    (be32_to_cpu(req_msg->offset44) & 0xFFFFFFF8));
}

static inline u8 cm_req_get_path_mtu(struct cm_req_msg *req_msg)
{
	return req_msg->offset50 >> 4;
}

static inline void cm_req_set_path_mtu(struct cm_req_msg *req_msg, u8 path_mtu)
{
	req_msg->offset50 = (u8) ((req_msg->offset50 & 0xF) | (path_mtu << 4));
}

static inline u8 cm_req_get_rnr_retry_count(struct cm_req_msg *req_msg)
{
	return req_msg->offset50 & 0x7;
}

static inline void cm_req_set_rnr_retry_count(struct cm_req_msg *req_msg,
					      u8 rnr_retry_count)
{
	req_msg->offset50 = (u8) ((req_msg->offset50 & 0xF8) |
				  (rnr_retry_count & 0x7));
}

static inline u8 cm_req_get_max_cm_retries(struct cm_req_msg *req_msg)
{
	return req_msg->offset51 >> 4;
}

static inline void cm_req_set_max_cm_retries(struct cm_req_msg *req_msg,
					     u8 retries)
{
	req_msg->offset51 = (u8) ((req_msg->offset51 & 0xF) | (retries << 4));
}

static inline u8 cm_req_get_srq(struct cm_req_msg *req_msg)
{
	return (req_msg->offset51 & 0x8) >> 3;
}

static inline void cm_req_set_srq(struct cm_req_msg *req_msg, u8 srq)
{
	req_msg->offset51 = (u8) ((req_msg->offset51 & 0xF7) |
				  ((srq & 0x1) << 3));
}
static inline __be32 cm_req_get_primary_flow_label(struct cm_req_msg *req_msg)
{
	return cpu_to_be32(be32_to_cpu(req_msg->primary_offset88) >> 12);
}

static inline void cm_req_set_primary_flow_label(struct cm_req_msg *req_msg,
						 __be32 flow_label)
{
	req_msg->primary_offset88 = cpu_to_be32(
				    (be32_to_cpu(req_msg->primary_offset88) &
				     0x00000FFF) |
				    (be32_to_cpu(flow_label) << 12));
}

static inline u8 cm_req_get_primary_packet_rate(struct cm_req_msg *req_msg)
{
	return (u8) (be32_to_cpu(req_msg->primary_offset88) & 0x3F);
}

static inline void cm_req_set_primary_packet_rate(struct cm_req_msg *req_msg,
						  u8 rate)
{
	req_msg->primary_offset88 = cpu_to_be32(
				    (be32_to_cpu(req_msg->primary_offset88) &
				     0xFFFFFFC0) | (rate & 0x3F));
}

static inline u8 cm_req_get_primary_sl(struct cm_req_msg *req_msg)
{
	return (u8) (req_msg->primary_offset94 >> 4);
}

static inline void cm_req_set_primary_sl(struct cm_req_msg *req_msg, u8 sl)
{
	req_msg->primary_offset94 = (u8) ((req_msg->primary_offset94 & 0x0F) |
					  (sl << 4));
}

static inline u8 cm_req_get_primary_subnet_local(struct cm_req_msg *req_msg)
{
	return (u8) ((req_msg->primary_offset94 & 0x08) >> 3);
}

static inline void cm_req_set_primary_subnet_local(struct cm_req_msg *req_msg,
						   u8 subnet_local)
{
	req_msg->primary_offset94 = (u8) ((req_msg->primary_offset94 & 0xF7) |
					  ((subnet_local & 0x1) << 3));
}

static inline u8 cm_req_get_primary_local_ack_timeout(struct cm_req_msg *req_msg)
{
	return (u8) (req_msg->primary_offset95 >> 3);
}

static inline void cm_req_set_primary_local_ack_timeout(struct cm_req_msg *req_msg,
							 u8 local_ack_timeout)
{
	req_msg->primary_offset95 = (u8) ((req_msg->primary_offset95 & 0x07) |
					  (local_ack_timeout << 3));
}
static inline __be32 cm_req_get_alt_flow_label(struct cm_req_msg *req_msg)
{
	return cpu_to_be32(be32_to_cpu(req_msg->alt_offset132) >> 12);
}

static inline void cm_req_set_alt_flow_label(struct cm_req_msg *req_msg,
					     __be32 flow_label)
{
	req_msg->alt_offset132 = cpu_to_be32(
				 (be32_to_cpu(req_msg->alt_offset132) &
				  0x00000FFF) |
				 (be32_to_cpu(flow_label) << 12));
}

static inline u8 cm_req_get_alt_packet_rate(struct cm_req_msg *req_msg)
{
	return (u8) (be32_to_cpu(req_msg->alt_offset132) & 0x3F);
}

static inline void cm_req_set_alt_packet_rate(struct cm_req_msg *req_msg,
					      u8 rate)
{
	req_msg->alt_offset132 = cpu_to_be32(
				 (be32_to_cpu(req_msg->alt_offset132) &
				  0xFFFFFFC0) | (rate & 0x3F));
}

static inline u8 cm_req_get_alt_sl(struct cm_req_msg *req_msg)
{
	return (u8) (req_msg->alt_offset138 >> 4);
}

static inline void cm_req_set_alt_sl(struct cm_req_msg *req_msg, u8 sl)
{
	req_msg->alt_offset138 = (u8) ((req_msg->alt_offset138 & 0x0F) |
				       (sl << 4));
}

static inline u8 cm_req_get_alt_subnet_local(struct cm_req_msg *req_msg)
{
	return (u8) ((req_msg->alt_offset138 & 0x08) >> 3);
}

static inline void cm_req_set_alt_subnet_local(struct cm_req_msg *req_msg,
					       u8 subnet_local)
{
	req_msg->alt_offset138 = (u8) ((req_msg->alt_offset138 & 0xF7) |
				       ((subnet_local & 0x1) << 3));
}

static inline u8 cm_req_get_alt_local_ack_timeout(struct cm_req_msg *req_msg)
{
	return (u8) (req_msg->alt_offset139 >> 3);
}

static inline void cm_req_set_alt_local_ack_timeout(struct cm_req_msg *req_msg,
						    u8 local_ack_timeout)
{
	req_msg->alt_offset139 = (u8) ((req_msg->alt_offset139 & 0x07) |
				       (local_ack_timeout << 3));
}
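/*
 * Hypothetical sketch of how the path accessors above are combined when
 * building a REQ; flow_label, sl and ack_timeout are placeholder variables:
 *
 *	cm_req_set_primary_flow_label(req_msg, flow_label);
 *	cm_req_set_primary_sl(req_msg, sl);
 *	cm_req_set_primary_local_ack_timeout(req_msg, ack_timeout);
 *
 * The alternate path is filled in the same way via the alt_* setters.
 */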
/* Message REJected or MRAed */
enum cm_msg_response {
	CM_MSG_RESPONSE_REQ = 0x0,
	CM_MSG_RESPONSE_REP = 0x1,
	CM_MSG_RESPONSE_OTHER = 0x2
};
struct cm_mra_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 remote_comm_id;
	/* message MRAed:2, rsvd:6 */
	u8 offset8;
	/* service timeout:5, rsvd:3 */
	u8 offset9;

	u8 private_data[IB_CM_MRA_PRIVATE_DATA_SIZE];

} __attribute__ ((packed));
static inline u8 cm_mra_get_msg_mraed(struct cm_mra_msg *mra_msg)
{
	return (u8) (mra_msg->offset8 >> 6);
}

static inline void cm_mra_set_msg_mraed(struct cm_mra_msg *mra_msg, u8 msg)
{
	mra_msg->offset8 = (u8) ((mra_msg->offset8 & 0x3F) | (msg << 6));
}

static inline u8 cm_mra_get_service_timeout(struct cm_mra_msg *mra_msg)
{
	return (u8) (mra_msg->offset9 >> 3);
}

static inline void cm_mra_set_service_timeout(struct cm_mra_msg *mra_msg,
					      u8 service_timeout)
{
	mra_msg->offset9 = (u8) ((mra_msg->offset9 & 0x07) |
				 (service_timeout << 3));
}
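/*
 * The two-bit "message MRAed" field set above, like the "message REJected"
 * field in struct cm_rej_msg below, carries a value from enum cm_msg_response
 * identifying whether a REQ, a REP or some other message is being answered.
 */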
struct cm_rej_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 remote_comm_id;
	/* message REJected:2, rsvd:6 */
	u8 offset8;
	/* reject info length:7, rsvd:1. */
	u8 offset9;
	__be16 reason;
	u8 ari[IB_CM_REJ_ARI_LENGTH];

	u8 private_data[IB_CM_REJ_PRIVATE_DATA_SIZE];

} __attribute__ ((packed));
static inline u8 cm_rej_get_msg_rejected(struct cm_rej_msg *rej_msg)
{
	return (u8) (rej_msg->offset8 >> 6);
}

static inline void cm_rej_set_msg_rejected(struct cm_rej_msg *rej_msg, u8 msg)
{
	rej_msg->offset8 = (u8) ((rej_msg->offset8 & 0x3F) | (msg << 6));
}

static inline u8 cm_rej_get_reject_info_len(struct cm_rej_msg *rej_msg)
{
	return (u8) (rej_msg->offset9 >> 1);
}

static inline void cm_rej_set_reject_info_len(struct cm_rej_msg *rej_msg,
					      u8 len)
{
	rej_msg->offset9 = (u8) ((rej_msg->offset9 & 0x1) | (len << 1));
}
struct cm_rep_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 remote_comm_id;
	__be32 local_qkey;
	/* local QPN:24, rsvd:8 */
	__be32 offset12;
	/* local EECN:24, rsvd:8 */
	__be32 offset16;
	/* starting PSN:24 rsvd:8 */
	__be32 offset20;
	u8 resp_resources;
	u8 initiator_depth;
	/* target ACK delay:5, failover accepted:2, end-to-end flow control:1 */
	u8 offset26;
	/* RNR retry count:3, SRQ:1, rsvd:5 */
	u8 offset27;
	__be64 local_ca_guid;

	u8 private_data[IB_CM_REP_PRIVATE_DATA_SIZE];

} __attribute__ ((packed));
static inline __be32 cm_rep_get_local_qpn(struct cm_rep_msg *rep_msg)
{
	return cpu_to_be32(be32_to_cpu(rep_msg->offset12) >> 8);
}

static inline void cm_rep_set_local_qpn(struct cm_rep_msg *rep_msg, __be32 qpn)
{
	rep_msg->offset12 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
			    (be32_to_cpu(rep_msg->offset12) & 0x000000FF));
}

static inline __be32 cm_rep_get_local_eecn(struct cm_rep_msg *rep_msg)
{
	return cpu_to_be32(be32_to_cpu(rep_msg->offset16) >> 8);
}

static inline void cm_rep_set_local_eecn(struct cm_rep_msg *rep_msg, __be32 eecn)
{
	rep_msg->offset16 = cpu_to_be32((be32_to_cpu(eecn) << 8) |
			    (be32_to_cpu(rep_msg->offset16) & 0x000000FF));
}

static inline __be32 cm_rep_get_qpn(struct cm_rep_msg *rep_msg, enum ib_qp_type qp_type)
{
	return (qp_type == IB_QPT_XRC_INI) ?
		cm_rep_get_local_eecn(rep_msg) : cm_rep_get_local_qpn(rep_msg);
}
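/*
 * Note on the helper above: when the local QP is an XRC initiator, the
 * remote side's number is carried in the local EECN field of the REP rather
 * than in the local QPN field, so cm_rep_get_qpn() switches on the QP type.
 */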
static inline __be32 cm_rep_get_starting_psn(struct cm_rep_msg *rep_msg)
{
	return cpu_to_be32(be32_to_cpu(rep_msg->offset20) >> 8);
}

static inline void cm_rep_set_starting_psn(struct cm_rep_msg *rep_msg,
					   __be32 starting_psn)
{
	rep_msg->offset20 = cpu_to_be32((be32_to_cpu(starting_psn) << 8) |
			    (be32_to_cpu(rep_msg->offset20) & 0x000000FF));
}

static inline u8 cm_rep_get_target_ack_delay(struct cm_rep_msg *rep_msg)
{
	return (u8) (rep_msg->offset26 >> 3);
}

static inline void cm_rep_set_target_ack_delay(struct cm_rep_msg *rep_msg,
					       u8 target_ack_delay)
{
	rep_msg->offset26 = (u8) ((rep_msg->offset26 & 0x07) |
				  (target_ack_delay << 3));
}

static inline u8 cm_rep_get_failover(struct cm_rep_msg *rep_msg)
{
	return (u8) ((rep_msg->offset26 & 0x06) >> 1);
}

static inline void cm_rep_set_failover(struct cm_rep_msg *rep_msg, u8 failover)
{
	rep_msg->offset26 = (u8) ((rep_msg->offset26 & 0xF9) |
				  ((failover & 0x3) << 1));
}

static inline u8 cm_rep_get_flow_ctrl(struct cm_rep_msg *rep_msg)
{
	return (u8) (rep_msg->offset26 & 0x01);
}

static inline void cm_rep_set_flow_ctrl(struct cm_rep_msg *rep_msg,
					u8 flow_ctrl)
{
	rep_msg->offset26 = (u8) ((rep_msg->offset26 & 0xFE) |
				  (flow_ctrl & 0x1));
}

static inline u8 cm_rep_get_rnr_retry_count(struct cm_rep_msg *rep_msg)
{
	return (u8) (rep_msg->offset27 >> 5);
}

static inline void cm_rep_set_rnr_retry_count(struct cm_rep_msg *rep_msg,
					      u8 rnr_retry_count)
{
	rep_msg->offset27 = (u8) ((rep_msg->offset27 & 0x1F) |
				  (rnr_retry_count << 5));
}

static inline u8 cm_rep_get_srq(struct cm_rep_msg *rep_msg)
{
	return (u8) ((rep_msg->offset27 >> 4) & 0x1);
}

static inline void cm_rep_set_srq(struct cm_rep_msg *rep_msg, u8 srq)
{
	rep_msg->offset27 = (u8) ((rep_msg->offset27 & 0xEF) |
				  ((srq & 0x1) << 4));
}
struct cm_rtu_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 remote_comm_id;

	u8 private_data[IB_CM_RTU_PRIVATE_DATA_SIZE];

} __attribute__ ((packed));
struct cm_dreq_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 remote_comm_id;
	/* remote QPN/EECN:24, rsvd:8 */
	__be32 offset8;

	u8 private_data[IB_CM_DREQ_PRIVATE_DATA_SIZE];

} __attribute__ ((packed));

static inline __be32 cm_dreq_get_remote_qpn(struct cm_dreq_msg *dreq_msg)
{
	return cpu_to_be32(be32_to_cpu(dreq_msg->offset8) >> 8);
}

static inline void cm_dreq_set_remote_qpn(struct cm_dreq_msg *dreq_msg, __be32 qpn)
{
	dreq_msg->offset8 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
			    (be32_to_cpu(dreq_msg->offset8) & 0x000000FF));
}
struct cm_drep_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 remote_comm_id;

	u8 private_data[IB_CM_DREP_PRIVATE_DATA_SIZE];

} __attribute__ ((packed));
struct cm_lap_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 remote_comm_id;

	__be32 rsvd8;
	/* remote QPN/EECN:24, remote CM response timeout:5, rsvd:3 */
	__be32 offset12;
	__be32 rsvd16;

	__be16 alt_local_lid;
	__be16 alt_remote_lid;
	union ib_gid alt_local_gid;
	union ib_gid alt_remote_gid;
	/* flow label:20, rsvd:4, traffic class:8 */
	__be32 offset56;
	u8 alt_hop_limit;
	/* rsvd:2, packet rate:6 */
	u8 offset61;
	/* SL:4, subnet local:1, rsvd:3 */
	u8 offset62;
	/* local ACK timeout:5, rsvd:3 */
	u8 offset63;

	u8 private_data[IB_CM_LAP_PRIVATE_DATA_SIZE];
} __attribute__ ((packed));
static inline __be32 cm_lap_get_remote_qpn(struct cm_lap_msg *lap_msg)
{
	return cpu_to_be32(be32_to_cpu(lap_msg->offset12) >> 8);
}

static inline void cm_lap_set_remote_qpn(struct cm_lap_msg *lap_msg, __be32 qpn)
{
	lap_msg->offset12 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
					 (be32_to_cpu(lap_msg->offset12) &
					  0x000000FF));
}

static inline u8 cm_lap_get_remote_resp_timeout(struct cm_lap_msg *lap_msg)
{
	return (u8) ((be32_to_cpu(lap_msg->offset12) & 0xF8) >> 3);
}

static inline void cm_lap_set_remote_resp_timeout(struct cm_lap_msg *lap_msg,
						  u8 resp_timeout)
{
	lap_msg->offset12 = cpu_to_be32((resp_timeout << 3) |
					 (be32_to_cpu(lap_msg->offset12) &
					  0xFFFFFF07));
}

static inline __be32 cm_lap_get_flow_label(struct cm_lap_msg *lap_msg)
{
	return cpu_to_be32(be32_to_cpu(lap_msg->offset56) >> 12);
}

static inline void cm_lap_set_flow_label(struct cm_lap_msg *lap_msg,
					 __be32 flow_label)
{
	lap_msg->offset56 = cpu_to_be32(
				 (be32_to_cpu(lap_msg->offset56) & 0x00000FFF) |
				 (be32_to_cpu(flow_label) << 12));
}

static inline u8 cm_lap_get_traffic_class(struct cm_lap_msg *lap_msg)
{
	return (u8) be32_to_cpu(lap_msg->offset56);
}

static inline void cm_lap_set_traffic_class(struct cm_lap_msg *lap_msg,
					    u8 traffic_class)
{
	lap_msg->offset56 = cpu_to_be32(traffic_class |
					 (be32_to_cpu(lap_msg->offset56) &
					  0xFFFFFF00));
}

static inline u8 cm_lap_get_packet_rate(struct cm_lap_msg *lap_msg)
{
	return lap_msg->offset61 & 0x3F;
}

static inline void cm_lap_set_packet_rate(struct cm_lap_msg *lap_msg,
					  u8 packet_rate)
{
	lap_msg->offset61 = (packet_rate & 0x3F) | (lap_msg->offset61 & 0xC0);
}

static inline u8 cm_lap_get_sl(struct cm_lap_msg *lap_msg)
{
	return lap_msg->offset62 >> 4;
}

static inline void cm_lap_set_sl(struct cm_lap_msg *lap_msg, u8 sl)
{
	lap_msg->offset62 = (sl << 4) | (lap_msg->offset62 & 0x0F);
}

static inline u8 cm_lap_get_subnet_local(struct cm_lap_msg *lap_msg)
{
	return (lap_msg->offset62 >> 3) & 0x1;
}
static inline void cm_lap_set_subnet_local(struct cm_lap_msg *lap_msg,
					   u8 subnet_local)
{
	/* Preserve the other bits of offset62 (SL and reserved), not offset61. */
	lap_msg->offset62 = ((subnet_local & 0x1) << 3) |
			     (lap_msg->offset62 & 0xF7);
}
static inline u8 cm_lap_get_local_ack_timeout(struct cm_lap_msg *lap_msg)
{
	return lap_msg->offset63 >> 3;
}

static inline void cm_lap_set_local_ack_timeout(struct cm_lap_msg *lap_msg,
						u8 local_ack_timeout)
{
	lap_msg->offset63 = (local_ack_timeout << 3) |
			    (lap_msg->offset63 & 0x07);
}
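/*
 * The LAP accessors above mirror the alt_* path accessors of struct
 * cm_req_msg: they describe the proposed alternate path (flow label, SL,
 * packet rate, subnet-local flag and local ACK timeout) carried in the LAP.
 */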
struct cm_apr_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 remote_comm_id;

	u8 info_length;
	u8 ap_status;
	__be16 rsvd;
	u8 info[IB_CM_APR_INFO_LENGTH];

	u8 private_data[IB_CM_APR_PRIVATE_DATA_SIZE];
} __attribute__ ((packed));
struct cm_sidr_req_msg {
	struct ib_mad_hdr hdr;

	__be32 request_id;
	__be16 pkey;
	__be16 rsvd;
	__be64 service_id;

	u8 private_data[IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE];
} __attribute__ ((packed));
struct cm_sidr_rep_msg {
	struct ib_mad_hdr hdr;

	__be32 request_id;
	u8 status;
	u8 info_length;
	__be16 rsvd;
	/* QPN:24, rsvd:8 */
	__be32 offset8;
	__be64 service_id;
	__be32 qkey;
	u8 info[IB_CM_SIDR_REP_INFO_LENGTH];

	u8 private_data[IB_CM_SIDR_REP_PRIVATE_DATA_SIZE];
} __attribute__ ((packed));
static inline __be32 cm_sidr_rep_get_qpn(struct cm_sidr_rep_msg *sidr_rep_msg)
{
	return cpu_to_be32(be32_to_cpu(sidr_rep_msg->offset8) >> 8);
}

static inline void cm_sidr_rep_set_qpn(struct cm_sidr_rep_msg *sidr_rep_msg,
				       __be32 qpn)
{
	sidr_rep_msg->offset8 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
					(be32_to_cpu(sidr_rep_msg->offset8) &
					 0x000000FF));
}

#endif /* CM_MSGS_H */