/*
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#if !defined(CM_MSGS_H)
#define CM_MSGS_H

#include <rdma/ib_mad.h>
#include <rdma/ib_cm.h>

/*
 * Parameters to routines below should be in network-byte order, and values
 * are returned in network-byte order.
 */

#define IB_CM_CLASS_VERSION	2 /* IB specification 1.2 */

#define CM_REQ_ATTR_ID		cpu_to_be16(0x0010)
#define CM_MRA_ATTR_ID		cpu_to_be16(0x0011)
#define CM_REJ_ATTR_ID		cpu_to_be16(0x0012)
#define CM_REP_ATTR_ID		cpu_to_be16(0x0013)
#define CM_RTU_ATTR_ID		cpu_to_be16(0x0014)
#define CM_DREQ_ATTR_ID		cpu_to_be16(0x0015)
#define CM_DREP_ATTR_ID		cpu_to_be16(0x0016)
#define CM_SIDR_REQ_ATTR_ID	cpu_to_be16(0x0017)
#define CM_SIDR_REP_ATTR_ID	cpu_to_be16(0x0018)
#define CM_LAP_ATTR_ID		cpu_to_be16(0x0019)
#define CM_APR_ATTR_ID		cpu_to_be16(0x001A)
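
/*
 * The attribute IDs above are kept in network byte order (cpu_to_be16 on
 * a constant folds at compile time), so they can be assigned to or
 * compared against the __be16 attr_id field of struct ib_mad_hdr without
 * any per-message byte swapping.
 */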

enum cm_msg_sequence {
	CM_MSG_SEQUENCE_REQ,
	CM_MSG_SEQUENCE_LAP,
	CM_MSG_SEQUENCE_DREQ,
	CM_MSG_SEQUENCE_SIDR
};

struct cm_req_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 rsvd4;
	__be64 service_id;
	__be64 local_ca_guid;
	__be32 rsvd24;
	__be32 local_qkey;
	/* local QPN:24, responder resources:8 */
	__be32 offset32;
	/* local EECN:24, initiator depth:8 */
	__be32 offset36;
	/*
	 * remote EECN:24, remote CM response timeout:5,
	 * transport service type:2, end-to-end flow control:1
	 */
	__be32 offset40;
	/* starting PSN:24, local CM response timeout:5, retry count:3 */
	__be32 offset44;
	__be16 pkey;
	/* path MTU:4, RDC exists:1, RNR retry count:3. */
	u8 offset50;
	/* max CM Retries:4, SRQ:1, rsvd:3 */
	u8 offset51;

	__be16 primary_local_lid;
	__be16 primary_remote_lid;
	union ib_gid primary_local_gid;
	union ib_gid primary_remote_gid;
	/* flow label:20, rsvd:6, packet rate:6 */
	__be32 primary_offset88;
	u8 primary_traffic_class;
	u8 primary_hop_limit;
	/* SL:4, subnet local:1, rsvd:3 */
	u8 primary_offset94;
	/* local ACK timeout:5, rsvd:3 */
	u8 primary_offset95;

	__be16 alt_local_lid;
	__be16 alt_remote_lid;
	union ib_gid alt_local_gid;
	union ib_gid alt_remote_gid;
	/* flow label:20, rsvd:6, packet rate:6 */
	__be32 alt_offset132;
	u8 alt_traffic_class;
	u8 alt_hop_limit;
	/* SL:4, subnet local:1, rsvd:3 */
	u8 alt_offset138;
	/* local ACK timeout:5, rsvd:3 */
	u8 alt_offset139;

	u8 private_data[IB_CM_REQ_PRIVATE_DATA_SIZE];

} __attribute__ ((packed));
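
/*
 * The offsetNN members bundle several spec-defined fields into one
 * byte-aligned word; the number in each name appears to track the byte
 * offset of that word within the CM message body that follows the MAD
 * header. The static inline accessors below shift and mask the packed
 * words so callers never touch the raw layout directly.
 */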

static inline __be32 cm_req_get_local_qpn(struct cm_req_msg *req_msg)
{
	return cpu_to_be32(be32_to_cpu(req_msg->offset32) >> 8);
}

static inline void cm_req_set_local_qpn(struct cm_req_msg *req_msg, __be32 qpn)
{
	req_msg->offset32 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
					 (be32_to_cpu(req_msg->offset32) &
					  0x000000FF));
}

static inline u8 cm_req_get_resp_res(struct cm_req_msg *req_msg)
{
	return (u8) be32_to_cpu(req_msg->offset32);
}

static inline void cm_req_set_resp_res(struct cm_req_msg *req_msg, u8 resp_res)
{
	req_msg->offset32 = cpu_to_be32(resp_res |
					(be32_to_cpu(req_msg->offset32) &
					 0xFFFFFF00));
}

static inline u8 cm_req_get_init_depth(struct cm_req_msg *req_msg)
{
	return (u8) be32_to_cpu(req_msg->offset36);
}

static inline void cm_req_set_init_depth(struct cm_req_msg *req_msg,
					 u8 init_depth)
{
	req_msg->offset36 = cpu_to_be32(init_depth |
					(be32_to_cpu(req_msg->offset36) &
					 0xFFFFFF00));
}

static inline u8 cm_req_get_remote_resp_timeout(struct cm_req_msg *req_msg)
{
	return (u8) ((be32_to_cpu(req_msg->offset40) & 0xF8) >> 3);
}

static inline void cm_req_set_remote_resp_timeout(struct cm_req_msg *req_msg,
						   u8 resp_timeout)
{
	req_msg->offset40 = cpu_to_be32((resp_timeout << 3) |
					 (be32_to_cpu(req_msg->offset40) &
					  0xFFFFFF07));
}

static inline enum ib_qp_type cm_req_get_qp_type(struct cm_req_msg *req_msg)
{
	u8 transport_type = (u8) (be32_to_cpu(req_msg->offset40) & 0x06) >> 1;
	switch(transport_type) {
	case 0: return IB_QPT_RC;
	case 1: return IB_QPT_UC;
	default: return 0;
	}
}

static inline void cm_req_set_qp_type(struct cm_req_msg *req_msg,
				      enum ib_qp_type qp_type)
{
	switch(qp_type) {
	case IB_QPT_UC:
		req_msg->offset40 = cpu_to_be32((be32_to_cpu(
						  req_msg->offset40) &
						   0xFFFFFFF9) | 0x2);
		break;
	default:
		req_msg->offset40 = cpu_to_be32(be32_to_cpu(
						  req_msg->offset40) &
						   0xFFFFFFF9);
	}
}

static inline u8 cm_req_get_flow_ctrl(struct cm_req_msg *req_msg)
{
	return be32_to_cpu(req_msg->offset40) & 0x1;
}

static inline void cm_req_set_flow_ctrl(struct cm_req_msg *req_msg,
					u8 flow_ctrl)
{
	req_msg->offset40 = cpu_to_be32((flow_ctrl & 0x1) |
					 (be32_to_cpu(req_msg->offset40) &
					  0xFFFFFFFE));
}

static inline __be32 cm_req_get_starting_psn(struct cm_req_msg *req_msg)
{
	return cpu_to_be32(be32_to_cpu(req_msg->offset44) >> 8);
}

static inline void cm_req_set_starting_psn(struct cm_req_msg *req_msg,
					   __be32 starting_psn)
{
	req_msg->offset44 = cpu_to_be32((be32_to_cpu(starting_psn) << 8) |
			    (be32_to_cpu(req_msg->offset44) & 0x000000FF));
}

static inline u8 cm_req_get_local_resp_timeout(struct cm_req_msg *req_msg)
{
	return (u8) ((be32_to_cpu(req_msg->offset44) & 0xF8) >> 3);
}

static inline void cm_req_set_local_resp_timeout(struct cm_req_msg *req_msg,
						  u8 resp_timeout)
{
	req_msg->offset44 = cpu_to_be32((resp_timeout << 3) |
			    (be32_to_cpu(req_msg->offset44) & 0xFFFFFF07));
}

static inline u8 cm_req_get_retry_count(struct cm_req_msg *req_msg)
{
	return (u8) (be32_to_cpu(req_msg->offset44) & 0x7);
}

static inline void cm_req_set_retry_count(struct cm_req_msg *req_msg,
					  u8 retry_count)
{
	req_msg->offset44 = cpu_to_be32((retry_count & 0x7) |
			    (be32_to_cpu(req_msg->offset44) & 0xFFFFFFF8));
}

static inline u8 cm_req_get_path_mtu(struct cm_req_msg *req_msg)
{
	return req_msg->offset50 >> 4;
}

static inline void cm_req_set_path_mtu(struct cm_req_msg *req_msg, u8 path_mtu)
{
	req_msg->offset50 = (u8) ((req_msg->offset50 & 0xF) | (path_mtu << 4));
}

static inline u8 cm_req_get_rnr_retry_count(struct cm_req_msg *req_msg)
{
	return req_msg->offset50 & 0x7;
}

static inline void cm_req_set_rnr_retry_count(struct cm_req_msg *req_msg,
					      u8 rnr_retry_count)
{
	req_msg->offset50 = (u8) ((req_msg->offset50 & 0xF8) |
				  (rnr_retry_count & 0x7));
}

static inline u8 cm_req_get_max_cm_retries(struct cm_req_msg *req_msg)
{
	return req_msg->offset51 >> 4;
}

static inline void cm_req_set_max_cm_retries(struct cm_req_msg *req_msg,
					     u8 retries)
{
	req_msg->offset51 = (u8) ((req_msg->offset51 & 0xF) | (retries << 4));
}

static inline u8 cm_req_get_srq(struct cm_req_msg *req_msg)
{
	return (req_msg->offset51 & 0x8) >> 3;
}

static inline void cm_req_set_srq(struct cm_req_msg *req_msg, u8 srq)
{
	req_msg->offset51 = (u8) ((req_msg->offset51 & 0xF7) |
				  ((srq & 0x1) << 3));
}

static inline __be32 cm_req_get_primary_flow_label(struct cm_req_msg *req_msg)
{
	return cpu_to_be32(be32_to_cpu(req_msg->primary_offset88) >> 12);
}

static inline void cm_req_set_primary_flow_label(struct cm_req_msg *req_msg,
						  __be32 flow_label)
{
	req_msg->primary_offset88 = cpu_to_be32(
				(be32_to_cpu(req_msg->primary_offset88) &
				 0x00000FFF) |
				(be32_to_cpu(flow_label) << 12));
}

static inline u8 cm_req_get_primary_packet_rate(struct cm_req_msg *req_msg)
{
	return (u8) (be32_to_cpu(req_msg->primary_offset88) & 0x3F);
}

static inline void cm_req_set_primary_packet_rate(struct cm_req_msg *req_msg,
						   u8 rate)
{
	req_msg->primary_offset88 = cpu_to_be32(
				(be32_to_cpu(req_msg->primary_offset88) &
				 0xFFFFFFC0) | (rate & 0x3F));
}

static inline u8 cm_req_get_primary_sl(struct cm_req_msg *req_msg)
{
	return (u8) (req_msg->primary_offset94 >> 4);
}

static inline void cm_req_set_primary_sl(struct cm_req_msg *req_msg, u8 sl)
{
	req_msg->primary_offset94 = (u8) ((req_msg->primary_offset94 & 0x0F) |
					  (sl << 4));
}

static inline u8 cm_req_get_primary_subnet_local(struct cm_req_msg *req_msg)
{
	return (u8) ((req_msg->primary_offset94 & 0x08) >> 3);
}

static inline void cm_req_set_primary_subnet_local(struct cm_req_msg *req_msg,
						    u8 subnet_local)
{
	req_msg->primary_offset94 = (u8) ((req_msg->primary_offset94 & 0xF7) |
					  ((subnet_local & 0x1) << 3));
}

static inline u8 cm_req_get_primary_local_ack_timeout(struct cm_req_msg *req_msg)
{
	return (u8) (req_msg->primary_offset95 >> 3);
}

static inline void cm_req_set_primary_local_ack_timeout(struct cm_req_msg *req_msg,
							 u8 local_ack_timeout)
{
	req_msg->primary_offset95 = (u8) ((req_msg->primary_offset95 & 0x07) |
					  (local_ack_timeout << 3));
}

static inline __be32 cm_req_get_alt_flow_label(struct cm_req_msg *req_msg)
{
	return cpu_to_be32(be32_to_cpu(req_msg->alt_offset132) >> 12);
}

static inline void cm_req_set_alt_flow_label(struct cm_req_msg *req_msg,
					     __be32 flow_label)
{
	req_msg->alt_offset132 = cpu_to_be32(
				(be32_to_cpu(req_msg->alt_offset132) &
				 0x00000FFF) |
				(be32_to_cpu(flow_label) << 12));
}

static inline u8 cm_req_get_alt_packet_rate(struct cm_req_msg *req_msg)
{
	return (u8) (be32_to_cpu(req_msg->alt_offset132) & 0x3F);
}

static inline void cm_req_set_alt_packet_rate(struct cm_req_msg *req_msg,
					      u8 rate)
{
	req_msg->alt_offset132 = cpu_to_be32(
				(be32_to_cpu(req_msg->alt_offset132) &
				 0xFFFFFFC0) | (rate & 0x3F));
}

static inline u8 cm_req_get_alt_sl(struct cm_req_msg *req_msg)
{
	return (u8) (req_msg->alt_offset138 >> 4);
}

static inline void cm_req_set_alt_sl(struct cm_req_msg *req_msg, u8 sl)
{
	req_msg->alt_offset138 = (u8) ((req_msg->alt_offset138 & 0x0F) |
				       (sl << 4));
}

static inline u8 cm_req_get_alt_subnet_local(struct cm_req_msg *req_msg)
{
	return (u8) ((req_msg->alt_offset138 & 0x08) >> 3);
}

static inline void cm_req_set_alt_subnet_local(struct cm_req_msg *req_msg,
					       u8 subnet_local)
{
	req_msg->alt_offset138 = (u8) ((req_msg->alt_offset138 & 0xF7) |
				       ((subnet_local & 0x1) << 3));
}

static inline u8 cm_req_get_alt_local_ack_timeout(struct cm_req_msg *req_msg)
{
	return (u8) (req_msg->alt_offset139 >> 3);
}

static inline void cm_req_set_alt_local_ack_timeout(struct cm_req_msg *req_msg,
						    u8 local_ack_timeout)
{
	req_msg->alt_offset139 = (u8) ((req_msg->alt_offset139 & 0x07) |
				       (local_ack_timeout << 3));
}
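
/*
 * Illustrative sketch (not from the original source): how the REQ
 * accessors above combine with the network-byte-order convention noted
 * near the top of this file. The function and variable names below are
 * hypothetical.
 */
#if 0
static void example_fill_req_fields(struct cm_req_msg *req_msg,
				    u32 local_qpn, u32 starting_psn)
{
	/* 24-bit wire fields take __be32 arguments */
	cm_req_set_local_qpn(req_msg, cpu_to_be32(local_qpn));
	cm_req_set_starting_psn(req_msg, cpu_to_be32(starting_psn));

	/* narrow fields are plain u8 and are masked by the setters */
	cm_req_set_retry_count(req_msg, 7);
	cm_req_set_rnr_retry_count(req_msg, 7);
}
#endif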

/* Message REJected or MRAed */
enum cm_msg_response {
	CM_MSG_RESPONSE_REQ = 0x0,
	CM_MSG_RESPONSE_REP = 0x1,
	CM_MSG_RESPONSE_OTHER = 0x2
};

struct cm_mra_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 remote_comm_id;
	/* message MRAed:2, rsvd:6 */
	u8 offset8;
	/* service timeout:5, rsvd:3 */
	u8 offset9;

	u8 private_data[IB_CM_MRA_PRIVATE_DATA_SIZE];

} __attribute__ ((packed));

static inline u8 cm_mra_get_msg_mraed(struct cm_mra_msg *mra_msg)
{
	return (u8) (mra_msg->offset8 >> 6);
}

static inline void cm_mra_set_msg_mraed(struct cm_mra_msg *mra_msg, u8 msg)
{
	mra_msg->offset8 = (u8) ((mra_msg->offset8 & 0x3F) | (msg << 6));
}

static inline u8 cm_mra_get_service_timeout(struct cm_mra_msg *mra_msg)
{
	return (u8) (mra_msg->offset9 >> 3);
}

static inline void cm_mra_set_service_timeout(struct cm_mra_msg *mra_msg,
					      u8 service_timeout)
{
	mra_msg->offset9 = (u8) ((mra_msg->offset9 & 0x07) |
				 (service_timeout << 3));
}

struct cm_rej_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 remote_comm_id;
	/* message REJected:2, rsvd:6 */
	u8 offset8;
	/* reject info length:7, rsvd:1. */
	u8 offset9;
	__be16 reason;
	u8 ari[IB_CM_REJ_ARI_LENGTH];

	u8 private_data[IB_CM_REJ_PRIVATE_DATA_SIZE];

} __attribute__ ((packed));

static inline u8 cm_rej_get_msg_rejected(struct cm_rej_msg *rej_msg)
{
	return (u8) (rej_msg->offset8 >> 6);
}

static inline void cm_rej_set_msg_rejected(struct cm_rej_msg *rej_msg, u8 msg)
{
	rej_msg->offset8 = (u8) ((rej_msg->offset8 & 0x3F) | (msg << 6));
}

static inline u8 cm_rej_get_reject_info_len(struct cm_rej_msg *rej_msg)
{
	return (u8) (rej_msg->offset9 >> 1);
}

static inline void cm_rej_set_reject_info_len(struct cm_rej_msg *rej_msg,
					      u8 len)
{
	rej_msg->offset9 = (u8) ((rej_msg->offset9 & 0x1) | (len << 1));
}

struct cm_rep_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 remote_comm_id;
	__be32 local_qkey;
	/* local QPN:24, rsvd:8 */
	__be32 offset12;
	/* local EECN:24, rsvd:8 */
	__be32 offset16;
	/* starting PSN:24 rsvd:8 */
	__be32 offset20;
	u8 resp_resources;
	u8 initiator_depth;
	/* target ACK delay:5, failover accepted:2, end-to-end flow control:1 */
	u8 offset26;
	/* RNR retry count:3, SRQ:1, rsvd:5 */
	u8 offset27;
	__be64 local_ca_guid;

	u8 private_data[IB_CM_REP_PRIVATE_DATA_SIZE];

} __attribute__ ((packed));

static inline __be32 cm_rep_get_local_qpn(struct cm_rep_msg *rep_msg)
{
	return cpu_to_be32(be32_to_cpu(rep_msg->offset12) >> 8);
}

static inline void cm_rep_set_local_qpn(struct cm_rep_msg *rep_msg, __be32 qpn)
{
	rep_msg->offset12 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
			    (be32_to_cpu(rep_msg->offset12) & 0x000000FF));
}

static inline __be32 cm_rep_get_starting_psn(struct cm_rep_msg *rep_msg)
{
	return cpu_to_be32(be32_to_cpu(rep_msg->offset20) >> 8);
}

static inline void cm_rep_set_starting_psn(struct cm_rep_msg *rep_msg,
					   __be32 starting_psn)
{
	rep_msg->offset20 = cpu_to_be32((be32_to_cpu(starting_psn) << 8) |
			    (be32_to_cpu(rep_msg->offset20) & 0x000000FF));
}

static inline u8 cm_rep_get_target_ack_delay(struct cm_rep_msg *rep_msg)
{
	return (u8) (rep_msg->offset26 >> 3);
}

static inline void cm_rep_set_target_ack_delay(struct cm_rep_msg *rep_msg,
					       u8 target_ack_delay)
{
	rep_msg->offset26 = (u8) ((rep_msg->offset26 & 0x07) |
				  (target_ack_delay << 3));
}

static inline u8 cm_rep_get_failover(struct cm_rep_msg *rep_msg)
{
	return (u8) ((rep_msg->offset26 & 0x06) >> 1);
}

static inline void cm_rep_set_failover(struct cm_rep_msg *rep_msg, u8 failover)
{
	rep_msg->offset26 = (u8) ((rep_msg->offset26 & 0xF9) |
				  ((failover & 0x3) << 1));
}

static inline u8 cm_rep_get_flow_ctrl(struct cm_rep_msg *rep_msg)
{
	return (u8) (rep_msg->offset26 & 0x01);
}

static inline void cm_rep_set_flow_ctrl(struct cm_rep_msg *rep_msg,
					u8 flow_ctrl)
{
	rep_msg->offset26 = (u8) ((rep_msg->offset26 & 0xFE) |
				  (flow_ctrl & 0x1));
}

static inline u8 cm_rep_get_rnr_retry_count(struct cm_rep_msg *rep_msg)
{
	return (u8) (rep_msg->offset27 >> 5);
}

static inline void cm_rep_set_rnr_retry_count(struct cm_rep_msg *rep_msg,
					      u8 rnr_retry_count)
{
	rep_msg->offset27 = (u8) ((rep_msg->offset27 & 0x1F) |
				  (rnr_retry_count << 5));
}

static inline u8 cm_rep_get_srq(struct cm_rep_msg *rep_msg)
{
	return (u8) ((rep_msg->offset27 >> 4) & 0x1);
}

static inline void cm_rep_set_srq(struct cm_rep_msg *rep_msg, u8 srq)
{
	rep_msg->offset27 = (u8) ((rep_msg->offset27 & 0xEF) |
				  ((srq & 0x1) << 4));
}
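
/*
 * Illustrative sketch (not from the original source): reading packed REP
 * fields back into host order with the getters above. The function name
 * is hypothetical.
 */
#if 0
static void example_parse_rep(struct cm_rep_msg *rep_msg)
{
	u32 remote_qpn   = be32_to_cpu(cm_rep_get_local_qpn(rep_msg));
	u32 starting_psn = be32_to_cpu(cm_rep_get_starting_psn(rep_msg));
	u8  rnr_retry    = cm_rep_get_rnr_retry_count(rep_msg);
	/* ... use the host-order values to set up the local QP ... */
}
#endif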

struct cm_rtu_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 remote_comm_id;

	u8 private_data[IB_CM_RTU_PRIVATE_DATA_SIZE];

} __attribute__ ((packed));

struct cm_dreq_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 remote_comm_id;
	/* remote QPN/EECN:24, rsvd:8 */
	__be32 offset8;

	u8 private_data[IB_CM_DREQ_PRIVATE_DATA_SIZE];

} __attribute__ ((packed));

static inline __be32 cm_dreq_get_remote_qpn(struct cm_dreq_msg *dreq_msg)
{
	return cpu_to_be32(be32_to_cpu(dreq_msg->offset8) >> 8);
}

static inline void cm_dreq_set_remote_qpn(struct cm_dreq_msg *dreq_msg, __be32 qpn)
{
	dreq_msg->offset8 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
			    (be32_to_cpu(dreq_msg->offset8) & 0x000000FF));
}

struct cm_drep_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 remote_comm_id;

	u8 private_data[IB_CM_DREP_PRIVATE_DATA_SIZE];

} __attribute__ ((packed));

struct cm_lap_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 remote_comm_id;

	__be32 rsvd8;
	/* remote QPN/EECN:24, remote CM response timeout:5, rsvd:3 */
	__be32 offset12;
	__be32 rsvd16;

	__be16 alt_local_lid;
	__be16 alt_remote_lid;
	union ib_gid alt_local_gid;
	union ib_gid alt_remote_gid;
	/* flow label:20, rsvd:4, traffic class:8 */
	__be32 offset56;
	u8 alt_hop_limit;
	/* rsvd:2, packet rate:6 */
	u8 offset61;
	/* SL:4, subnet local:1, rsvd:3 */
	u8 offset62;
	/* local ACK timeout:5, rsvd:3 */
	u8 offset63;

	u8 private_data[IB_CM_LAP_PRIVATE_DATA_SIZE];
} __attribute__ ((packed));

static inline __be32 cm_lap_get_remote_qpn(struct cm_lap_msg *lap_msg)
{
	return cpu_to_be32(be32_to_cpu(lap_msg->offset12) >> 8);
}

static inline void cm_lap_set_remote_qpn(struct cm_lap_msg *lap_msg, __be32 qpn)
{
	lap_msg->offset12 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
					 (be32_to_cpu(lap_msg->offset12) &
					  0x000000FF));
}

static inline u8 cm_lap_get_remote_resp_timeout(struct cm_lap_msg *lap_msg)
{
	return (u8) ((be32_to_cpu(lap_msg->offset12) & 0xF8) >> 3);
}

static inline void cm_lap_set_remote_resp_timeout(struct cm_lap_msg *lap_msg,
						   u8 resp_timeout)
{
	lap_msg->offset12 = cpu_to_be32((resp_timeout << 3) |
					 (be32_to_cpu(lap_msg->offset12) &
					  0xFFFFFF07));
}

static inline __be32 cm_lap_get_flow_label(struct cm_lap_msg *lap_msg)
{
	return cpu_to_be32(be32_to_cpu(lap_msg->offset56) >> 12);
}

static inline void cm_lap_set_flow_label(struct cm_lap_msg *lap_msg,
					 __be32 flow_label)
{
	lap_msg->offset56 = cpu_to_be32(
				 (be32_to_cpu(lap_msg->offset56) & 0x00000FFF) |
				 (be32_to_cpu(flow_label) << 12));
}

static inline u8 cm_lap_get_traffic_class(struct cm_lap_msg *lap_msg)
{
	return (u8) be32_to_cpu(lap_msg->offset56);
}

static inline void cm_lap_set_traffic_class(struct cm_lap_msg *lap_msg,
					    u8 traffic_class)
{
	lap_msg->offset56 = cpu_to_be32(traffic_class |
					 (be32_to_cpu(lap_msg->offset56) &
					  0xFFFFFF00));
}

static inline u8 cm_lap_get_packet_rate(struct cm_lap_msg *lap_msg)
{
	return lap_msg->offset61 & 0x3F;
}

static inline void cm_lap_set_packet_rate(struct cm_lap_msg *lap_msg,
					  u8 packet_rate)
{
	lap_msg->offset61 = (packet_rate & 0x3F) | (lap_msg->offset61 & 0xC0);
}

static inline u8 cm_lap_get_sl(struct cm_lap_msg *lap_msg)
{
	return lap_msg->offset62 >> 4;
}

static inline void cm_lap_set_sl(struct cm_lap_msg *lap_msg, u8 sl)
{
	lap_msg->offset62 = (sl << 4) | (lap_msg->offset62 & 0x0F);
}

static inline u8 cm_lap_get_subnet_local(struct cm_lap_msg *lap_msg)
{
	return (lap_msg->offset62 >> 3) & 0x1;
}

static inline void cm_lap_set_subnet_local(struct cm_lap_msg *lap_msg,
					   u8 subnet_local)
{
	/* subnet local lives in offset62 (see the getter above) */
	lap_msg->offset62 = ((subnet_local & 0x1) << 3) |
			    (lap_msg->offset62 & 0xF7);
}

static inline u8 cm_lap_get_local_ack_timeout(struct cm_lap_msg *lap_msg)
{
	return lap_msg->offset63 >> 3;
}

static inline void cm_lap_set_local_ack_timeout(struct cm_lap_msg *lap_msg,
						u8 local_ack_timeout)
{
	lap_msg->offset63 = (local_ack_timeout << 3) |
			    (lap_msg->offset63 & 0x07);
}

struct cm_apr_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 remote_comm_id;

	u8 info_length;
	u8 ap_status;
	u8 info[IB_CM_APR_INFO_LENGTH];

	u8 private_data[IB_CM_APR_PRIVATE_DATA_SIZE];
} __attribute__ ((packed));

struct cm_sidr_req_msg {
	struct ib_mad_hdr hdr;

	__be32 request_id;
	__be16 pkey;
	__be16 rsvd;
	__be64 service_id;

	u8 private_data[IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE];
} __attribute__ ((packed));

struct cm_sidr_rep_msg {
	struct ib_mad_hdr hdr;

	__be32 request_id;
	u8 status;
	u8 info_length;
	__be16 rsvd;
	/* QPN:24, rsvd:8 */
	__be32 offset8;
	__be64 service_id;
	__be32 qkey;
	u8 info[IB_CM_SIDR_REP_INFO_LENGTH];

	u8 private_data[IB_CM_SIDR_REP_PRIVATE_DATA_SIZE];
} __attribute__ ((packed));

static inline __be32 cm_sidr_rep_get_qpn(struct cm_sidr_rep_msg *sidr_rep_msg)
{
	return cpu_to_be32(be32_to_cpu(sidr_rep_msg->offset8) >> 8);
}

static inline void cm_sidr_rep_set_qpn(struct cm_sidr_rep_msg *sidr_rep_msg,
				       __be32 qpn)
{
	sidr_rep_msg->offset8 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
					    (be32_to_cpu(sidr_rep_msg->offset8) &
					     0x000000FF));
}

#endif /* CM_MSGS_H */