/*
 * Copyright (c) 2004-2006 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: cm.c 4311 2005-12-05 18:42:01Z sean.hefty $
 */
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/random.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include "cm_msgs.h"
MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("InfiniBand CM");
MODULE_LICENSE("Dual BSD/GPL");
static void cm_add_one(struct ib_device *device);
static void cm_remove_one(struct ib_device *device);

static struct ib_client cm_client = {
	.name   = "cm",
	.add    = cm_add_one,
	.remove = cm_remove_one
};
static struct ib_cm {
	spinlock_t lock;
	struct list_head device_list;
	rwlock_t device_lock;
	struct rb_root listen_service_table;
	u64 listen_service_id;
	/* struct rb_root peer_service_table; todo: fix peer to peer */
	struct rb_root remote_qp_table;
	struct rb_root remote_id_table;
	struct rb_root remote_sidr_table;
	struct idr local_id_table;
	__be32 random_id_operand;
	struct list_head timewait_list;
	struct workqueue_struct *wq;
} cm;
struct cm_port {
	struct cm_device *cm_dev;
	struct ib_mad_agent *mad_agent;
	u8 port_num;
};

struct cm_device {
	struct list_head list;
	struct ib_device *device;
	struct cm_port port[0];
};

struct cm_av {
	struct cm_port *port;
	union ib_gid dgid;
	struct ib_ah_attr ah_attr;
	u16 pkey_index;
	u8 packet_life_time;
};

struct cm_work {
	struct delayed_work work;
	struct list_head list;
	struct cm_port *port;
	struct ib_mad_recv_wc *mad_recv_wc;	/* Received MADs */
	__be32 local_id;			/* Established / timewait */
	__be32 remote_id;
	struct ib_cm_event cm_event;
	struct ib_sa_path_rec path[0];
};
struct cm_timewait_info {
	struct cm_work work;			/* Must be first. */
	struct list_head list;
	struct rb_node remote_qp_node;
	struct rb_node remote_id_node;
	__be64 remote_ca_guid;
	__be32 remote_qpn;
	u8 inserted_remote_qp;
	u8 inserted_remote_id;
};
struct cm_id_private {
	struct ib_cm_id	id;

	struct rb_node service_node;
	struct rb_node sidr_id_node;
	spinlock_t lock;	/* Do not acquire inside cm.lock */
	struct completion comp;
	atomic_t refcount;

	struct ib_mad_send_buf *msg;
	struct cm_timewait_info *timewait_info;
	/* todo: use alternate port on send failure */
	struct cm_av av;
	struct cm_av alt_av;
	struct ib_cm_compare_data *compare_data;

	void *private_data;
	__be64 tid;
	__be32 local_qpn;
	__be32 remote_qpn;
	enum ib_qp_type qp_type;
	__be32 sq_psn;
	__be32 rq_psn;
	int timeout_ms;
	enum ib_mtu path_mtu;
	__be16 pkey;
	u8 private_data_len;
	u8 max_cm_retries;
	u8 peer_to_peer;
	u8 responder_resources;
	u8 initiator_depth;
	u8 retry_count;
	u8 rnr_retry_count;
	u8 service_timeout;

	struct list_head work_list;
	atomic_t work_count;
};
static void cm_work_handler(struct work_struct *work);

static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
{
	if (atomic_dec_and_test(&cm_id_priv->refcount))
		complete(&cm_id_priv->comp);
}
static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
			struct ib_mad_send_buf **msg)
{
	struct ib_mad_agent *mad_agent;
	struct ib_mad_send_buf *m;
	struct ib_ah *ah;

	mad_agent = cm_id_priv->av.port->mad_agent;
	ah = ib_create_ah(mad_agent->qp->pd, &cm_id_priv->av.ah_attr);
	if (IS_ERR(ah))
		return PTR_ERR(ah);

	m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
			       cm_id_priv->av.pkey_index,
			       0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
			       GFP_ATOMIC);
	if (IS_ERR(m)) {
		ib_destroy_ah(ah);
		return PTR_ERR(m);
	}

	/* Timeout set by caller if response is expected. */
	m->ah = ah;
	m->retries = cm_id_priv->max_cm_retries;

	atomic_inc(&cm_id_priv->refcount);
	m->context[0] = cm_id_priv;
	*msg = m;
	return 0;
}
static int cm_alloc_response_msg(struct cm_port *port,
				 struct ib_mad_recv_wc *mad_recv_wc,
				 struct ib_mad_send_buf **msg)
{
	struct ib_mad_send_buf *m;
	struct ib_ah *ah;

	ah = ib_create_ah_from_wc(port->mad_agent->qp->pd, mad_recv_wc->wc,
				  mad_recv_wc->recv_buf.grh, port->port_num);
	if (IS_ERR(ah))
		return PTR_ERR(ah);

	m = ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index,
			       0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
			       GFP_ATOMIC);
	if (IS_ERR(m)) {
		ib_destroy_ah(ah);
		return PTR_ERR(m);
	}
	m->ah = ah;
	*msg = m;
	return 0;
}
static void cm_free_msg(struct ib_mad_send_buf *msg)
{
	ib_destroy_ah(msg->ah);
	if (msg->context[0])
		cm_deref_id(msg->context[0]);
	ib_free_send_mad(msg);
}
static void * cm_copy_private_data(const void *private_data,
				   u8 private_data_len)
{
	void *data;

	if (!private_data || !private_data_len)
		return NULL;

	data = kmemdup(private_data, private_data_len, GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);

	return data;
}
static void cm_set_private_data(struct cm_id_private *cm_id_priv,
				void *private_data, u8 private_data_len)
{
	if (cm_id_priv->private_data && cm_id_priv->private_data_len)
		kfree(cm_id_priv->private_data);

	cm_id_priv->private_data = private_data;
	cm_id_priv->private_data_len = private_data_len;
}
static void cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
				    struct ib_grh *grh, struct cm_av *av)
{
	av->port = port;
	av->pkey_index = wc->pkey_index;
	ib_init_ah_from_wc(port->cm_dev->device, port->port_num, wc,
			   grh, &av->ah_attr);
}
static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
{
	struct cm_device *cm_dev;
	struct cm_port *port = NULL;
	unsigned long flags;
	int ret;
	u8 p;

	read_lock_irqsave(&cm.device_lock, flags);
	list_for_each_entry(cm_dev, &cm.device_list, list) {
		if (!ib_find_cached_gid(cm_dev->device, &path->sgid,
					&p, NULL)) {
			port = &cm_dev->port[p-1];
			break;
		}
	}
	read_unlock_irqrestore(&cm.device_lock, flags);

	if (!port)
		return -EINVAL;

	ret = ib_find_cached_pkey(cm_dev->device, port->port_num,
				  be16_to_cpu(path->pkey), &av->pkey_index);
	if (ret)
		return ret;

	av->port = port;
	ib_init_ah_from_path(cm_dev->device, port->port_num, path,
			     &av->ah_attr);
	av->packet_life_time = path->packet_life_time;
	return 0;
}
static int cm_alloc_id(struct cm_id_private *cm_id_priv)
{
	unsigned long flags;
	int ret, id;
	static int next_id;

	do {
		spin_lock_irqsave(&cm.lock, flags);
		ret = idr_get_new_above(&cm.local_id_table, cm_id_priv,
					next_id++, &id);
		spin_unlock_irqrestore(&cm.lock, flags);
	} while( (ret == -EAGAIN) && idr_pre_get(&cm.local_id_table, GFP_KERNEL) );

	cm_id_priv->id.local_id = (__force __be32) (id ^ cm.random_id_operand);
	return ret;
}
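/*
 * Note: the idr index is XORed with cm.random_id_operand before being
 * handed out as the local communication ID, which makes on-the-wire IDs
 * harder to guess; cm_free_id() and cm_get_id() below apply the same XOR
 * to recover the idr index.
 */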
static void cm_free_id(__be32 local_id)
{
	unsigned long flags;

	spin_lock_irqsave(&cm.lock, flags);
	idr_remove(&cm.local_id_table,
		   (__force int) (local_id ^ cm.random_id_operand));
	spin_unlock_irqrestore(&cm.lock, flags);
}
static struct cm_id_private * cm_get_id(__be32 local_id, __be32 remote_id)
{
	struct cm_id_private *cm_id_priv;

	cm_id_priv = idr_find(&cm.local_id_table,
			      (__force int) (local_id ^ cm.random_id_operand));
	if (cm_id_priv) {
		if (cm_id_priv->id.remote_id == remote_id)
			atomic_inc(&cm_id_priv->refcount);
		else
			cm_id_priv = NULL;
	}

	return cm_id_priv;
}
static struct cm_id_private * cm_acquire_id(__be32 local_id, __be32 remote_id)
{
	struct cm_id_private *cm_id_priv;
	unsigned long flags;

	spin_lock_irqsave(&cm.lock, flags);
	cm_id_priv = cm_get_id(local_id, remote_id);
	spin_unlock_irqrestore(&cm.lock, flags);

	return cm_id_priv;
}
static void cm_mask_copy(u8 *dst, u8 *src, u8 *mask)
{
	int i;

	for (i = 0; i < IB_CM_COMPARE_SIZE / sizeof(unsigned long); i++)
		((unsigned long *) dst)[i] = ((unsigned long *) src)[i] &
					     ((unsigned long *) mask)[i];
}
static int cm_compare_data(struct ib_cm_compare_data *src_data,
			   struct ib_cm_compare_data *dst_data)
{
	u8 src[IB_CM_COMPARE_SIZE];
	u8 dst[IB_CM_COMPARE_SIZE];

	if (!src_data || !dst_data)
		return 0;

	cm_mask_copy(src, src_data->data, dst_data->mask);
	cm_mask_copy(dst, dst_data->data, src_data->mask);
	return memcmp(src, dst, IB_CM_COMPARE_SIZE);
}
static int cm_compare_private_data(u8 *private_data,
				   struct ib_cm_compare_data *dst_data)
{
	u8 src[IB_CM_COMPARE_SIZE];

	if (!dst_data)
		return 0;

	cm_mask_copy(src, private_data, dst_data->mask);
	return memcmp(src, dst_data->data, IB_CM_COMPARE_SIZE);
}
static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
{
	struct rb_node **link = &cm.listen_service_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_id_private *cur_cm_id_priv;
	__be64 service_id = cm_id_priv->id.service_id;
	__be64 service_mask = cm_id_priv->id.service_mask;
	int data_cmp;

	while (*link) {
		parent = *link;
		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
					  service_node);
		data_cmp = cm_compare_data(cm_id_priv->compare_data,
					   cur_cm_id_priv->compare_data);
		if ((cur_cm_id_priv->id.service_mask & service_id) ==
		    (service_mask & cur_cm_id_priv->id.service_id) &&
		    (cm_id_priv->id.device == cur_cm_id_priv->id.device) &&
		    !data_cmp)
			return cur_cm_id_priv;

		if (cm_id_priv->id.device < cur_cm_id_priv->id.device)
			link = &(*link)->rb_left;
		else if (cm_id_priv->id.device > cur_cm_id_priv->id.device)
			link = &(*link)->rb_right;
		else if (service_id < cur_cm_id_priv->id.service_id)
			link = &(*link)->rb_left;
		else if (service_id > cur_cm_id_priv->id.service_id)
			link = &(*link)->rb_right;
		else if (data_cmp < 0)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}
	rb_link_node(&cm_id_priv->service_node, parent, link);
	rb_insert_color(&cm_id_priv->service_node, &cm.listen_service_table);
	return NULL;
}
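/*
 * Listens are kept in a red-black tree keyed by (device, service_id,
 * compare_data), in that order.  An existing entry whose masked service
 * ID and private-data compare both match is returned instead of inserting
 * a duplicate; cm_find_listen() below walks the same ordering on receive.
 */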
static struct cm_id_private * cm_find_listen(struct ib_device *device,
					     __be64 service_id,
					     u8 *private_data)
{
	struct rb_node *node = cm.listen_service_table.rb_node;
	struct cm_id_private *cm_id_priv;
	int data_cmp;

	while (node) {
		cm_id_priv = rb_entry(node, struct cm_id_private, service_node);
		data_cmp = cm_compare_private_data(private_data,
						   cm_id_priv->compare_data);
		if ((cm_id_priv->id.service_mask & service_id) ==
		     cm_id_priv->id.service_id &&
		    (cm_id_priv->id.device == device) && !data_cmp)
			return cm_id_priv;

		if (device < cm_id_priv->id.device)
			node = node->rb_left;
		else if (device > cm_id_priv->id.device)
			node = node->rb_right;
		else if (service_id < cm_id_priv->id.service_id)
			node = node->rb_left;
		else if (service_id > cm_id_priv->id.service_id)
			node = node->rb_right;
		else if (data_cmp < 0)
			node = node->rb_left;
		else
			node = node->rb_right;
	}
	return NULL;
}
static struct cm_timewait_info * cm_insert_remote_id(struct cm_timewait_info
						     *timewait_info)
{
	struct rb_node **link = &cm.remote_id_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_timewait_info *cur_timewait_info;
	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
	__be32 remote_id = timewait_info->work.remote_id;

	while (*link) {
		parent = *link;
		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
					     remote_id_node);
		if (remote_id < cur_timewait_info->work.remote_id)
			link = &(*link)->rb_left;
		else if (remote_id > cur_timewait_info->work.remote_id)
			link = &(*link)->rb_right;
		else if (remote_ca_guid < cur_timewait_info->remote_ca_guid)
			link = &(*link)->rb_left;
		else if (remote_ca_guid > cur_timewait_info->remote_ca_guid)
			link = &(*link)->rb_right;
		else
			return cur_timewait_info;
	}
	timewait_info->inserted_remote_id = 1;
	rb_link_node(&timewait_info->remote_id_node, parent, link);
	rb_insert_color(&timewait_info->remote_id_node, &cm.remote_id_table);
	return NULL;
}
static struct cm_timewait_info * cm_find_remote_id(__be64 remote_ca_guid,
						   __be32 remote_id)
{
	struct rb_node *node = cm.remote_id_table.rb_node;
	struct cm_timewait_info *timewait_info;

	while (node) {
		timewait_info = rb_entry(node, struct cm_timewait_info,
					 remote_id_node);
		if (remote_id < timewait_info->work.remote_id)
			node = node->rb_left;
		else if (remote_id > timewait_info->work.remote_id)
			node = node->rb_right;
		else if (remote_ca_guid < timewait_info->remote_ca_guid)
			node = node->rb_left;
		else if (remote_ca_guid > timewait_info->remote_ca_guid)
			node = node->rb_right;
		else
			return timewait_info;
	}
	return NULL;
}
static struct cm_timewait_info * cm_insert_remote_qpn(struct cm_timewait_info
						      *timewait_info)
{
	struct rb_node **link = &cm.remote_qp_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_timewait_info *cur_timewait_info;
	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
	__be32 remote_qpn = timewait_info->remote_qpn;

	while (*link) {
		parent = *link;
		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
					     remote_qp_node);
		if (remote_qpn < cur_timewait_info->remote_qpn)
			link = &(*link)->rb_left;
		else if (remote_qpn > cur_timewait_info->remote_qpn)
			link = &(*link)->rb_right;
		else if (remote_ca_guid < cur_timewait_info->remote_ca_guid)
			link = &(*link)->rb_left;
		else if (remote_ca_guid > cur_timewait_info->remote_ca_guid)
			link = &(*link)->rb_right;
		else
			return cur_timewait_info;
	}
	timewait_info->inserted_remote_qp = 1;
	rb_link_node(&timewait_info->remote_qp_node, parent, link);
	rb_insert_color(&timewait_info->remote_qp_node, &cm.remote_qp_table);
	return NULL;
}
static struct cm_id_private * cm_insert_remote_sidr(struct cm_id_private
						    *cm_id_priv)
{
	struct rb_node **link = &cm.remote_sidr_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_id_private *cur_cm_id_priv;
	union ib_gid *port_gid = &cm_id_priv->av.dgid;
	__be32 remote_id = cm_id_priv->id.remote_id;

	while (*link) {
		parent = *link;
		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
					  sidr_id_node);
		if (remote_id < cur_cm_id_priv->id.remote_id)
			link = &(*link)->rb_left;
		else if (remote_id > cur_cm_id_priv->id.remote_id)
			link = &(*link)->rb_right;
		else {
			int cmp;
			cmp = memcmp(port_gid, &cur_cm_id_priv->av.dgid,
				     sizeof *port_gid);
			if (cmp < 0)
				link = &(*link)->rb_left;
			else if (cmp > 0)
				link = &(*link)->rb_right;
			else
				return cur_cm_id_priv;
		}
	}
	rb_link_node(&cm_id_priv->sidr_id_node, parent, link);
	rb_insert_color(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
	return NULL;
}
static void cm_reject_sidr_req(struct cm_id_private *cm_id_priv,
			       enum ib_cm_sidr_status status)
{
	struct ib_cm_sidr_rep_param param;

	memset(&param, 0, sizeof param);
	param.status = status;
	ib_send_cm_sidr_rep(&cm_id_priv->id, &param);
}
struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
				 ib_cm_handler cm_handler,
				 void *context)
{
	struct cm_id_private *cm_id_priv;
	int ret;

	cm_id_priv = kzalloc(sizeof *cm_id_priv, GFP_KERNEL);
	if (!cm_id_priv)
		return ERR_PTR(-ENOMEM);

	cm_id_priv->id.state = IB_CM_IDLE;
	cm_id_priv->id.device = device;
	cm_id_priv->id.cm_handler = cm_handler;
	cm_id_priv->id.context = context;
	cm_id_priv->id.remote_cm_qpn = 1;
	ret = cm_alloc_id(cm_id_priv);
	if (ret)
		goto error;

	spin_lock_init(&cm_id_priv->lock);
	init_completion(&cm_id_priv->comp);
	INIT_LIST_HEAD(&cm_id_priv->work_list);
	atomic_set(&cm_id_priv->work_count, -1);
	atomic_set(&cm_id_priv->refcount, 1);
	return &cm_id_priv->id;

error:
	kfree(cm_id_priv);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(ib_create_cm_id);
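/*
 * Illustrative passive-side usage only; my_cm_handler, my_context, and
 * my_service_id are consumer-supplied names, not part of this file:
 *
 *	struct ib_cm_id *id;
 *
 *	id = ib_create_cm_id(device, my_cm_handler, my_context);
 *	if (!IS_ERR(id))
 *		ib_cm_listen(id, my_service_id, 0, NULL);
 */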
static struct cm_work * cm_dequeue_work(struct cm_id_private *cm_id_priv)
{
	struct cm_work *work;

	if (list_empty(&cm_id_priv->work_list))
		return NULL;

	work = list_entry(cm_id_priv->work_list.next, struct cm_work, list);
	list_del(&work->list);
	return work;
}
static void cm_free_work(struct cm_work *work)
{
	if (work->mad_recv_wc)
		ib_free_recv_mad(work->mad_recv_wc);
	kfree(work);
}
static inline int cm_convert_to_ms(int iba_time)
{
	/* approximate conversion to ms from 4.096us x 2^iba_time */
	return 1 << max(iba_time - 8, 0);
}
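/*
 * Example: iba_time = 14 maps to 4.096us * 2^14 ~= 67ms, approximated
 * above as 1 << (14 - 8) = 64ms; values of 8 or below clamp to 1ms.
 */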
static void cm_cleanup_timewait(struct cm_timewait_info *timewait_info)
{
	if (timewait_info->inserted_remote_id) {
		rb_erase(&timewait_info->remote_id_node, &cm.remote_id_table);
		timewait_info->inserted_remote_id = 0;
	}

	if (timewait_info->inserted_remote_qp) {
		rb_erase(&timewait_info->remote_qp_node, &cm.remote_qp_table);
		timewait_info->inserted_remote_qp = 0;
	}
}
static struct cm_timewait_info * cm_create_timewait_info(__be32 local_id)
{
	struct cm_timewait_info *timewait_info;

	timewait_info = kzalloc(sizeof *timewait_info, GFP_KERNEL);
	if (!timewait_info)
		return ERR_PTR(-ENOMEM);

	timewait_info->work.local_id = local_id;
	INIT_DELAYED_WORK(&timewait_info->work.work, cm_work_handler);
	timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
	return timewait_info;
}
static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
{
	int wait_time;
	unsigned long flags;

	spin_lock_irqsave(&cm.lock, flags);
	cm_cleanup_timewait(cm_id_priv->timewait_info);
	list_add_tail(&cm_id_priv->timewait_info->list, &cm.timewait_list);
	spin_unlock_irqrestore(&cm.lock, flags);

	/*
	 * The cm_id could be destroyed by the user before we exit timewait.
	 * To protect against this, we search for the cm_id after exiting
	 * timewait before notifying the user that we've exited timewait.
	 */
	cm_id_priv->id.state = IB_CM_TIMEWAIT;
	wait_time = cm_convert_to_ms(cm_id_priv->av.packet_life_time + 1);
	queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
			   msecs_to_jiffies(wait_time));
	cm_id_priv->timewait_info = NULL;
}
static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
{
	unsigned long flags;

	cm_id_priv->id.state = IB_CM_IDLE;
	if (cm_id_priv->timewait_info) {
		spin_lock_irqsave(&cm.lock, flags);
		cm_cleanup_timewait(cm_id_priv->timewait_info);
		spin_unlock_irqrestore(&cm.lock, flags);
		kfree(cm_id_priv->timewait_info);
		cm_id_priv->timewait_info = NULL;
	}
}
static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
{
	struct cm_id_private *cm_id_priv;
	struct cm_work *work;
	unsigned long flags;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
retest:
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id->state) {
	case IB_CM_LISTEN:
		cm_id->state = IB_CM_IDLE;
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		spin_lock_irqsave(&cm.lock, flags);
		rb_erase(&cm_id_priv->service_node, &cm.listen_service_table);
		spin_unlock_irqrestore(&cm.lock, flags);
		break;
	case IB_CM_SIDR_REQ_SENT:
		cm_id->state = IB_CM_IDLE;
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		break;
	case IB_CM_SIDR_REQ_RCVD:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT);
		break;
	case IB_CM_REQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT,
			       &cm_id_priv->id.device->node_guid,
			       sizeof cm_id_priv->id.device->node_guid,
			       NULL, 0);
		break;
	case IB_CM_REQ_RCVD:
		if (err == -ENOMEM) {
			/* Do not reject to allow future retries. */
			cm_reset_to_idle(cm_id_priv);
			spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		} else {
			spin_unlock_irqrestore(&cm_id_priv->lock, flags);
			ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
				       NULL, 0, NULL, 0);
		}
		break;
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		/* Fall through */
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
			       NULL, 0, NULL, 0);
		break;
	case IB_CM_ESTABLISHED:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ib_send_cm_dreq(cm_id, NULL, 0);
		goto retest;
	case IB_CM_DREQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		cm_enter_timewait(cm_id_priv);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		break;
	case IB_CM_DREQ_RCVD:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ib_send_cm_drep(cm_id, NULL, 0);
		break;
	default:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		break;
	}

	cm_free_id(cm_id->local_id);
	cm_deref_id(cm_id_priv);
	wait_for_completion(&cm_id_priv->comp);
	while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
		cm_free_work(work);
	kfree(cm_id_priv->compare_data);
	kfree(cm_id_priv->private_data);
	kfree(cm_id_priv);
}
void ib_destroy_cm_id(struct ib_cm_id *cm_id)
{
	cm_destroy_id(cm_id, 0);
}
EXPORT_SYMBOL(ib_destroy_cm_id);
int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask,
		 struct ib_cm_compare_data *compare_data)
{
	struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
	unsigned long flags;
	int ret = 0;

	service_mask = service_mask ? service_mask :
		       __constant_cpu_to_be64(~0ULL);
	service_id &= service_mask;
	if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
	    (service_id != IB_CM_ASSIGN_SERVICE_ID))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	if (cm_id->state != IB_CM_IDLE)
		return -EINVAL;

	if (compare_data) {
		cm_id_priv->compare_data = kzalloc(sizeof *compare_data,
						   GFP_KERNEL);
		if (!cm_id_priv->compare_data)
			return -ENOMEM;
		cm_mask_copy(cm_id_priv->compare_data->data,
			     compare_data->data, compare_data->mask);
		memcpy(cm_id_priv->compare_data->mask, compare_data->mask,
		       IB_CM_COMPARE_SIZE);
	}

	cm_id->state = IB_CM_LISTEN;

	spin_lock_irqsave(&cm.lock, flags);
	if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
		cm_id->service_id = cpu_to_be64(cm.listen_service_id++);
		cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
	} else {
		cm_id->service_id = service_id;
		cm_id->service_mask = service_mask;
	}
	cur_cm_id_priv = cm_insert_listen(cm_id_priv);
	spin_unlock_irqrestore(&cm.lock, flags);

	if (cur_cm_id_priv) {
		cm_id->state = IB_CM_IDLE;
		kfree(cm_id_priv->compare_data);
		cm_id_priv->compare_data = NULL;
		ret = -EBUSY;
	}
	return ret;
}
EXPORT_SYMBOL(ib_cm_listen);
static __be64 cm_form_tid(struct cm_id_private *cm_id_priv,
			  enum cm_msg_sequence msg_seq)
{
	u64 hi_tid, low_tid;

	hi_tid   = ((u64) cm_id_priv->av.port->mad_agent->hi_tid) << 32;
	low_tid  = (u64) ((__force u32)cm_id_priv->id.local_id |
			  (msg_seq << 30));
	return cpu_to_be64(hi_tid | low_tid);
}
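/*
 * TID layout, as built above: bits 63..32 carry the MAD agent's hi_tid,
 * and the low 32 bits carry the local communication ID with msg_seq OR'd
 * into its top two bits (31..30), so REQ, DREQ, and SIDR exchanges on the
 * same cm_id get distinct transaction IDs.
 */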
static void cm_format_mad_hdr(struct ib_mad_hdr *hdr,
			      __be16 attr_id, __be64 tid)
{
	hdr->base_version  = IB_MGMT_BASE_VERSION;
	hdr->mgmt_class	   = IB_MGMT_CLASS_CM;
	hdr->class_version = IB_CM_CLASS_VERSION;
	hdr->method	   = IB_MGMT_METHOD_SEND;
	hdr->attr_id	   = attr_id;
	hdr->tid	   = tid;
}
static void cm_format_req(struct cm_req_msg *req_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_cm_req_param *param)
{
	cm_format_mad_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_REQ));

	req_msg->local_comm_id = cm_id_priv->id.local_id;
	req_msg->service_id = param->service_id;
	req_msg->local_ca_guid = cm_id_priv->id.device->node_guid;
	cm_req_set_local_qpn(req_msg, cpu_to_be32(param->qp_num));
	cm_req_set_resp_res(req_msg, param->responder_resources);
	cm_req_set_init_depth(req_msg, param->initiator_depth);
	cm_req_set_remote_resp_timeout(req_msg,
				       param->remote_cm_response_timeout);
	cm_req_set_qp_type(req_msg, param->qp_type);
	cm_req_set_flow_ctrl(req_msg, param->flow_control);
	cm_req_set_starting_psn(req_msg, cpu_to_be32(param->starting_psn));
	cm_req_set_local_resp_timeout(req_msg,
				      param->local_cm_response_timeout);
	cm_req_set_retry_count(req_msg, param->retry_count);
	req_msg->pkey = param->primary_path->pkey;
	cm_req_set_path_mtu(req_msg, param->primary_path->mtu);
	cm_req_set_rnr_retry_count(req_msg, param->rnr_retry_count);
	cm_req_set_max_cm_retries(req_msg, param->max_cm_retries);
	cm_req_set_srq(req_msg, param->srq);

	req_msg->primary_local_lid = param->primary_path->slid;
	req_msg->primary_remote_lid = param->primary_path->dlid;
	req_msg->primary_local_gid = param->primary_path->sgid;
	req_msg->primary_remote_gid = param->primary_path->dgid;
	cm_req_set_primary_flow_label(req_msg, param->primary_path->flow_label);
	cm_req_set_primary_packet_rate(req_msg, param->primary_path->rate);
	req_msg->primary_traffic_class = param->primary_path->traffic_class;
	req_msg->primary_hop_limit = param->primary_path->hop_limit;
	cm_req_set_primary_sl(req_msg, param->primary_path->sl);
	cm_req_set_primary_subnet_local(req_msg, 1); /* local only... */
	cm_req_set_primary_local_ack_timeout(req_msg,
		min(31, param->primary_path->packet_life_time + 1));

	if (param->alternate_path) {
		req_msg->alt_local_lid = param->alternate_path->slid;
		req_msg->alt_remote_lid = param->alternate_path->dlid;
		req_msg->alt_local_gid = param->alternate_path->sgid;
		req_msg->alt_remote_gid = param->alternate_path->dgid;
		cm_req_set_alt_flow_label(req_msg,
					  param->alternate_path->flow_label);
		cm_req_set_alt_packet_rate(req_msg, param->alternate_path->rate);
		req_msg->alt_traffic_class = param->alternate_path->traffic_class;
		req_msg->alt_hop_limit = param->alternate_path->hop_limit;
		cm_req_set_alt_sl(req_msg, param->alternate_path->sl);
		cm_req_set_alt_subnet_local(req_msg, 1); /* local only... */
		cm_req_set_alt_local_ack_timeout(req_msg,
			min(31, param->alternate_path->packet_life_time + 1));
	}

	if (param->private_data && param->private_data_len)
		memcpy(req_msg->private_data, param->private_data,
		       param->private_data_len);
}
static int cm_validate_req_param(struct ib_cm_req_param *param)
{
	/* peer-to-peer not supported */
	if (param->peer_to_peer)
		return -EINVAL;

	if (!param->primary_path)
		return -EINVAL;

	if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC)
		return -EINVAL;

	if (param->private_data &&
	    param->private_data_len > IB_CM_REQ_PRIVATE_DATA_SIZE)
		return -EINVAL;

	if (param->alternate_path &&
	    (param->alternate_path->pkey != param->primary_path->pkey ||
	     param->alternate_path->mtu != param->primary_path->mtu))
		return -EINVAL;

	return 0;
}
int ib_send_cm_req(struct ib_cm_id *cm_id,
		   struct ib_cm_req_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct cm_req_msg *req_msg;
	unsigned long flags;
	int ret;

	ret = cm_validate_req_param(param);
	if (ret)
		return ret;

	/* Verify that we're not in timewait. */
	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_IDLE) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = -EINVAL;
		goto out;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
							    id.local_id);
	if (IS_ERR(cm_id_priv->timewait_info)) {
		ret = PTR_ERR(cm_id_priv->timewait_info);
		goto out;
	}

	ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av);
	if (ret)
		goto error1;
	if (param->alternate_path) {
		ret = cm_init_av_by_path(param->alternate_path,
					 &cm_id_priv->alt_av);
		if (ret)
			goto error1;
	}
	cm_id->service_id = param->service_id;
	cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
	cm_id_priv->timeout_ms = cm_convert_to_ms(
				    param->primary_path->packet_life_time) * 2 +
				 cm_convert_to_ms(
				    param->remote_cm_response_timeout);
	cm_id_priv->max_cm_retries = param->max_cm_retries;
	cm_id_priv->initiator_depth = param->initiator_depth;
	cm_id_priv->responder_resources = param->responder_resources;
	cm_id_priv->retry_count = param->retry_count;
	cm_id_priv->path_mtu = param->primary_path->mtu;
	cm_id_priv->pkey = param->primary_path->pkey;
	cm_id_priv->qp_type = param->qp_type;

	ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg);
	if (ret)
		goto error1;

	req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad;
	cm_format_req(req_msg, cm_id_priv, param);
	cm_id_priv->tid = req_msg->hdr.tid;
	cm_id_priv->msg->timeout_ms = cm_id_priv->timeout_ms;
	cm_id_priv->msg->context[1] = (void *) (unsigned long) IB_CM_REQ_SENT;

	cm_id_priv->local_qpn = cm_req_get_local_qpn(req_msg);
	cm_id_priv->rq_psn = cm_req_get_starting_psn(req_msg);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	ret = ib_post_send_mad(cm_id_priv->msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto error2;
	}
	BUG_ON(cm_id->state != IB_CM_IDLE);
	cm_id->state = IB_CM_REQ_SENT;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error2:	cm_free_msg(cm_id_priv->msg);
error1:	kfree(cm_id_priv->timewait_info);
out:	return ret;
}
EXPORT_SYMBOL(ib_send_cm_req);
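/*
 * Illustrative active-side sketch; path_rec, my_service_id, qpn, and psn
 * are hypothetical consumer values, and the fields shown are the ones
 * cm_format_req() above consumes:
 *
 *	struct ib_cm_req_param req = {
 *		.primary_path		= &path_rec,
 *		.service_id		= my_service_id,
 *		.qp_num			= qpn,
 *		.qp_type		= IB_QPT_RC,
 *		.starting_psn		= psn,
 *		.responder_resources	= 4,
 *		.initiator_depth	= 4,
 *		.retry_count		= 7,
 *		.rnr_retry_count	= 7,
 *		.max_cm_retries		= 15,
 *	};
 *
 *	ret = ib_send_cm_req(cm_id, &req);
 *
 * The REP/RTU exchange then arrives as events on the cm_id's handler.
 */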
static int cm_issue_rej(struct cm_port *port,
			struct ib_mad_recv_wc *mad_recv_wc,
			enum ib_cm_rej_reason reason,
			enum cm_msg_response msg_rejected,
			void *ari, u8 ari_length)
{
	struct ib_mad_send_buf *msg = NULL;
	struct cm_rej_msg *rej_msg, *rcv_msg;
	int ret;

	ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
	if (ret)
		return ret;

	/* We just need common CM header information.  Cast to any message. */
	rcv_msg = (struct cm_rej_msg *) mad_recv_wc->recv_buf.mad;
	rej_msg = (struct cm_rej_msg *) msg->mad;

	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, rcv_msg->hdr.tid);
	rej_msg->remote_comm_id = rcv_msg->local_comm_id;
	rej_msg->local_comm_id = rcv_msg->remote_comm_id;
	cm_rej_set_msg_rejected(rej_msg, msg_rejected);
	rej_msg->reason = cpu_to_be16(reason);

	if (ari && ari_length) {
		cm_rej_set_reject_info_len(rej_msg, ari_length);
		memcpy(rej_msg->ari, ari, ari_length);
	}

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		cm_free_msg(msg);

	return ret;
}
static inline int cm_is_active_peer(__be64 local_ca_guid, __be64 remote_ca_guid,
				    __be32 local_qpn, __be32 remote_qpn)
{
	return (be64_to_cpu(local_ca_guid) > be64_to_cpu(remote_ca_guid) ||
		((local_ca_guid == remote_ca_guid) &&
		 (be32_to_cpu(local_qpn) > be32_to_cpu(remote_qpn))));
}
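/*
 * Example: with CA GUIDs 0x10 and 0x20, the 0x20 side is the active peer;
 * if both GUIDs match (loopback), the larger QPN wins the tie-break.
 */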
static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
				     struct ib_sa_path_rec *primary_path,
				     struct ib_sa_path_rec *alt_path)
{
	memset(primary_path, 0, sizeof *primary_path);
	primary_path->dgid = req_msg->primary_local_gid;
	primary_path->sgid = req_msg->primary_remote_gid;
	primary_path->dlid = req_msg->primary_local_lid;
	primary_path->slid = req_msg->primary_remote_lid;
	primary_path->flow_label = cm_req_get_primary_flow_label(req_msg);
	primary_path->hop_limit = req_msg->primary_hop_limit;
	primary_path->traffic_class = req_msg->primary_traffic_class;
	primary_path->reversible = 1;
	primary_path->pkey = req_msg->pkey;
	primary_path->sl = cm_req_get_primary_sl(req_msg);
	primary_path->mtu_selector = IB_SA_EQ;
	primary_path->mtu = cm_req_get_path_mtu(req_msg);
	primary_path->rate_selector = IB_SA_EQ;
	primary_path->rate = cm_req_get_primary_packet_rate(req_msg);
	primary_path->packet_life_time_selector = IB_SA_EQ;
	primary_path->packet_life_time =
		cm_req_get_primary_local_ack_timeout(req_msg);
	primary_path->packet_life_time -= (primary_path->packet_life_time > 0);

	if (req_msg->alt_local_lid) {
		memset(alt_path, 0, sizeof *alt_path);
		alt_path->dgid = req_msg->alt_local_gid;
		alt_path->sgid = req_msg->alt_remote_gid;
		alt_path->dlid = req_msg->alt_local_lid;
		alt_path->slid = req_msg->alt_remote_lid;
		alt_path->flow_label = cm_req_get_alt_flow_label(req_msg);
		alt_path->hop_limit = req_msg->alt_hop_limit;
		alt_path->traffic_class = req_msg->alt_traffic_class;
		alt_path->reversible = 1;
		alt_path->pkey = req_msg->pkey;
		alt_path->sl = cm_req_get_alt_sl(req_msg);
		alt_path->mtu_selector = IB_SA_EQ;
		alt_path->mtu = cm_req_get_path_mtu(req_msg);
		alt_path->rate_selector = IB_SA_EQ;
		alt_path->rate = cm_req_get_alt_packet_rate(req_msg);
		alt_path->packet_life_time_selector = IB_SA_EQ;
		alt_path->packet_life_time =
			cm_req_get_alt_local_ack_timeout(req_msg);
		alt_path->packet_life_time -= (alt_path->packet_life_time > 0);
	}
}
static void cm_format_req_event(struct cm_work *work,
				struct cm_id_private *cm_id_priv,
				struct ib_cm_id *listen_id)
{
	struct cm_req_msg *req_msg;
	struct ib_cm_req_event_param *param;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.req_rcvd;
	param->listen_id = listen_id;
	param->port = cm_id_priv->av.port->port_num;
	param->primary_path = &work->path[0];
	if (req_msg->alt_local_lid)
		param->alternate_path = &work->path[1];
	else
		param->alternate_path = NULL;
	param->remote_ca_guid = req_msg->local_ca_guid;
	param->remote_qkey = be32_to_cpu(req_msg->local_qkey);
	param->remote_qpn = be32_to_cpu(cm_req_get_local_qpn(req_msg));
	param->qp_type = cm_req_get_qp_type(req_msg);
	param->starting_psn = be32_to_cpu(cm_req_get_starting_psn(req_msg));
	param->responder_resources = cm_req_get_init_depth(req_msg);
	param->initiator_depth = cm_req_get_resp_res(req_msg);
	param->local_cm_response_timeout =
					cm_req_get_remote_resp_timeout(req_msg);
	param->flow_control = cm_req_get_flow_ctrl(req_msg);
	param->remote_cm_response_timeout =
					cm_req_get_local_resp_timeout(req_msg);
	param->retry_count = cm_req_get_retry_count(req_msg);
	param->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
	param->srq = cm_req_get_srq(req_msg);
	work->cm_event.private_data = &req_msg->private_data;
}
static void cm_process_work(struct cm_id_private *cm_id_priv,
			    struct cm_work *work)
{
	unsigned long flags;
	int ret;

	/* We will typically only have the current event to report. */
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
	cm_free_work(work);

	while (!ret && !atomic_add_negative(-1, &cm_id_priv->work_count)) {
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		work = cm_dequeue_work(cm_id_priv);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		BUG_ON(!work);
		ret = cm_id_priv->id.cm_handler(&cm_id_priv->id,
						&work->cm_event);
		cm_free_work(work);
	}
	cm_deref_id(cm_id_priv);
	if (ret)
		cm_destroy_id(&cm_id_priv->id, ret);
}
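/*
 * work_count starts at -1 for an idle cm_id.  Receive handlers increment
 * it and dispatch here directly only when it reaches zero, queueing the
 * event otherwise; the loop above then drains that queue before the
 * reference is dropped.
 */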
static void cm_format_mra(struct cm_mra_msg *mra_msg,
			  struct cm_id_private *cm_id_priv,
			  enum cm_msg_response msg_mraed, u8 service_timeout,
			  const void *private_data, u8 private_data_len)
{
	cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid);
	cm_mra_set_msg_mraed(mra_msg, msg_mraed);
	mra_msg->local_comm_id = cm_id_priv->id.local_id;
	mra_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_mra_set_service_timeout(mra_msg, service_timeout);

	if (private_data && private_data_len)
		memcpy(mra_msg->private_data, private_data, private_data_len);
}
static void cm_format_rej(struct cm_rej_msg *rej_msg,
			  struct cm_id_private *cm_id_priv,
			  enum ib_cm_rej_reason reason,
			  void *ari,
			  u8 ari_length,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, cm_id_priv->tid);
	rej_msg->remote_comm_id = cm_id_priv->id.remote_id;

	switch(cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
		rej_msg->local_comm_id = 0;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
		break;
	case IB_CM_MRA_REQ_SENT:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
		break;
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REP);
		break;
	default:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_OTHER);
		break;
	}

	rej_msg->reason = cpu_to_be16(reason);
	if (ari && ari_length) {
		cm_rej_set_reject_info_len(rej_msg, ari_length);
		memcpy(rej_msg->ari, ari, ari_length);
	}

	if (private_data && private_data_len)
		memcpy(rej_msg->private_data, private_data, private_data_len);
}
static void cm_dup_req_handler(struct cm_work *work,
			       struct cm_id_private *cm_id_priv)
{
	struct ib_mad_send_buf *msg = NULL;
	unsigned long flags;
	int ret;

	/* Quick state check to discard duplicate REQs. */
	if (cm_id_priv->id.state == IB_CM_REQ_RCVD)
		return;

	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
	if (ret)
		return;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_MRA_REQ_SENT:
		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REQ, cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
		break;
	case IB_CM_TIMEWAIT:
		cm_format_rej((struct cm_rej_msg *) msg->mad, cm_id_priv,
			      IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0);
		break;
	default:
		goto unlock;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		goto free;
	return;

unlock:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
free:	cm_free_msg(msg);
}
static struct cm_id_private * cm_match_req(struct cm_work *work,
					   struct cm_id_private *cm_id_priv)
{
	struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv;
	struct cm_timewait_info *timewait_info;
	struct cm_req_msg *req_msg;
	unsigned long flags;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

	/* Check for duplicate REQ and stale connections. */
	spin_lock_irqsave(&cm.lock, flags);
	timewait_info = cm_insert_remote_id(cm_id_priv->timewait_info);
	if (!timewait_info)
		timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);

	if (timewait_info) {
		cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
					   timewait_info->work.remote_id);
		cm_cleanup_timewait(cm_id_priv->timewait_info);
		spin_unlock_irqrestore(&cm.lock, flags);
		if (cur_cm_id_priv) {
			cm_dup_req_handler(work, cur_cm_id_priv);
			cm_deref_id(cur_cm_id_priv);
		} else
			cm_issue_rej(work->port, work->mad_recv_wc,
				     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ,
				     NULL, 0);
		listen_cm_id_priv = NULL;
		goto out;
	}

	/* Find matching listen request. */
	listen_cm_id_priv = cm_find_listen(cm_id_priv->id.device,
					   req_msg->service_id,
					   req_msg->private_data);
	if (!listen_cm_id_priv) {
		cm_cleanup_timewait(cm_id_priv->timewait_info);
		spin_unlock_irqrestore(&cm.lock, flags);
		cm_issue_rej(work->port, work->mad_recv_wc,
			     IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ,
			     NULL, 0);
		goto out;
	}
	atomic_inc(&listen_cm_id_priv->refcount);
	atomic_inc(&cm_id_priv->refcount);
	cm_id_priv->id.state = IB_CM_REQ_RCVD;
	atomic_inc(&cm_id_priv->work_count);
	spin_unlock_irqrestore(&cm.lock, flags);
out:
	return listen_cm_id_priv;
}
static int cm_req_handler(struct cm_work *work)
{
	struct ib_cm_id *cm_id;
	struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
	struct cm_req_msg *req_msg;
	int ret;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

	cm_id = ib_create_cm_id(work->port->cm_dev->device, NULL, NULL);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	cm_id_priv->id.remote_id = req_msg->local_comm_id;
	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
				work->mad_recv_wc->recv_buf.grh,
				&cm_id_priv->av);
	cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
							    id.local_id);
	if (IS_ERR(cm_id_priv->timewait_info)) {
		ret = PTR_ERR(cm_id_priv->timewait_info);
		goto destroy;
	}
	cm_id_priv->timewait_info->work.remote_id = req_msg->local_comm_id;
	cm_id_priv->timewait_info->remote_ca_guid = req_msg->local_ca_guid;
	cm_id_priv->timewait_info->remote_qpn = cm_req_get_local_qpn(req_msg);

	listen_cm_id_priv = cm_match_req(work, cm_id_priv);
	if (!listen_cm_id_priv) {
		ret = -EINVAL;
		kfree(cm_id_priv->timewait_info);
		goto destroy;
	}

	cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
	cm_id_priv->id.context = listen_cm_id_priv->id.context;
	cm_id_priv->id.service_id = req_msg->service_id;
	cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);

	cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);
	ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av);
	if (ret) {
		ib_get_cached_gid(work->port->cm_dev->device,
				  work->port->port_num, 0, &work->path[0].sgid);
		ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_GID,
			       &work->path[0].sgid, sizeof work->path[0].sgid,
			       NULL, 0);
		goto rejected;
	}
	if (req_msg->alt_local_lid) {
		ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av);
		if (ret) {
			ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID,
				       &work->path[0].sgid,
				       sizeof work->path[0].sgid, NULL, 0);
			goto rejected;
		}
	}
	cm_id_priv->tid = req_msg->hdr.tid;
	cm_id_priv->timeout_ms = cm_convert_to_ms(
					cm_req_get_local_resp_timeout(req_msg));
	cm_id_priv->max_cm_retries = cm_req_get_max_cm_retries(req_msg);
	cm_id_priv->remote_qpn = cm_req_get_local_qpn(req_msg);
	cm_id_priv->initiator_depth = cm_req_get_resp_res(req_msg);
	cm_id_priv->responder_resources = cm_req_get_init_depth(req_msg);
	cm_id_priv->path_mtu = cm_req_get_path_mtu(req_msg);
	cm_id_priv->pkey = req_msg->pkey;
	cm_id_priv->sq_psn = cm_req_get_starting_psn(req_msg);
	cm_id_priv->retry_count = cm_req_get_retry_count(req_msg);
	cm_id_priv->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
	cm_id_priv->qp_type = cm_req_get_qp_type(req_msg);

	cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
	cm_process_work(cm_id_priv, work);
	cm_deref_id(listen_cm_id_priv);
	return 0;

rejected:
	atomic_dec(&cm_id_priv->refcount);
	cm_deref_id(listen_cm_id_priv);
destroy:
	ib_destroy_cm_id(cm_id);
	return ret;
}
static void cm_format_rep(struct cm_rep_msg *rep_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_cm_rep_param *param)
{
	cm_format_mad_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid);
	rep_msg->local_comm_id = cm_id_priv->id.local_id;
	rep_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_rep_set_local_qpn(rep_msg, cpu_to_be32(param->qp_num));
	cm_rep_set_starting_psn(rep_msg, cpu_to_be32(param->starting_psn));
	rep_msg->resp_resources = param->responder_resources;
	rep_msg->initiator_depth = param->initiator_depth;
	cm_rep_set_target_ack_delay(rep_msg, param->target_ack_delay);
	cm_rep_set_failover(rep_msg, param->failover_accepted);
	cm_rep_set_flow_ctrl(rep_msg, param->flow_control);
	cm_rep_set_rnr_retry_count(rep_msg, param->rnr_retry_count);
	cm_rep_set_srq(rep_msg, param->srq);
	rep_msg->local_ca_guid = cm_id_priv->id.device->node_guid;

	if (param->private_data && param->private_data_len)
		memcpy(rep_msg->private_data, param->private_data,
		       param->private_data_len);
}
int ib_send_cm_rep(struct ib_cm_id *cm_id,
		   struct ib_cm_rep_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	struct cm_rep_msg *rep_msg;
	unsigned long flags;
	int ret;

	if (param->private_data &&
	    param->private_data_len > IB_CM_REP_PRIVATE_DATA_SIZE)
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_REQ_RCVD &&
	    cm_id->state != IB_CM_MRA_REQ_SENT) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	rep_msg = (struct cm_rep_msg *) msg->mad;
	cm_format_rep(rep_msg, cm_id_priv, param);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_REP_SENT;

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->state = IB_CM_REP_SENT;
	cm_id_priv->msg = msg;
	cm_id_priv->initiator_depth = param->initiator_depth;
	cm_id_priv->responder_resources = param->responder_resources;
	cm_id_priv->rq_psn = cm_rep_get_starting_psn(rep_msg);
	cm_id_priv->local_qpn = cm_rep_get_local_qpn(rep_msg);

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_rep);
static void cm_format_rtu(struct cm_rtu_msg *rtu_msg,
			  struct cm_id_private *cm_id_priv,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&rtu_msg->hdr, CM_RTU_ATTR_ID, cm_id_priv->tid);
	rtu_msg->local_comm_id = cm_id_priv->id.local_id;
	rtu_msg->remote_comm_id = cm_id_priv->id.remote_id;

	if (private_data && private_data_len)
		memcpy(rtu_msg->private_data, private_data, private_data_len);
}
int ib_send_cm_rtu(struct ib_cm_id *cm_id,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	void *data;
	int ret;

	if (private_data && private_data_len > IB_CM_RTU_PRIVATE_DATA_SIZE)
		return -EINVAL;

	data = cm_copy_private_data(private_data, private_data_len);
	if (IS_ERR(data))
		return PTR_ERR(data);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_REP_RCVD &&
	    cm_id->state != IB_CM_MRA_REP_SENT) {
		ret = -EINVAL;
		goto error;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto error;

	cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
		      private_data, private_data_len);

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		kfree(data);
		return ret;
	}

	cm_id->state = IB_CM_ESTABLISHED;
	cm_set_private_data(cm_id_priv, data, private_data_len);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	kfree(data);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_rtu);
static void cm_format_rep_event(struct cm_work *work)
{
	struct cm_rep_msg *rep_msg;
	struct ib_cm_rep_event_param *param;

	rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.rep_rcvd;
	param->remote_ca_guid = rep_msg->local_ca_guid;
	param->remote_qkey = be32_to_cpu(rep_msg->local_qkey);
	param->remote_qpn = be32_to_cpu(cm_rep_get_local_qpn(rep_msg));
	param->starting_psn = be32_to_cpu(cm_rep_get_starting_psn(rep_msg));
	param->responder_resources = rep_msg->initiator_depth;
	param->initiator_depth = rep_msg->resp_resources;
	param->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
	param->failover_accepted = cm_rep_get_failover(rep_msg);
	param->flow_control = cm_rep_get_flow_ctrl(rep_msg);
	param->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
	param->srq = cm_rep_get_srq(rep_msg);
	work->cm_event.private_data = &rep_msg->private_data;
}
static void cm_dup_rep_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rep_msg *rep_msg;
	struct ib_mad_send_buf *msg = NULL;
	unsigned long flags;
	int ret;

	rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id,
				   rep_msg->local_comm_id);
	if (!cm_id_priv)
		return;

	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
	if (ret)
		goto deref;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state == IB_CM_ESTABLISHED)
		cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
	else if (cm_id_priv->id.state == IB_CM_MRA_REP_SENT)
		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REP, cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
	else
		goto unlock;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		goto free;
	goto deref;

unlock:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
free:	cm_free_msg(msg);
deref:	cm_deref_id(cm_id_priv);
}
static int cm_rep_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rep_msg *rep_msg;
	unsigned long flags;
	int ret;

	rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id, 0);
	if (!cm_id_priv) {
		cm_dup_rep_handler(work);
		return -EINVAL;
	}

	cm_format_rep_event(work);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
		break;
	default:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = -EINVAL;
		goto error;
	}

	cm_id_priv->timewait_info->work.remote_id = rep_msg->local_comm_id;
	cm_id_priv->timewait_info->remote_ca_guid = rep_msg->local_ca_guid;
	cm_id_priv->timewait_info->remote_qpn = cm_rep_get_local_qpn(rep_msg);

	spin_lock(&cm.lock);
	/* Check for duplicate REP. */
	if (cm_insert_remote_id(cm_id_priv->timewait_info)) {
		spin_unlock(&cm.lock);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = -EINVAL;
		goto error;
	}
	/* Check for a stale connection. */
	if (cm_insert_remote_qpn(cm_id_priv->timewait_info)) {
		rb_erase(&cm_id_priv->timewait_info->remote_id_node,
			 &cm.remote_id_table);
		cm_id_priv->timewait_info->inserted_remote_id = 0;
		spin_unlock(&cm.lock);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_issue_rej(work->port, work->mad_recv_wc,
			     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,
			     NULL, 0);
		ret = -EINVAL;
		goto error;
	}
	spin_unlock(&cm.lock);

	cm_id_priv->id.state = IB_CM_REP_RCVD;
	cm_id_priv->id.remote_id = rep_msg->local_comm_id;
	cm_id_priv->remote_qpn = cm_rep_get_local_qpn(rep_msg);
	cm_id_priv->initiator_depth = rep_msg->resp_resources;
	cm_id_priv->responder_resources = rep_msg->initiator_depth;
	cm_id_priv->sq_psn = cm_rep_get_starting_psn(rep_msg);
	cm_id_priv->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);

	/* todo: handle peer_to_peer */

	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;

error:
	cm_deref_id(cm_id_priv);
	return ret;
}
static int cm_establish_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	unsigned long flags;
	int ret;

	/* See comment in cm_establish about lookup. */
	cm_id_priv = cm_acquire_id(work->local_id, work->remote_id);
	if (!cm_id_priv)
		return -EINVAL;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto out;
	}

	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
static int cm_rtu_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rtu_msg *rtu_msg;
	unsigned long flags;
	int ret;

	rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(rtu_msg->remote_comm_id,
				   rtu_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL;

	work->cm_event.private_data = &rtu_msg->private_data;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_REP_SENT &&
	    cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto out;
	}
	cm_id_priv->id.state = IB_CM_ESTABLISHED;

	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
static void cm_format_dreq(struct cm_dreq_msg *dreq_msg,
			   struct cm_id_private *cm_id_priv,
			   const void *private_data,
			   u8 private_data_len)
{
	cm_format_mad_hdr(&dreq_msg->hdr, CM_DREQ_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_DREQ));
	dreq_msg->local_comm_id = cm_id_priv->id.local_id;
	dreq_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_dreq_set_remote_qpn(dreq_msg, cm_id_priv->remote_qpn);

	if (private_data && private_data_len)
		memcpy(dreq_msg->private_data, private_data, private_data_len);
}
int ib_send_cm_dreq(struct ib_cm_id *cm_id,
		    const void *private_data,
		    u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if (private_data && private_data_len > IB_CM_DREQ_PRIVATE_DATA_SIZE)
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_ESTABLISHED) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret) {
		cm_enter_timewait(cm_id_priv);
		goto out;
	}

	cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv,
		       private_data, private_data_len);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_DREQ_SENT;

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		cm_enter_timewait(cm_id_priv);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->state = IB_CM_DREQ_SENT;
	cm_id_priv->msg = msg;
out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_dreq);
static void cm_format_drep(struct cm_drep_msg *drep_msg,
			   struct cm_id_private *cm_id_priv,
			   const void *private_data,
			   u8 private_data_len)
{
	cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, cm_id_priv->tid);
	drep_msg->local_comm_id = cm_id_priv->id.local_id;
	drep_msg->remote_comm_id = cm_id_priv->id.remote_id;

	if (private_data && private_data_len)
		memcpy(drep_msg->private_data, private_data, private_data_len);
}
int ib_send_cm_drep(struct ib_cm_id *cm_id,
		    const void *private_data,
		    u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	void *data;
	int ret;

	if (private_data && private_data_len > IB_CM_DREP_PRIVATE_DATA_SIZE)
		return -EINVAL;

	data = cm_copy_private_data(private_data, private_data_len);
	if (IS_ERR(data))
		return PTR_ERR(data);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_DREQ_RCVD) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		kfree(data);
		return -EINVAL;
	}

	cm_set_private_data(cm_id_priv, data, private_data_len);
	cm_enter_timewait(cm_id_priv);

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
		       private_data, private_data_len);

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_drep);
static int cm_issue_drep(struct cm_port *port,
			 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_send_buf *msg = NULL;
	struct cm_dreq_msg *dreq_msg;
	struct cm_drep_msg *drep_msg;
	int ret;

	ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
	if (ret)
		return ret;

	dreq_msg = (struct cm_dreq_msg *) mad_recv_wc->recv_buf.mad;
	drep_msg = (struct cm_drep_msg *) msg->mad;

	cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, dreq_msg->hdr.tid);
	drep_msg->remote_comm_id = dreq_msg->local_comm_id;
	drep_msg->local_comm_id = dreq_msg->remote_comm_id;

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		cm_free_msg(msg);

	return ret;
}
static int cm_dreq_handler(struct cm_work *work)
{
        struct cm_id_private *cm_id_priv;
        struct cm_dreq_msg *dreq_msg;
        struct ib_mad_send_buf *msg = NULL;
        unsigned long flags;
        int ret;

        dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad;
        cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
                                   dreq_msg->local_comm_id);
        if (!cm_id_priv) {
                cm_issue_drep(work->port, work->mad_recv_wc);
                return -EINVAL;
        }

        work->cm_event.private_data = &dreq_msg->private_data;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id_priv->local_qpn != cm_dreq_get_remote_qpn(dreq_msg))
                goto unlock;

        switch (cm_id_priv->id.state) {
        case IB_CM_REP_SENT:
        case IB_CM_DREQ_SENT:
                ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
                break;
        case IB_CM_ESTABLISHED:
        case IB_CM_MRA_REP_RCVD:
                break;
        case IB_CM_TIMEWAIT:
                if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
                        goto unlock;

                cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
                               cm_id_priv->private_data,
                               cm_id_priv->private_data_len);
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);

                if (ib_post_send_mad(msg, NULL))
                        cm_free_msg(msg);
                goto deref;
        default:
                goto unlock;
        }
        cm_id_priv->id.state = IB_CM_DREQ_RCVD;
        cm_id_priv->tid = dreq_msg->hdr.tid;
        ret = atomic_inc_and_test(&cm_id_priv->work_count);
        if (!ret)
                list_add_tail(&work->list, &cm_id_priv->work_list);
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        if (ret)
                cm_process_work(cm_id_priv, work);
        else
                cm_deref_id(cm_id_priv);
        return 0;

unlock: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
deref:  cm_deref_id(cm_id_priv);
        return -EINVAL;
}
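/*
 * Handle a received DREP: valid only in the DREQ_SENT/DREQ_RCVD states.
 * The connection enters timewait and the outstanding DREQ is canceled.
 */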
static int cm_drep_handler(struct cm_work *work)
{
        struct cm_id_private *cm_id_priv;
        struct cm_drep_msg *drep_msg;
        unsigned long flags;
        int ret;

        drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad;
        cm_id_priv = cm_acquire_id(drep_msg->remote_comm_id,
                                   drep_msg->local_comm_id);
        if (!cm_id_priv)
                return -EINVAL;

        work->cm_event.private_data = &drep_msg->private_data;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id_priv->id.state != IB_CM_DREQ_SENT &&
            cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                goto out;
        }
        cm_enter_timewait(cm_id_priv);

        ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
        ret = atomic_inc_and_test(&cm_id_priv->work_count);
        if (!ret)
                list_add_tail(&work->list, &cm_id_priv->work_list);
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        if (ret)
                cm_process_work(cm_id_priv, work);
        else
                cm_deref_id(cm_id_priv);
        return 0;
out:
        cm_deref_id(cm_id_priv);
        return -EINVAL;
}
int ib_send_cm_rej(struct ib_cm_id *cm_id,
                   enum ib_cm_rej_reason reason,
                   void *ari,
                   u8 ari_length,
                   const void *private_data,
                   u8 private_data_len)
{
        struct cm_id_private *cm_id_priv;
        struct ib_mad_send_buf *msg;
        unsigned long flags;
        int ret;

        if ((private_data && private_data_len > IB_CM_REJ_PRIVATE_DATA_SIZE) ||
            (ari && ari_length > IB_CM_REJ_ARI_LENGTH))
                return -EINVAL;

        cm_id_priv = container_of(cm_id, struct cm_id_private, id);

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id->state) {
        case IB_CM_REQ_SENT:
        case IB_CM_MRA_REQ_RCVD:
        case IB_CM_REQ_RCVD:
        case IB_CM_MRA_REQ_SENT:
        case IB_CM_REP_RCVD:
        case IB_CM_MRA_REP_SENT:
                ret = cm_alloc_msg(cm_id_priv, &msg);
                if (!ret)
                        cm_format_rej((struct cm_rej_msg *) msg->mad,
                                      cm_id_priv, reason, ari, ari_length,
                                      private_data, private_data_len);

                cm_reset_to_idle(cm_id_priv);
                break;
        case IB_CM_REP_SENT:
        case IB_CM_MRA_REP_RCVD:
                ret = cm_alloc_msg(cm_id_priv, &msg);
                if (!ret)
                        cm_format_rej((struct cm_rej_msg *) msg->mad,
                                      cm_id_priv, reason, ari, ari_length,
                                      private_data, private_data_len);

                cm_enter_timewait(cm_id_priv);
                break;
        default:
                ret = -EINVAL;
                goto out;
        }

        if (ret)
                goto out;

        ret = ib_post_send_mad(msg, NULL);
        if (ret)
                cm_free_msg(msg);

out:    spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return ret;
}
EXPORT_SYMBOL(ib_send_cm_rej);
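/*
 * Translate a received REJ MAD into the reject event parameters (reason,
 * ARI) reported to the consumer.
 */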
static void cm_format_rej_event(struct cm_work *work)
{
        struct cm_rej_msg *rej_msg;
        struct ib_cm_rej_event_param *param;

        rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
        param = &work->cm_event.param.rej_rcvd;
        param->ari = rej_msg->ari;
        param->ari_length = cm_rej_get_reject_info_len(rej_msg);
        param->reason = __be16_to_cpu(rej_msg->reason);
        work->cm_event.private_data = &rej_msg->private_data;
}
static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
{
        struct cm_timewait_info *timewait_info;
        struct cm_id_private *cm_id_priv;
        unsigned long flags;
        __be32 remote_id;

        remote_id = rej_msg->local_comm_id;

        if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_TIMEOUT) {
                spin_lock_irqsave(&cm.lock, flags);
                timewait_info = cm_find_remote_id( *((__be64 *) rej_msg->ari),
                                                   remote_id);
                if (!timewait_info) {
                        spin_unlock_irqrestore(&cm.lock, flags);
                        return NULL;
                }
                cm_id_priv = idr_find(&cm.local_id_table, (__force int)
                                      (timewait_info->work.local_id ^
                                       cm.random_id_operand));
                if (cm_id_priv) {
                        if (cm_id_priv->id.remote_id == remote_id)
                                atomic_inc(&cm_id_priv->refcount);
                        else
                                cm_id_priv = NULL;
                }
                spin_unlock_irqrestore(&cm.lock, flags);
        } else if (cm_rej_get_msg_rejected(rej_msg) == CM_MSG_RESPONSE_REQ)
                cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, 0);
        else
                cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, remote_id);

        return cm_id_priv;
}
static int cm_rej_handler(struct cm_work *work)
{
        struct cm_id_private *cm_id_priv;
        struct cm_rej_msg *rej_msg;
        unsigned long flags;
        int ret;

        rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
        cm_id_priv = cm_acquire_rejected_id(rej_msg);
        if (!cm_id_priv)
                return -EINVAL;

        cm_format_rej_event(work);

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id_priv->id.state) {
        case IB_CM_REQ_SENT:
        case IB_CM_MRA_REQ_RCVD:
        case IB_CM_REP_SENT:
        case IB_CM_MRA_REP_RCVD:
                ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
                /* fall through */
        case IB_CM_REQ_RCVD:
        case IB_CM_MRA_REQ_SENT:
                if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_STALE_CONN)
                        cm_enter_timewait(cm_id_priv);
                else
                        cm_reset_to_idle(cm_id_priv);
                break;
        case IB_CM_DREQ_SENT:
                ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
                /* fall through */
        case IB_CM_REP_RCVD:
        case IB_CM_MRA_REP_SENT:
        case IB_CM_ESTABLISHED:
                cm_enter_timewait(cm_id_priv);
                break;
        default:
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                ret = -EINVAL;
                goto out;
        }

        ret = atomic_inc_and_test(&cm_id_priv->work_count);
        if (!ret)
                list_add_tail(&work->list, &cm_id_priv->work_list);
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        if (ret)
                cm_process_work(cm_id_priv, work);
        else
                cm_deref_id(cm_id_priv);
        return 0;
out:
        cm_deref_id(cm_id_priv);
        return ret;
}
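/*
 * Send an MRA (Message Receipt Acknowledgment).  The response type and the
 * resulting state depend on what is being acknowledged: a REQ, a REP, or
 * (for LAP processing on an established connection) an "other" message.
 */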
int ib_send_cm_mra(struct ib_cm_id *cm_id,
                   u8 service_timeout,
                   const void *private_data,
                   u8 private_data_len)
{
        struct cm_id_private *cm_id_priv;
        struct ib_mad_send_buf *msg;
        void *data;
        unsigned long flags;
        int ret;

        if (private_data && private_data_len > IB_CM_MRA_PRIVATE_DATA_SIZE)
                return -EINVAL;

        data = cm_copy_private_data(private_data, private_data_len);
        if (IS_ERR(data))
                return PTR_ERR(data);

        cm_id_priv = container_of(cm_id, struct cm_id_private, id);

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch(cm_id_priv->id.state) {
        case IB_CM_REQ_RCVD:
                ret = cm_alloc_msg(cm_id_priv, &msg);
                if (ret)
                        goto error1;

                cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
                              CM_MSG_RESPONSE_REQ, service_timeout,
                              private_data, private_data_len);
                ret = ib_post_send_mad(msg, NULL);
                if (ret)
                        goto error2;
                cm_id->state = IB_CM_MRA_REQ_SENT;
                break;
        case IB_CM_REP_RCVD:
                ret = cm_alloc_msg(cm_id_priv, &msg);
                if (ret)
                        goto error1;

                cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
                              CM_MSG_RESPONSE_REP, service_timeout,
                              private_data, private_data_len);
                ret = ib_post_send_mad(msg, NULL);
                if (ret)
                        goto error2;
                cm_id->state = IB_CM_MRA_REP_SENT;
                break;
        case IB_CM_ESTABLISHED:
                ret = cm_alloc_msg(cm_id_priv, &msg);
                if (ret)
                        goto error1;

                cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
                              CM_MSG_RESPONSE_OTHER, service_timeout,
                              private_data, private_data_len);
                ret = ib_post_send_mad(msg, NULL);
                if (ret)
                        goto error2;
                cm_id->lap_state = IB_CM_MRA_LAP_SENT;
                break;
        default:
                ret = -EINVAL;
                goto error1;
        }
        cm_id_priv->service_timeout = service_timeout;
        cm_set_private_data(cm_id_priv, data, private_data_len);
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return 0;

error1: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        kfree(data);
        return ret;

error2: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        kfree(data);
        cm_free_msg(msg);
        return ret;
}
EXPORT_SYMBOL(ib_send_cm_mra);
static struct cm_id_private * cm_acquire_mraed_id(struct cm_mra_msg *mra_msg)
{
        switch (cm_mra_get_msg_mraed(mra_msg)) {
        case CM_MSG_RESPONSE_REQ:
                return cm_acquire_id(mra_msg->remote_comm_id, 0);
        case CM_MSG_RESPONSE_REP:
        case CM_MSG_RESPONSE_OTHER:
                return cm_acquire_id(mra_msg->remote_comm_id,
                                     mra_msg->local_comm_id);
        default:
                return NULL;
        }
}
static int cm_mra_handler(struct cm_work *work)
{
        struct cm_id_private *cm_id_priv;
        struct cm_mra_msg *mra_msg;
        unsigned long flags;
        int timeout, ret;

        mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad;
        cm_id_priv = cm_acquire_mraed_id(mra_msg);
        if (!cm_id_priv)
                return -EINVAL;

        work->cm_event.private_data = &mra_msg->private_data;
        work->cm_event.param.mra_rcvd.service_timeout =
                                        cm_mra_get_service_timeout(mra_msg);
        timeout = cm_convert_to_ms(cm_mra_get_service_timeout(mra_msg)) +
                  cm_convert_to_ms(cm_id_priv->av.packet_life_time);

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id_priv->id.state) {
        case IB_CM_REQ_SENT:
                if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REQ ||
                    ib_modify_mad(cm_id_priv->av.port->mad_agent,
                                  cm_id_priv->msg, timeout))
                        goto out;
                cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD;
                break;
        case IB_CM_REP_SENT:
                if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REP ||
                    ib_modify_mad(cm_id_priv->av.port->mad_agent,
                                  cm_id_priv->msg, timeout))
                        goto out;
                cm_id_priv->id.state = IB_CM_MRA_REP_RCVD;
                break;
        case IB_CM_ESTABLISHED:
                if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_OTHER ||
                    cm_id_priv->id.lap_state != IB_CM_LAP_SENT ||
                    ib_modify_mad(cm_id_priv->av.port->mad_agent,
                                  cm_id_priv->msg, timeout))
                        goto out;
                cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD;
                break;
        default:
                goto out;
        }

        cm_id_priv->msg->context[1] = (void *) (unsigned long)
                                      cm_id_priv->id.state;
        ret = atomic_inc_and_test(&cm_id_priv->work_count);
        if (!ret)
                list_add_tail(&work->list, &cm_id_priv->work_list);
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        if (ret)
                cm_process_work(cm_id_priv, work);
        else
                cm_deref_id(cm_id_priv);
        return 0;
out:
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        cm_deref_id(cm_id_priv);
        return -EINVAL;
}
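/*
 * Fill out a LAP (load alternate path) MAD from the sender's view of the
 * alternate path record; the receiver reverses the local/remote fields in
 * cm_format_path_from_lap().
 */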
static void cm_format_lap(struct cm_lap_msg *lap_msg,
                          struct cm_id_private *cm_id_priv,
                          struct ib_sa_path_rec *alternate_path,
                          const void *private_data,
                          u8 private_data_len)
{
        cm_format_mad_hdr(&lap_msg->hdr, CM_LAP_ATTR_ID,
                          cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_LAP));
        lap_msg->local_comm_id = cm_id_priv->id.local_id;
        lap_msg->remote_comm_id = cm_id_priv->id.remote_id;
        cm_lap_set_remote_qpn(lap_msg, cm_id_priv->remote_qpn);
        /* todo: need remote CM response timeout */
        cm_lap_set_remote_resp_timeout(lap_msg, 0x1F);
        lap_msg->alt_local_lid = alternate_path->slid;
        lap_msg->alt_remote_lid = alternate_path->dlid;
        lap_msg->alt_local_gid = alternate_path->sgid;
        lap_msg->alt_remote_gid = alternate_path->dgid;
        cm_lap_set_flow_label(lap_msg, alternate_path->flow_label);
        cm_lap_set_traffic_class(lap_msg, alternate_path->traffic_class);
        lap_msg->alt_hop_limit = alternate_path->hop_limit;
        cm_lap_set_packet_rate(lap_msg, alternate_path->rate);
        cm_lap_set_sl(lap_msg, alternate_path->sl);
        cm_lap_set_subnet_local(lap_msg, 1); /* local only... */
        cm_lap_set_local_ack_timeout(lap_msg,
                min(31, alternate_path->packet_life_time + 1));

        if (private_data && private_data_len)
                memcpy(lap_msg->private_data, private_data, private_data_len);
}
int ib_send_cm_lap(struct ib_cm_id *cm_id,
                   struct ib_sa_path_rec *alternate_path,
                   const void *private_data,
                   u8 private_data_len)
{
        struct cm_id_private *cm_id_priv;
        struct ib_mad_send_buf *msg;
        unsigned long flags;
        int ret;

        if (private_data && private_data_len > IB_CM_LAP_PRIVATE_DATA_SIZE)
                return -EINVAL;

        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id->state != IB_CM_ESTABLISHED ||
            (cm_id->lap_state != IB_CM_LAP_UNINIT &&
             cm_id->lap_state != IB_CM_LAP_IDLE)) {
                ret = -EINVAL;
                goto out;
        }

        ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av);
        if (ret)
                goto out;

        ret = cm_alloc_msg(cm_id_priv, &msg);
        if (ret)
                goto out;

        cm_format_lap((struct cm_lap_msg *) msg->mad, cm_id_priv,
                      alternate_path, private_data, private_data_len);
        msg->timeout_ms = cm_id_priv->timeout_ms;
        msg->context[1] = (void *) (unsigned long) IB_CM_ESTABLISHED;

        ret = ib_post_send_mad(msg, NULL);
        if (ret) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                cm_free_msg(msg);
                return ret;
        }

        cm_id->lap_state = IB_CM_LAP_SENT;
        cm_id_priv->msg = msg;

out:    spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return ret;
}
EXPORT_SYMBOL(ib_send_cm_lap);
static void cm_format_path_from_lap(struct cm_id_private *cm_id_priv,
                                    struct ib_sa_path_rec *path,
                                    struct cm_lap_msg *lap_msg)
{
        memset(path, 0, sizeof *path);
        path->dgid = lap_msg->alt_local_gid;
        path->sgid = lap_msg->alt_remote_gid;
        path->dlid = lap_msg->alt_local_lid;
        path->slid = lap_msg->alt_remote_lid;
        path->flow_label = cm_lap_get_flow_label(lap_msg);
        path->hop_limit = lap_msg->alt_hop_limit;
        path->traffic_class = cm_lap_get_traffic_class(lap_msg);
        path->reversible = 1;
        path->pkey = cm_id_priv->pkey;
        path->sl = cm_lap_get_sl(lap_msg);
        path->mtu_selector = IB_SA_EQ;
        path->mtu = cm_id_priv->path_mtu;
        path->rate_selector = IB_SA_EQ;
        path->rate = cm_lap_get_packet_rate(lap_msg);
        path->packet_life_time_selector = IB_SA_EQ;
        path->packet_life_time = cm_lap_get_local_ack_timeout(lap_msg);
        path->packet_life_time -= (path->packet_life_time > 0);
}
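/*
 * Handle a received LAP.  If we already MRA'd a previous copy, resend the
 * MRA from the stored private data; otherwise record the proposed alternate
 * path and queue an event so the consumer can accept (APR) or reject it.
 */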
static int cm_lap_handler(struct cm_work *work)
{
        struct cm_id_private *cm_id_priv;
        struct cm_lap_msg *lap_msg;
        struct ib_cm_lap_event_param *param;
        struct ib_mad_send_buf *msg = NULL;
        unsigned long flags;
        int ret;

        /* todo: verify LAP request and send reject APR if invalid. */
        lap_msg = (struct cm_lap_msg *)work->mad_recv_wc->recv_buf.mad;
        cm_id_priv = cm_acquire_id(lap_msg->remote_comm_id,
                                   lap_msg->local_comm_id);
        if (!cm_id_priv)
                return -EINVAL;

        param = &work->cm_event.param.lap_rcvd;
        param->alternate_path = &work->path[0];
        cm_format_path_from_lap(cm_id_priv, param->alternate_path, lap_msg);
        work->cm_event.private_data = &lap_msg->private_data;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id_priv->id.state != IB_CM_ESTABLISHED)
                goto unlock;

        switch (cm_id_priv->id.lap_state) {
        case IB_CM_LAP_UNINIT:
        case IB_CM_LAP_IDLE:
                break;
        case IB_CM_MRA_LAP_SENT:
                if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
                        goto unlock;

                cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
                              CM_MSG_RESPONSE_OTHER,
                              cm_id_priv->service_timeout,
                              cm_id_priv->private_data,
                              cm_id_priv->private_data_len);
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);

                if (ib_post_send_mad(msg, NULL))
                        cm_free_msg(msg);
                goto deref;
        default:
                goto unlock;
        }

        cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
        cm_id_priv->tid = lap_msg->hdr.tid;
        cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
                                work->mad_recv_wc->recv_buf.grh,
                                &cm_id_priv->av);
        cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av);
        ret = atomic_inc_and_test(&cm_id_priv->work_count);
        if (!ret)
                list_add_tail(&work->list, &cm_id_priv->work_list);
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        if (ret)
                cm_process_work(cm_id_priv, work);
        else
                cm_deref_id(cm_id_priv);
        return 0;

unlock: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
deref:  cm_deref_id(cm_id_priv);
        return -EINVAL;
}
static void cm_format_apr(struct cm_apr_msg *apr_msg,
                          struct cm_id_private *cm_id_priv,
                          enum ib_cm_apr_status status,
                          void *info,
                          u8 info_length,
                          const void *private_data,
                          u8 private_data_len)
{
        cm_format_mad_hdr(&apr_msg->hdr, CM_APR_ATTR_ID, cm_id_priv->tid);
        apr_msg->local_comm_id = cm_id_priv->id.local_id;
        apr_msg->remote_comm_id = cm_id_priv->id.remote_id;
        apr_msg->ap_status = (u8) status;

        if (info && info_length) {
                apr_msg->info_length = info_length;
                memcpy(apr_msg->info, info, info_length);
        }

        if (private_data && private_data_len)
                memcpy(apr_msg->private_data, private_data, private_data_len);
}
int ib_send_cm_apr(struct ib_cm_id *cm_id,
                   enum ib_cm_apr_status status,
                   void *info,
                   u8 info_length,
                   const void *private_data,
                   u8 private_data_len)
{
        struct cm_id_private *cm_id_priv;
        struct ib_mad_send_buf *msg;
        unsigned long flags;
        int ret;

        if ((private_data && private_data_len > IB_CM_APR_PRIVATE_DATA_SIZE) ||
            (info && info_length > IB_CM_APR_INFO_LENGTH))
                return -EINVAL;

        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id->state != IB_CM_ESTABLISHED ||
            (cm_id->lap_state != IB_CM_LAP_RCVD &&
             cm_id->lap_state != IB_CM_MRA_LAP_SENT)) {
                ret = -EINVAL;
                goto out;
        }

        ret = cm_alloc_msg(cm_id_priv, &msg);
        if (ret)
                goto out;

        cm_format_apr((struct cm_apr_msg *) msg->mad, cm_id_priv, status,
                      info, info_length, private_data, private_data_len);
        ret = ib_post_send_mad(msg, NULL);
        if (ret) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                cm_free_msg(msg);
                return ret;
        }

        cm_id->lap_state = IB_CM_LAP_IDLE;
out:    spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return ret;
}
EXPORT_SYMBOL(ib_send_cm_apr);
static int cm_apr_handler(struct cm_work *work)
{
        struct cm_id_private *cm_id_priv;
        struct cm_apr_msg *apr_msg;
        unsigned long flags;
        int ret;

        apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad;
        cm_id_priv = cm_acquire_id(apr_msg->remote_comm_id,
                                   apr_msg->local_comm_id);
        if (!cm_id_priv)
                return -EINVAL; /* Unmatched reply. */

        work->cm_event.param.apr_rcvd.ap_status = apr_msg->ap_status;
        work->cm_event.param.apr_rcvd.apr_info = &apr_msg->info;
        work->cm_event.param.apr_rcvd.info_len = apr_msg->info_length;
        work->cm_event.private_data = &apr_msg->private_data;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id_priv->id.state != IB_CM_ESTABLISHED ||
            (cm_id_priv->id.lap_state != IB_CM_LAP_SENT &&
             cm_id_priv->id.lap_state != IB_CM_MRA_LAP_RCVD)) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                goto out;
        }
        cm_id_priv->id.lap_state = IB_CM_LAP_IDLE;
        ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
        cm_id_priv->msg = NULL;

        ret = atomic_inc_and_test(&cm_id_priv->work_count);
        if (!ret)
                list_add_tail(&work->list, &cm_id_priv->work_list);
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        if (ret)
                cm_process_work(cm_id_priv, work);
        else
                cm_deref_id(cm_id_priv);
        return 0;
out:
        cm_deref_id(cm_id_priv);
        return -EINVAL;
}
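/*
 * A timewait timer fired.  If the cm_id is still in timewait for the same
 * remote QPN, move it to IDLE and report IB_CM_TIMEWAIT_EXIT.
 */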
static int cm_timewait_handler(struct cm_work *work)
{
        struct cm_timewait_info *timewait_info;
        struct cm_id_private *cm_id_priv;
        int ret;

        timewait_info = (struct cm_timewait_info *)work;
        spin_lock_irq(&cm.lock);
        list_del(&timewait_info->list);
        spin_unlock_irq(&cm.lock);

        cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
                                   timewait_info->work.remote_id);
        if (!cm_id_priv)
                return -EINVAL;

        spin_lock_irq(&cm_id_priv->lock);
        if (cm_id_priv->id.state != IB_CM_TIMEWAIT ||
            cm_id_priv->remote_qpn != timewait_info->remote_qpn) {
                spin_unlock_irq(&cm_id_priv->lock);
                goto out;
        }
        cm_id_priv->id.state = IB_CM_IDLE;
        ret = atomic_inc_and_test(&cm_id_priv->work_count);
        if (!ret)
                list_add_tail(&work->list, &cm_id_priv->work_list);
        spin_unlock_irq(&cm_id_priv->lock);

        if (ret)
                cm_process_work(cm_id_priv, work);
        else
                cm_deref_id(cm_id_priv);
        return 0;
out:
        cm_deref_id(cm_id_priv);
        return -EINVAL;
}
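/* Fill out a SIDR REQ MAD, used to resolve a service ID to a QP. */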
static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg,
                               struct cm_id_private *cm_id_priv,
                               struct ib_cm_sidr_req_param *param)
{
        cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID,
                          cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_SIDR));
        sidr_req_msg->request_id = cm_id_priv->id.local_id;
        sidr_req_msg->pkey = cpu_to_be16(param->path->pkey);
        sidr_req_msg->service_id = param->service_id;

        if (param->private_data && param->private_data_len)
                memcpy(sidr_req_msg->private_data, param->private_data,
                       param->private_data_len);
}
int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
                        struct ib_cm_sidr_req_param *param)
{
        struct cm_id_private *cm_id_priv;
        struct ib_mad_send_buf *msg;
        unsigned long flags;
        int ret;

        if (!param->path || (param->private_data &&
             param->private_data_len > IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE))
                return -EINVAL;

        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
        ret = cm_init_av_by_path(param->path, &cm_id_priv->av);
        if (ret)
                goto out;

        cm_id->service_id = param->service_id;
        cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
        cm_id_priv->timeout_ms = param->timeout_ms;
        cm_id_priv->max_cm_retries = param->max_cm_retries;
        ret = cm_alloc_msg(cm_id_priv, &msg);
        if (ret)
                goto out;

        cm_format_sidr_req((struct cm_sidr_req_msg *) msg->mad, cm_id_priv,
                           param);
        msg->timeout_ms = cm_id_priv->timeout_ms;
        msg->context[1] = (void *) (unsigned long) IB_CM_SIDR_REQ_SENT;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id->state == IB_CM_IDLE)
                ret = ib_post_send_mad(msg, NULL);
        else
                ret = -EINVAL;

        if (ret) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                cm_free_msg(msg);
                goto out;
        }
        cm_id->state = IB_CM_SIDR_REQ_SENT;
        cm_id_priv->msg = msg;
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
out:
        return ret;
}
EXPORT_SYMBOL(ib_send_cm_sidr_req);
static void cm_format_sidr_req_event(struct cm_work *work,
                                     struct ib_cm_id *listen_id)
{
        struct cm_sidr_req_msg *sidr_req_msg;
        struct ib_cm_sidr_req_event_param *param;

        sidr_req_msg = (struct cm_sidr_req_msg *)
                                work->mad_recv_wc->recv_buf.mad;
        param = &work->cm_event.param.sidr_req_rcvd;
        param->pkey = __be16_to_cpu(sidr_req_msg->pkey);
        param->listen_id = listen_id;
        param->port = work->port->port_num;
        work->cm_event.private_data = &sidr_req_msg->private_data;
}
static int cm_sidr_req_handler(struct cm_work *work)
{
        struct ib_cm_id *cm_id;
        struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
        struct cm_sidr_req_msg *sidr_req_msg;
        struct ib_wc *wc;
        unsigned long flags;

        cm_id = ib_create_cm_id(work->port->cm_dev->device, NULL, NULL);
        if (IS_ERR(cm_id))
                return PTR_ERR(cm_id);
        cm_id_priv = container_of(cm_id, struct cm_id_private, id);

        /* Record SGID/SLID and request ID for lookup. */
        sidr_req_msg = (struct cm_sidr_req_msg *)
                                work->mad_recv_wc->recv_buf.mad;
        wc = work->mad_recv_wc->wc;
        cm_id_priv->av.dgid.global.subnet_prefix = cpu_to_be64(wc->slid);
        cm_id_priv->av.dgid.global.interface_id = 0;
        cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
                                work->mad_recv_wc->recv_buf.grh,
                                &cm_id_priv->av);
        cm_id_priv->id.remote_id = sidr_req_msg->request_id;
        cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD;
        cm_id_priv->tid = sidr_req_msg->hdr.tid;
        atomic_inc(&cm_id_priv->work_count);

        spin_lock_irqsave(&cm.lock, flags);
        cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
        if (cur_cm_id_priv) {
                spin_unlock_irqrestore(&cm.lock, flags);
                goto out; /* Duplicate message. */
        }
        cur_cm_id_priv = cm_find_listen(cm_id->device,
                                        sidr_req_msg->service_id,
                                        sidr_req_msg->private_data);
        if (!cur_cm_id_priv) {
                rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
                spin_unlock_irqrestore(&cm.lock, flags);
                /* todo: reply with no match */
                goto out; /* No match. */
        }
        atomic_inc(&cur_cm_id_priv->refcount);
        spin_unlock_irqrestore(&cm.lock, flags);

        cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler;
        cm_id_priv->id.context = cur_cm_id_priv->id.context;
        cm_id_priv->id.service_id = sidr_req_msg->service_id;
        cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);

        cm_format_sidr_req_event(work, &cur_cm_id_priv->id);
        cm_process_work(cm_id_priv, work);
        cm_deref_id(cur_cm_id_priv);
        return 0;
out:
        ib_destroy_cm_id(&cm_id_priv->id);
        return -EINVAL;
}
static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg,
                               struct cm_id_private *cm_id_priv,
                               struct ib_cm_sidr_rep_param *param)
{
        cm_format_mad_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID,
                          cm_id_priv->tid);
        sidr_rep_msg->request_id = cm_id_priv->id.remote_id;
        sidr_rep_msg->status = param->status;
        cm_sidr_rep_set_qpn(sidr_rep_msg, cpu_to_be32(param->qp_num));
        sidr_rep_msg->service_id = cm_id_priv->id.service_id;
        sidr_rep_msg->qkey = cpu_to_be32(param->qkey);

        if (param->info && param->info_length)
                memcpy(sidr_rep_msg->info, param->info, param->info_length);

        if (param->private_data && param->private_data_len)
                memcpy(sidr_rep_msg->private_data, param->private_data,
                       param->private_data_len);
}
int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
                        struct ib_cm_sidr_rep_param *param)
{
        struct cm_id_private *cm_id_priv;
        struct ib_mad_send_buf *msg;
        unsigned long flags;
        int ret;

        if ((param->info && param->info_length > IB_CM_SIDR_REP_INFO_LENGTH) ||
            (param->private_data &&
             param->private_data_len > IB_CM_SIDR_REP_PRIVATE_DATA_SIZE))
                return -EINVAL;

        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id->state != IB_CM_SIDR_REQ_RCVD) {
                ret = -EINVAL;
                goto error;
        }

        ret = cm_alloc_msg(cm_id_priv, &msg);
        if (ret)
                goto error;

        cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv,
                           param);
        ret = ib_post_send_mad(msg, NULL);
        if (ret) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                cm_free_msg(msg);
                return ret;
        }
        cm_id->state = IB_CM_IDLE;
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        spin_lock_irqsave(&cm.lock, flags);
        rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
        spin_unlock_irqrestore(&cm.lock, flags);
        return 0;

error:  spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return ret;
}
EXPORT_SYMBOL(ib_send_cm_sidr_rep);
static void cm_format_sidr_rep_event(struct cm_work *work)
{
        struct cm_sidr_rep_msg *sidr_rep_msg;
        struct ib_cm_sidr_rep_event_param *param;

        sidr_rep_msg = (struct cm_sidr_rep_msg *)
                                work->mad_recv_wc->recv_buf.mad;
        param = &work->cm_event.param.sidr_rep_rcvd;
        param->status = sidr_rep_msg->status;
        param->qkey = be32_to_cpu(sidr_rep_msg->qkey);
        param->qpn = be32_to_cpu(cm_sidr_rep_get_qpn(sidr_rep_msg));
        param->info = &sidr_rep_msg->info;
        param->info_len = sidr_rep_msg->info_length;
        work->cm_event.private_data = &sidr_rep_msg->private_data;
}
static int cm_sidr_rep_handler(struct cm_work *work)
{
        struct cm_sidr_rep_msg *sidr_rep_msg;
        struct cm_id_private *cm_id_priv;
        unsigned long flags;

        sidr_rep_msg = (struct cm_sidr_rep_msg *)
                                work->mad_recv_wc->recv_buf.mad;
        cm_id_priv = cm_acquire_id(sidr_rep_msg->request_id, 0);
        if (!cm_id_priv)
                return -EINVAL; /* Unmatched reply. */

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id_priv->id.state != IB_CM_SIDR_REQ_SENT) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                goto out;
        }
        cm_id_priv->id.state = IB_CM_IDLE;
        ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        cm_format_sidr_rep_event(work);
        cm_process_work(cm_id_priv, work);
        return 0;
out:
        cm_deref_id(cm_id_priv);
        return -EINVAL;
}
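/*
 * A send completed in error.  Map the failed message back to its cm_id
 * and, if it is still the current outstanding send for the current state,
 * report the matching *_ERROR event to the consumer.
 */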
static void cm_process_send_error(struct ib_mad_send_buf *msg,
                                  enum ib_wc_status wc_status)
{
        struct cm_id_private *cm_id_priv;
        struct ib_cm_event cm_event;
        enum ib_cm_state state;
        unsigned long flags;
        int ret;

        memset(&cm_event, 0, sizeof cm_event);
        cm_id_priv = msg->context[0];

        /* Discard old sends or ones without a response. */
        spin_lock_irqsave(&cm_id_priv->lock, flags);
        state = (enum ib_cm_state) (unsigned long) msg->context[1];
        if (msg != cm_id_priv->msg || state != cm_id_priv->id.state)
                goto discard;

        switch (state) {
        case IB_CM_REQ_SENT:
        case IB_CM_MRA_REQ_RCVD:
                cm_reset_to_idle(cm_id_priv);
                cm_event.event = IB_CM_REQ_ERROR;
                break;
        case IB_CM_REP_SENT:
        case IB_CM_MRA_REP_RCVD:
                cm_reset_to_idle(cm_id_priv);
                cm_event.event = IB_CM_REP_ERROR;
                break;
        case IB_CM_DREQ_SENT:
                cm_enter_timewait(cm_id_priv);
                cm_event.event = IB_CM_DREQ_ERROR;
                break;
        case IB_CM_SIDR_REQ_SENT:
                cm_id_priv->id.state = IB_CM_IDLE;
                cm_event.event = IB_CM_SIDR_REQ_ERROR;
                break;
        default:
                goto discard;
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        cm_event.param.send_status = wc_status;

        /* No other events can occur on the cm_id at this point. */
        ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &cm_event);
        cm_free_msg(msg);
        if (ret)
                ib_destroy_cm_id(&cm_id_priv->id);
        return;
discard:
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        cm_free_msg(msg);
}
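/*
 * Send completion handler: successful and flushed sends just release the
 * MAD; solicited sends that failed (both context entries set) are routed
 * to cm_process_send_error().
 */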
static void cm_send_handler(struct ib_mad_agent *mad_agent,
                            struct ib_mad_send_wc *mad_send_wc)
{
        struct ib_mad_send_buf *msg = mad_send_wc->send_buf;

        switch (mad_send_wc->status) {
        case IB_WC_SUCCESS:
        case IB_WC_WR_FLUSH_ERR:
                cm_free_msg(msg);
                break;
        default:
                if (msg->context[0] && msg->context[1])
                        cm_process_send_error(msg, mad_send_wc->status);
                else
                        cm_free_msg(msg);
                break;
        }
}
static void cm_work_handler(struct work_struct *_work)
{
        struct cm_work *work = container_of(_work, struct cm_work, work.work);
        int ret;

        switch (work->cm_event.event) {
        case IB_CM_REQ_RECEIVED:
                ret = cm_req_handler(work);
                break;
        case IB_CM_MRA_RECEIVED:
                ret = cm_mra_handler(work);
                break;
        case IB_CM_REJ_RECEIVED:
                ret = cm_rej_handler(work);
                break;
        case IB_CM_REP_RECEIVED:
                ret = cm_rep_handler(work);
                break;
        case IB_CM_RTU_RECEIVED:
                ret = cm_rtu_handler(work);
                break;
        case IB_CM_USER_ESTABLISHED:
                ret = cm_establish_handler(work);
                break;
        case IB_CM_DREQ_RECEIVED:
                ret = cm_dreq_handler(work);
                break;
        case IB_CM_DREP_RECEIVED:
                ret = cm_drep_handler(work);
                break;
        case IB_CM_SIDR_REQ_RECEIVED:
                ret = cm_sidr_req_handler(work);
                break;
        case IB_CM_SIDR_REP_RECEIVED:
                ret = cm_sidr_rep_handler(work);
                break;
        case IB_CM_LAP_RECEIVED:
                ret = cm_lap_handler(work);
                break;
        case IB_CM_APR_RECEIVED:
                ret = cm_apr_handler(work);
                break;
        case IB_CM_TIMEWAIT_EXIT:
                ret = cm_timewait_handler(work);
                break;
        default:
                ret = -EINVAL;
                break;
        }
        if (ret)
                cm_free_work(work);
}
static int cm_establish(struct ib_cm_id *cm_id)
{
        struct cm_id_private *cm_id_priv;
        struct cm_work *work;
        unsigned long flags;
        int ret = 0;

        work = kmalloc(sizeof *work, GFP_ATOMIC);
        if (!work)
                return -ENOMEM;

        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id->state) {
        case IB_CM_REP_SENT:
        case IB_CM_MRA_REP_RCVD:
                cm_id->state = IB_CM_ESTABLISHED;
                break;
        case IB_CM_ESTABLISHED:
                ret = -EISCONN;
                break;
        default:
                ret = -EINVAL;
                break;
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        if (ret) {
                kfree(work);
                goto out;
        }

        /*
         * The CM worker thread may try to destroy the cm_id before it
         * can execute this work item. To prevent potential deadlock,
         * we need to find the cm_id once we're in the context of the
         * worker thread, rather than holding a reference on it.
         */
        INIT_DELAYED_WORK(&work->work, cm_work_handler);
        work->local_id = cm_id->local_id;
        work->remote_id = cm_id->remote_id;
        work->mad_recv_wc = NULL;
        work->cm_event.event = IB_CM_USER_ESTABLISHED;
        queue_delayed_work(cm.wq, &work->work, 0);
out:
        return ret;
}
static int cm_migrate(struct ib_cm_id *cm_id)
{
        struct cm_id_private *cm_id_priv;
        unsigned long flags;
        int ret = 0;

        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id->state == IB_CM_ESTABLISHED &&
            (cm_id->lap_state == IB_CM_LAP_UNINIT ||
             cm_id->lap_state == IB_CM_LAP_IDLE)) {
                cm_id->lap_state = IB_CM_LAP_IDLE;
                cm_id_priv->av = cm_id_priv->alt_av;
        } else
                ret = -EINVAL;
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        return ret;
}
int ib_cm_notify(struct ib_cm_id *cm_id, enum ib_event_type event)
{
        int ret;

        switch (event) {
        case IB_EVENT_COMM_EST:
                ret = cm_establish(cm_id);
                break;
        case IB_EVENT_PATH_MIG:
                ret = cm_migrate(cm_id);
                break;
        default:
                ret = -EINVAL;
        }
        return ret;
}
EXPORT_SYMBOL(ib_cm_notify);
static void cm_recv_handler(struct ib_mad_agent *mad_agent,
                            struct ib_mad_recv_wc *mad_recv_wc)
{
        struct cm_work *work;
        enum ib_cm_event_type event;
        int paths = 0;

        switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
        case CM_REQ_ATTR_ID:
                paths = 1 + (((struct cm_req_msg *) mad_recv_wc->recv_buf.mad)->
                                                    alt_local_lid != 0);
                event = IB_CM_REQ_RECEIVED;
                break;
        case CM_MRA_ATTR_ID:
                event = IB_CM_MRA_RECEIVED;
                break;
        case CM_REJ_ATTR_ID:
                event = IB_CM_REJ_RECEIVED;
                break;
        case CM_REP_ATTR_ID:
                event = IB_CM_REP_RECEIVED;
                break;
        case CM_RTU_ATTR_ID:
                event = IB_CM_RTU_RECEIVED;
                break;
        case CM_DREQ_ATTR_ID:
                event = IB_CM_DREQ_RECEIVED;
                break;
        case CM_DREP_ATTR_ID:
                event = IB_CM_DREP_RECEIVED;
                break;
        case CM_SIDR_REQ_ATTR_ID:
                event = IB_CM_SIDR_REQ_RECEIVED;
                break;
        case CM_SIDR_REP_ATTR_ID:
                event = IB_CM_SIDR_REP_RECEIVED;
                break;
        case CM_LAP_ATTR_ID:
                paths = 1;
                event = IB_CM_LAP_RECEIVED;
                break;
        case CM_APR_ATTR_ID:
                event = IB_CM_APR_RECEIVED;
                break;
        default:
                ib_free_recv_mad(mad_recv_wc);
                return;
        }

        work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
                       GFP_KERNEL);
        if (!work) {
                ib_free_recv_mad(mad_recv_wc);
                return;
        }

        INIT_DELAYED_WORK(&work->work, cm_work_handler);
        work->cm_event.event = event;
        work->mad_recv_wc = mad_recv_wc;
        work->port = (struct cm_port *)mad_agent->context;
        queue_delayed_work(cm.wq, &work->work, 0);
}
static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
                                struct ib_qp_attr *qp_attr,
                                int *qp_attr_mask)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id_priv->id.state) {
        case IB_CM_REQ_SENT:
        case IB_CM_MRA_REQ_RCVD:
        case IB_CM_REQ_RCVD:
        case IB_CM_MRA_REQ_SENT:
        case IB_CM_REP_RCVD:
        case IB_CM_MRA_REP_SENT:
        case IB_CM_REP_SENT:
        case IB_CM_MRA_REP_RCVD:
        case IB_CM_ESTABLISHED:
                *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS |
                                IB_QP_PKEY_INDEX | IB_QP_PORT;
                qp_attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE;
                if (cm_id_priv->responder_resources)
                        qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ |
                                                    IB_ACCESS_REMOTE_ATOMIC;
                qp_attr->pkey_index = cm_id_priv->av.pkey_index;
                qp_attr->port_num = cm_id_priv->av.port->port_num;
                ret = 0;
                break;
        default:
                ret = -EINVAL;
                break;
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return ret;
}
static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
                               struct ib_qp_attr *qp_attr,
                               int *qp_attr_mask)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id_priv->id.state) {
        case IB_CM_REQ_RCVD:
        case IB_CM_MRA_REQ_SENT:
        case IB_CM_REP_RCVD:
        case IB_CM_MRA_REP_SENT:
        case IB_CM_REP_SENT:
        case IB_CM_MRA_REP_RCVD:
        case IB_CM_ESTABLISHED:
                *qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
                                IB_QP_DEST_QPN | IB_QP_RQ_PSN;
                qp_attr->ah_attr = cm_id_priv->av.ah_attr;
                qp_attr->path_mtu = cm_id_priv->path_mtu;
                qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn);
                qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn);
                if (cm_id_priv->qp_type == IB_QPT_RC) {
                        *qp_attr_mask |= IB_QP_MAX_DEST_RD_ATOMIC |
                                         IB_QP_MIN_RNR_TIMER;
                        qp_attr->max_dest_rd_atomic =
                                        cm_id_priv->responder_resources;
                        qp_attr->min_rnr_timer = 0;
                }
                if (cm_id_priv->alt_av.ah_attr.dlid) {
                        *qp_attr_mask |= IB_QP_ALT_PATH;
                        qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
                        qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
                        qp_attr->alt_timeout =
                                        cm_id_priv->alt_av.packet_life_time + 1;
                        qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
                }
                ret = 0;
                break;
        default:
                ret = -EINVAL;
                break;
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return ret;
}
static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
                               struct ib_qp_attr *qp_attr,
                               int *qp_attr_mask)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id_priv->id.state) {
        /* Allow transition to RTS before sending REP */
        case IB_CM_REQ_RCVD:
        case IB_CM_MRA_REQ_SENT:

        case IB_CM_REP_RCVD:
        case IB_CM_MRA_REP_SENT:
        case IB_CM_REP_SENT:
        case IB_CM_MRA_REP_RCVD:
        case IB_CM_ESTABLISHED:
                if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT) {
                        *qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
                        qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
                        if (cm_id_priv->qp_type == IB_QPT_RC) {
                                *qp_attr_mask |= IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
                                                 IB_QP_RNR_RETRY |
                                                 IB_QP_MAX_QP_RD_ATOMIC;
                                qp_attr->timeout =
                                        cm_id_priv->av.packet_life_time + 1;
                                qp_attr->retry_cnt = cm_id_priv->retry_count;
                                qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
                                qp_attr->max_rd_atomic =
                                        cm_id_priv->initiator_depth;
                        }
                        if (cm_id_priv->alt_av.ah_attr.dlid) {
                                *qp_attr_mask |= IB_QP_PATH_MIG_STATE;
                                qp_attr->path_mig_state = IB_MIG_REARM;
                        }
                } else {
                        *qp_attr_mask = IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE;
                        qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
                        qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
                        qp_attr->alt_timeout =
                                cm_id_priv->alt_av.packet_life_time + 1;
                        qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
                        qp_attr->path_mig_state = IB_MIG_REARM;
                }
                ret = 0;
                break;
        default:
                ret = -EINVAL;
                break;
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return ret;
}
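/*
 * A minimal consumer-side sketch (not part of this file) of driving a QP
 * through INIT/RTR/RTS with the attributes supplied below; "qp" and
 * "cm_id" are the caller's objects:
 *
 *      struct ib_qp_attr attr;
 *      int mask;
 *
 *      attr.qp_state = IB_QPS_INIT;
 *      if (!ib_cm_init_qp_attr(cm_id, &attr, &mask))
 *              ib_modify_qp(qp, &attr, mask);
 *      attr.qp_state = IB_QPS_RTR;
 *      if (!ib_cm_init_qp_attr(cm_id, &attr, &mask))
 *              ib_modify_qp(qp, &attr, mask);
 *      attr.qp_state = IB_QPS_RTS;
 *      if (!ib_cm_init_qp_attr(cm_id, &attr, &mask))
 *              ib_modify_qp(qp, &attr, mask);
 */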
int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
                       struct ib_qp_attr *qp_attr,
                       int *qp_attr_mask)
{
        struct cm_id_private *cm_id_priv;
        int ret;

        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
        switch (qp_attr->qp_state) {
        case IB_QPS_INIT:
                ret = cm_init_qp_init_attr(cm_id_priv, qp_attr, qp_attr_mask);
                break;
        case IB_QPS_RTR:
                ret = cm_init_qp_rtr_attr(cm_id_priv, qp_attr, qp_attr_mask);
                break;
        case IB_QPS_RTS:
                ret = cm_init_qp_rts_attr(cm_id_priv, qp_attr, qp_attr_mask);
                break;
        default:
                ret = -EINVAL;
                break;
        }
        return ret;
}
EXPORT_SYMBOL(ib_cm_init_qp_attr);
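/*
 * Per-device setup: register a CM MAD agent on the GSI QP of every
 * physical port and advertise CM support in the port capability mask.
 */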
static void cm_add_one(struct ib_device *device)
{
        struct cm_device *cm_dev;
        struct cm_port *port;
        struct ib_mad_reg_req reg_req = {
                .mgmt_class = IB_MGMT_CLASS_CM,
                .mgmt_class_version = IB_CM_CLASS_VERSION
        };
        struct ib_port_modify port_modify = {
                .set_port_cap_mask = IB_PORT_CM_SUP
        };
        unsigned long flags;
        int ret;
        u8 i;

        if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
                return;

        cm_dev = kmalloc(sizeof(*cm_dev) + sizeof(*port) *
                         device->phys_port_cnt, GFP_KERNEL);
        if (!cm_dev)
                return;

        cm_dev->device = device;

        set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
        for (i = 1; i <= device->phys_port_cnt; i++) {
                port = &cm_dev->port[i-1];
                port->cm_dev = cm_dev;
                port->port_num = i;
                port->mad_agent = ib_register_mad_agent(device, i,
                                                        IB_QPT_GSI,
                                                        &reg_req,
                                                        0,
                                                        cm_send_handler,
                                                        cm_recv_handler,
                                                        port);
                if (IS_ERR(port->mad_agent))
                        goto error1;

                ret = ib_modify_port(device, i, 0, &port_modify);
                if (ret)
                        goto error2;
        }
        ib_set_client_data(device, &cm_client, cm_dev);

        write_lock_irqsave(&cm.device_lock, flags);
        list_add_tail(&cm_dev->list, &cm.device_list);
        write_unlock_irqrestore(&cm.device_lock, flags);
        return;

error2:
        ib_unregister_mad_agent(port->mad_agent);
error1:
        port_modify.set_port_cap_mask = 0;
        port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
        while (--i) {
                port = &cm_dev->port[i-1];
                ib_modify_port(device, port->port_num, 0, &port_modify);
                ib_unregister_mad_agent(port->mad_agent);
        }
        kfree(cm_dev);
}
static void cm_remove_one(struct ib_device *device)
{
        struct cm_device *cm_dev;
        struct cm_port *port;
        struct ib_port_modify port_modify = {
                .clr_port_cap_mask = IB_PORT_CM_SUP
        };
        unsigned long flags;
        int i;

        cm_dev = ib_get_client_data(device, &cm_client);
        if (!cm_dev)
                return;

        write_lock_irqsave(&cm.device_lock, flags);
        list_del(&cm_dev->list);
        write_unlock_irqrestore(&cm.device_lock, flags);

        for (i = 1; i <= device->phys_port_cnt; i++) {
                port = &cm_dev->port[i-1];
                ib_modify_port(device, port->port_num, 0, &port_modify);
                ib_unregister_mad_agent(port->mad_agent);
        }
        kfree(cm_dev);
}
static int __init ib_cm_init(void)
{
        int ret;

        memset(&cm, 0, sizeof cm);
        INIT_LIST_HEAD(&cm.device_list);
        rwlock_init(&cm.device_lock);
        spin_lock_init(&cm.lock);
        cm.listen_service_table = RB_ROOT;
        cm.listen_service_id = __constant_be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
        cm.remote_id_table = RB_ROOT;
        cm.remote_qp_table = RB_ROOT;
        cm.remote_sidr_table = RB_ROOT;
        idr_init(&cm.local_id_table);
        get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand);
        idr_pre_get(&cm.local_id_table, GFP_KERNEL);
        INIT_LIST_HEAD(&cm.timewait_list);

        cm.wq = create_workqueue("ib_cm");
        if (!cm.wq)
                return -ENOMEM;

        ret = ib_register_client(&cm_client);
        if (ret)
                goto error;

        return 0;
error:
        destroy_workqueue(cm.wq);
        return ret;
}
static void __exit ib_cm_cleanup(void)
{
        struct cm_timewait_info *timewait_info, *tmp;

        spin_lock_irq(&cm.lock);
        list_for_each_entry(timewait_info, &cm.timewait_list, list)
                cancel_delayed_work(&timewait_info->work.work);
        spin_unlock_irq(&cm.lock);

        destroy_workqueue(cm.wq);

        list_for_each_entry_safe(timewait_info, tmp, &cm.timewait_list, list) {
                list_del(&timewait_info->list);
                kfree(timewait_info);
        }

        ib_unregister_client(&cm_client);
        idr_destroy(&cm.local_id_table);
}

module_init(ib_cm_init);
module_exit(ib_cm_cleanup);