/*
 * Copyright (c) 2004, 2005 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: cm.c 2821 2005-07-08 17:07:28Z sean.hefty $
 */
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#include <rdma/ib_cache.h>
#include "cm_msgs.h"

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("InfiniBand CM");
MODULE_LICENSE("Dual BSD/GPL");
static void cm_add_one(struct ib_device *device);
static void cm_remove_one(struct ib_device *device);

static struct ib_client cm_client = {
	.name   = "cm",
	.add    = cm_add_one,
	.remove = cm_remove_one
};
static struct ib_cm {
	spinlock_t lock;
	struct list_head device_list;
	rwlock_t device_lock;
	struct rb_root listen_service_table;
	u64 listen_service_id;
	/* struct rb_root peer_service_table; todo: fix peer to peer */
	struct rb_root remote_qp_table;
	struct rb_root remote_id_table;
	struct rb_root remote_sidr_table;
	struct idr local_id_table;
	struct workqueue_struct *wq;
} cm;
struct cm_port {
	struct cm_device *cm_dev;
	struct ib_mad_agent *mad_agent;
	u8 port_num;
};

struct cm_device {
	struct list_head list;
	struct ib_device *device;
	u64 ca_guid;
	struct cm_port port[0];
};

struct cm_av {
	struct cm_port *port;
	union ib_gid dgid;
	struct ib_ah_attr ah_attr;
	u16 pkey_index;
	u8 packet_life_time;
};
struct cm_work {
	struct work_struct work;
	struct list_head list;
	struct cm_port *port;
	struct ib_mad_recv_wc *mad_recv_wc;	/* Received MADs */
	u32 local_id;				/* Established / timewait */
	u32 remote_id;
	struct ib_cm_event cm_event;
	struct ib_sa_path_rec path[0];
};
struct cm_timewait_info {
	struct cm_work work;			/* Must be first. */
	struct rb_node remote_qp_node;
	struct rb_node remote_id_node;
	u64 remote_ca_guid;
	u32 remote_qpn;
	u8 inserted_remote_qp;
	u8 inserted_remote_id;
};
struct cm_id_private {
	struct ib_cm_id	id;

	struct rb_node service_node;
	struct rb_node sidr_id_node;
	spinlock_t lock;
	wait_queue_head_t wait;
	atomic_t refcount;

	struct ib_mad_send_buf *msg;
	struct cm_timewait_info *timewait_info;
	/* todo: use alternate port on send failure */
	struct cm_av av;
	struct cm_av alt_av;

	void *private_data;
	u8 private_data_len;
	u64 tid;
	u32 local_qpn;
	u32 remote_qpn;
	u32 sq_psn;
	u32 rq_psn;
	int timeout_ms;
	enum ib_mtu path_mtu;
	u8 service_timeout;
	u8 max_cm_retries;
	u8 peer_to_peer;
	u8 responder_resources;
	u8 initiator_depth;
	u8 local_ack_timeout;
	u8 retry_count;
	u8 rnr_retry_count;

	struct list_head work_list;
	atomic_t work_count;
};
static void cm_work_handler(void *data);

static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
{
	if (atomic_dec_and_test(&cm_id_priv->refcount))
		wake_up(&cm_id_priv->wait);
}
static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
			struct ib_mad_send_buf **msg)
{
	struct ib_mad_agent *mad_agent;
	struct ib_mad_send_buf *m;
	struct ib_ah *ah;

	mad_agent = cm_id_priv->av.port->mad_agent;
	ah = ib_create_ah(mad_agent->qp->pd, &cm_id_priv->av.ah_attr);
	if (IS_ERR(ah))
		return PTR_ERR(ah);

	m = ib_create_send_mad(mad_agent, 1, cm_id_priv->av.pkey_index,
			       ah, 0, sizeof(struct ib_mad_hdr),
			       sizeof(struct ib_mad) - sizeof(struct ib_mad_hdr),
			       GFP_ATOMIC);
	if (IS_ERR(m)) {
		ib_destroy_ah(ah);
		return PTR_ERR(m);
	}

	/* Timeout set by caller if response is expected. */
	m->send_wr.wr.ud.retries = cm_id_priv->max_cm_retries;

	atomic_inc(&cm_id_priv->refcount);
	m->context[0] = cm_id_priv;
	*msg = m;
	return 0;
}
static int cm_alloc_response_msg(struct cm_port *port,
				 struct ib_mad_recv_wc *mad_recv_wc,
				 struct ib_mad_send_buf **msg)
{
	struct ib_mad_send_buf *m;
	struct ib_ah *ah;

	ah = ib_create_ah_from_wc(port->mad_agent->qp->pd, mad_recv_wc->wc,
				  mad_recv_wc->recv_buf.grh, port->port_num);
	if (IS_ERR(ah))
		return PTR_ERR(ah);

	m = ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index,
			       ah, 0, sizeof(struct ib_mad_hdr),
			       sizeof(struct ib_mad) - sizeof(struct ib_mad_hdr),
			       GFP_ATOMIC);
	if (IS_ERR(m)) {
		ib_destroy_ah(ah);
		return PTR_ERR(m);
	}
	*msg = m;
	return 0;
}

static void cm_free_msg(struct ib_mad_send_buf *msg)
{
	ib_destroy_ah(msg->send_wr.wr.ud.ah);
	if (msg->context[0])
		cm_deref_id(msg->context[0]);
	ib_free_send_mad(msg);
}
static void * cm_copy_private_data(const void *private_data,
				   u8 private_data_len)
{
	void *data;

	if (!private_data || !private_data_len)
		return NULL;

	data = kmalloc(private_data_len, GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);

	memcpy(data, private_data, private_data_len);
	return data;
}

static void cm_set_private_data(struct cm_id_private *cm_id_priv,
				void *private_data, u8 private_data_len)
{
	if (cm_id_priv->private_data && cm_id_priv->private_data_len)
		kfree(cm_id_priv->private_data);

	cm_id_priv->private_data = private_data;
	cm_id_priv->private_data_len = private_data_len;
}
static void cm_set_ah_attr(struct ib_ah_attr *ah_attr, u8 port_num,
			   u16 dlid, u8 sl, u16 src_path_bits)
{
	memset(ah_attr, 0, sizeof *ah_attr);
	ah_attr->dlid = be16_to_cpu(dlid);
	ah_attr->sl = sl;
	ah_attr->src_path_bits = src_path_bits;
	ah_attr->port_num = port_num;
}

static void cm_init_av_for_response(struct cm_port *port,
				    struct ib_wc *wc, struct cm_av *av)
{
	av->port = port;
	av->pkey_index = wc->pkey_index;
	cm_set_ah_attr(&av->ah_attr, port->port_num, cpu_to_be16(wc->slid),
		       wc->sl, wc->dlid_path_bits);
}
static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
{
	struct cm_device *cm_dev;
	struct cm_port *port = NULL;
	unsigned long flags;
	int ret;
	u8 p;

	read_lock_irqsave(&cm.device_lock, flags);
	list_for_each_entry(cm_dev, &cm.device_list, list) {
		if (!ib_find_cached_gid(cm_dev->device, &path->sgid,
					&p, NULL)) {
			port = &cm_dev->port[p-1];
			break;
		}
	}
	read_unlock_irqrestore(&cm.device_lock, flags);

	if (!port)
		return -EINVAL;

	ret = ib_find_cached_pkey(cm_dev->device, port->port_num,
				  be16_to_cpu(path->pkey), &av->pkey_index);
	if (ret)
		return ret;

	av->port = port;
	cm_set_ah_attr(&av->ah_attr, av->port->port_num, path->dlid,
		       path->sl, path->slid & 0x7F);
	av->packet_life_time = path->packet_life_time;
	return 0;
}
static int cm_alloc_id(struct cm_id_private *cm_id_priv)
{
	unsigned long flags;
	int ret;

	do {
		spin_lock_irqsave(&cm.lock, flags);
		ret = idr_get_new_above(&cm.local_id_table, cm_id_priv, 1,
					(int *) &cm_id_priv->id.local_id);
		spin_unlock_irqrestore(&cm.lock, flags);
	} while( (ret == -EAGAIN) && idr_pre_get(&cm.local_id_table, GFP_KERNEL) );
	return ret;
}
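/*
 * Note: idr_get_new_above() returns -EAGAIN when the idr has run out of
 * preallocated memory, so the loop above retries after idr_pre_get()
 * replenishes the free list.  cm.lock is dropped across idr_pre_get()
 * because that call may sleep (GFP_KERNEL).
 */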
static void cm_free_id(u32 local_id)
{
	unsigned long flags;

	spin_lock_irqsave(&cm.lock, flags);
	idr_remove(&cm.local_id_table, (int) local_id);
	spin_unlock_irqrestore(&cm.lock, flags);
}

static struct cm_id_private * cm_get_id(u32 local_id, u32 remote_id)
{
	struct cm_id_private *cm_id_priv;

	cm_id_priv = idr_find(&cm.local_id_table, (int) local_id);
	if (cm_id_priv) {
		if (cm_id_priv->id.remote_id == remote_id)
			atomic_inc(&cm_id_priv->refcount);
		else
			cm_id_priv = NULL;
	}

	return cm_id_priv;
}
static struct cm_id_private * cm_acquire_id(u32 local_id, u32 remote_id)
{
	struct cm_id_private *cm_id_priv;
	unsigned long flags;

	spin_lock_irqsave(&cm.lock, flags);
	cm_id_priv = cm_get_id(local_id, remote_id);
	spin_unlock_irqrestore(&cm.lock, flags);

	return cm_id_priv;
}
static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
{
	struct rb_node **link = &cm.listen_service_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_id_private *cur_cm_id_priv;
	u64 service_id = cm_id_priv->id.service_id;
	u64 service_mask = cm_id_priv->id.service_mask;

	while (*link) {
		parent = *link;
		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
					  service_node);
		if ((cur_cm_id_priv->id.service_mask & service_id) ==
		    (service_mask & cur_cm_id_priv->id.service_id))
			return cur_cm_id_priv;
		if (service_id < cur_cm_id_priv->id.service_id)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}
	rb_link_node(&cm_id_priv->service_node, parent, link);
	rb_insert_color(&cm_id_priv->service_node, &cm.listen_service_table);
	return NULL;
}
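/*
 * Listens match on (service_id & service_mask).  For example, an existing
 * listener registered with service_id 0x1000 and mask ~0xFFULL overlaps
 * any new listen whose service_id falls in 0x1000-0x10FF, so
 * cm_insert_listen() returns the existing entry instead of inserting.
 */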
static struct cm_id_private * cm_find_listen(u64 service_id)
{
	struct rb_node *node = cm.listen_service_table.rb_node;
	struct cm_id_private *cm_id_priv;

	while (node) {
		cm_id_priv = rb_entry(node, struct cm_id_private, service_node);
		if ((cm_id_priv->id.service_mask & service_id) ==
		    (cm_id_priv->id.service_mask & cm_id_priv->id.service_id))
			return cm_id_priv;
		if (service_id < cm_id_priv->id.service_id)
			node = node->rb_left;
		else
			node = node->rb_right;
	}
	return NULL;
}
static struct cm_timewait_info * cm_insert_remote_id(struct cm_timewait_info
						     *timewait_info)
{
	struct rb_node **link = &cm.remote_id_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_timewait_info *cur_timewait_info;
	u64 remote_ca_guid = timewait_info->remote_ca_guid;
	u32 remote_id = timewait_info->work.remote_id;

	while (*link) {
		parent = *link;
		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
					     remote_id_node);
		if (remote_id < cur_timewait_info->work.remote_id)
			link = &(*link)->rb_left;
		else if (remote_id > cur_timewait_info->work.remote_id)
			link = &(*link)->rb_right;
		else if (remote_ca_guid < cur_timewait_info->remote_ca_guid)
			link = &(*link)->rb_left;
		else if (remote_ca_guid > cur_timewait_info->remote_ca_guid)
			link = &(*link)->rb_right;
		else
			return cur_timewait_info;
	}
	timewait_info->inserted_remote_id = 1;
	rb_link_node(&timewait_info->remote_id_node, parent, link);
	rb_insert_color(&timewait_info->remote_id_node, &cm.remote_id_table);
	return NULL;
}
static struct cm_timewait_info * cm_find_remote_id(u64 remote_ca_guid,
						   u32 remote_id)
{
	struct rb_node *node = cm.remote_id_table.rb_node;
	struct cm_timewait_info *timewait_info;

	while (node) {
		timewait_info = rb_entry(node, struct cm_timewait_info,
					 remote_id_node);
		if (remote_id < timewait_info->work.remote_id)
			node = node->rb_left;
		else if (remote_id > timewait_info->work.remote_id)
			node = node->rb_right;
		else if (remote_ca_guid < timewait_info->remote_ca_guid)
			node = node->rb_left;
		else if (remote_ca_guid > timewait_info->remote_ca_guid)
			node = node->rb_right;
		else
			return timewait_info;
	}
	return NULL;
}
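/*
 * Remote communication IDs are only unique per CA, so the remote ID and
 * remote QPN trees both key on a composite of the primary value plus the
 * remote CA GUID: compare the ID (or QPN) first and break ties on GUID.
 */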
static struct cm_timewait_info * cm_insert_remote_qpn(struct cm_timewait_info
						      *timewait_info)
{
	struct rb_node **link = &cm.remote_qp_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_timewait_info *cur_timewait_info;
	u64 remote_ca_guid = timewait_info->remote_ca_guid;
	u32 remote_qpn = timewait_info->remote_qpn;

	while (*link) {
		parent = *link;
		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
					     remote_qp_node);
		if (remote_qpn < cur_timewait_info->remote_qpn)
			link = &(*link)->rb_left;
		else if (remote_qpn > cur_timewait_info->remote_qpn)
			link = &(*link)->rb_right;
		else if (remote_ca_guid < cur_timewait_info->remote_ca_guid)
			link = &(*link)->rb_left;
		else if (remote_ca_guid > cur_timewait_info->remote_ca_guid)
			link = &(*link)->rb_right;
		else
			return cur_timewait_info;
	}
	timewait_info->inserted_remote_qp = 1;
	rb_link_node(&timewait_info->remote_qp_node, parent, link);
	rb_insert_color(&timewait_info->remote_qp_node, &cm.remote_qp_table);
	return NULL;
}
static struct cm_id_private * cm_insert_remote_sidr(struct cm_id_private
						    *cm_id_priv)
{
	struct rb_node **link = &cm.remote_sidr_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_id_private *cur_cm_id_priv;
	union ib_gid *port_gid = &cm_id_priv->av.dgid;
	u32 remote_id = cm_id_priv->id.remote_id;

	while (*link) {
		parent = *link;
		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
					  sidr_id_node);
		if (remote_id < cur_cm_id_priv->id.remote_id)
			link = &(*link)->rb_left;
		else if (remote_id > cur_cm_id_priv->id.remote_id)
			link = &(*link)->rb_right;
		else {
			int cmp;
			cmp = memcmp(port_gid, &cur_cm_id_priv->av.dgid,
				     sizeof *port_gid);
			if (cmp < 0)
				link = &(*link)->rb_left;
			else if (cmp > 0)
				link = &(*link)->rb_right;
			else
				return cur_cm_id_priv;
		}
	}
	rb_link_node(&cm_id_priv->sidr_id_node, parent, link);
	rb_insert_color(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
	return NULL;
}
static void cm_reject_sidr_req(struct cm_id_private *cm_id_priv,
			       enum ib_cm_sidr_status status)
{
	struct ib_cm_sidr_rep_param param;

	memset(&param, 0, sizeof param);
	param.status = status;
	ib_send_cm_sidr_rep(&cm_id_priv->id, &param);
}
struct ib_cm_id *ib_create_cm_id(ib_cm_handler cm_handler,
				 void *context)
{
	struct cm_id_private *cm_id_priv;
	int ret;

	cm_id_priv = kmalloc(sizeof *cm_id_priv, GFP_KERNEL);
	if (!cm_id_priv)
		return ERR_PTR(-ENOMEM);

	memset(cm_id_priv, 0, sizeof *cm_id_priv);
	cm_id_priv->id.state = IB_CM_IDLE;
	cm_id_priv->id.cm_handler = cm_handler;
	cm_id_priv->id.context = context;
	ret = cm_alloc_id(cm_id_priv);
	if (ret)
		goto error;

	spin_lock_init(&cm_id_priv->lock);
	init_waitqueue_head(&cm_id_priv->wait);
	INIT_LIST_HEAD(&cm_id_priv->work_list);
	atomic_set(&cm_id_priv->work_count, -1);
	atomic_set(&cm_id_priv->refcount, 1);
	return &cm_id_priv->id;

error:
	kfree(cm_id_priv);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(ib_create_cm_id);
static struct cm_work * cm_dequeue_work(struct cm_id_private *cm_id_priv)
{
	struct cm_work *work;

	if (list_empty(&cm_id_priv->work_list))
		return NULL;

	work = list_entry(cm_id_priv->work_list.next, struct cm_work, list);
	list_del(&work->list);
	return work;
}

static void cm_free_work(struct cm_work *work)
{
	if (work->mad_recv_wc)
		ib_free_recv_mad(work->mad_recv_wc);
	kfree(work);
}

static inline int cm_convert_to_ms(int iba_time)
{
	/* approximate conversion to ms from 4.096us x 2^iba_time */
	return 1 << max(iba_time - 8, 0);
}
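/*
 * Worked example: for iba_time = 14 the exact IBA value is
 * 4.096us * 2^14 = ~67 ms, while the approximation above yields
 * 1 << (14 - 8) = 64 ms.  Values of 8 or less all collapse to 1 ms.
 */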
static void cm_cleanup_timewait(struct cm_timewait_info *timewait_info)
{
	unsigned long flags;

	if (!timewait_info->inserted_remote_id &&
	    !timewait_info->inserted_remote_qp)
		return;

	spin_lock_irqsave(&cm.lock, flags);
	if (timewait_info->inserted_remote_id) {
		rb_erase(&timewait_info->remote_id_node, &cm.remote_id_table);
		timewait_info->inserted_remote_id = 0;
	}

	if (timewait_info->inserted_remote_qp) {
		rb_erase(&timewait_info->remote_qp_node, &cm.remote_qp_table);
		timewait_info->inserted_remote_qp = 0;
	}

	spin_unlock_irqrestore(&cm.lock, flags);
}
static struct cm_timewait_info * cm_create_timewait_info(u32 local_id)
{
	struct cm_timewait_info *timewait_info;

	timewait_info = kmalloc(sizeof *timewait_info, GFP_KERNEL);
	if (!timewait_info)
		return ERR_PTR(-ENOMEM);
	memset(timewait_info, 0, sizeof *timewait_info);

	timewait_info->work.local_id = local_id;
	INIT_WORK(&timewait_info->work.work, cm_work_handler,
		  &timewait_info->work);
	timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
	return timewait_info;
}
static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
{
	int wait_time;

	/*
	 * The cm_id could be destroyed by the user before we exit timewait.
	 * To protect against this, we search for the cm_id after exiting
	 * timewait before notifying the user that we've exited timewait.
	 */
	cm_id_priv->id.state = IB_CM_TIMEWAIT;
	wait_time = cm_convert_to_ms(cm_id_priv->local_ack_timeout);
	queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
			   msecs_to_jiffies(wait_time));
	cm_id_priv->timewait_info = NULL;
}
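/*
 * The timewait period is approximated here as one local ACK timeout; the
 * delayed work queued above eventually delivers the IB_CM_TIMEWAIT_EXIT
 * event that was set up in cm_create_timewait_info().
 */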
static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
{
	cm_id_priv->id.state = IB_CM_IDLE;
	if (cm_id_priv->timewait_info) {
		cm_cleanup_timewait(cm_id_priv->timewait_info);
		kfree(cm_id_priv->timewait_info);
		cm_id_priv->timewait_info = NULL;
	}
}
void ib_destroy_cm_id(struct ib_cm_id *cm_id)
{
	struct cm_id_private *cm_id_priv;
	struct cm_work *work;
	unsigned long flags;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
retest:
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id->state) {
	case IB_CM_LISTEN:
		cm_id->state = IB_CM_IDLE;
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		spin_lock_irqsave(&cm.lock, flags);
		rb_erase(&cm_id_priv->service_node, &cm.listen_service_table);
		spin_unlock_irqrestore(&cm.lock, flags);
		break;
	case IB_CM_SIDR_REQ_SENT:
		cm_id->state = IB_CM_IDLE;
		ib_cancel_mad(cm_id_priv->av.port->mad_agent,
			      (unsigned long) cm_id_priv->msg);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		break;
	case IB_CM_SIDR_REQ_RCVD:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT);
		break;
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent,
			      (unsigned long) cm_id_priv->msg);
		/* Fall through */
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT,
			       &cm_id_priv->av.port->cm_dev->ca_guid,
			       sizeof cm_id_priv->av.port->cm_dev->ca_guid,
			       NULL, 0);
		break;
	case IB_CM_ESTABLISHED:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ib_send_cm_dreq(cm_id, NULL, 0);
		goto retest;
	case IB_CM_DREQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent,
			      (unsigned long) cm_id_priv->msg);
		cm_enter_timewait(cm_id_priv);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		break;
	case IB_CM_DREQ_RCVD:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ib_send_cm_drep(cm_id, NULL, 0);
		break;
	default:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		break;
	}

	cm_free_id(cm_id->local_id);
	atomic_dec(&cm_id_priv->refcount);
	wait_event(cm_id_priv->wait, !atomic_read(&cm_id_priv->refcount));
	while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
		cm_free_work(work);
	if (cm_id_priv->private_data && cm_id_priv->private_data_len)
		kfree(cm_id_priv->private_data);
	kfree(cm_id_priv);
}
EXPORT_SYMBOL(ib_destroy_cm_id);
int ib_cm_listen(struct ib_cm_id *cm_id,
		 u64 service_id,
		 u64 service_mask)
{
	struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
	unsigned long flags;
	int ret = 0;

	service_mask = service_mask ? service_mask : ~0ULL;
	service_id &= service_mask;
	if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
	    (service_id != IB_CM_ASSIGN_SERVICE_ID))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	BUG_ON(cm_id->state != IB_CM_IDLE);

	cm_id->state = IB_CM_LISTEN;

	spin_lock_irqsave(&cm.lock, flags);
	if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
		cm_id->service_id = __cpu_to_be64(cm.listen_service_id++);
		cm_id->service_mask = ~0ULL;
	} else {
		cm_id->service_id = service_id;
		cm_id->service_mask = service_mask;
	}
	cur_cm_id_priv = cm_insert_listen(cm_id_priv);
	spin_unlock_irqrestore(&cm.lock, flags);

	if (cur_cm_id_priv) {
		cm_id->state = IB_CM_IDLE;
		ret = -EBUSY;
	}
	return ret;
}
EXPORT_SYMBOL(ib_cm_listen);
static u64 cm_form_tid(struct cm_id_private *cm_id_priv,
		       enum cm_msg_sequence msg_seq)
{
	u64 hi_tid, low_tid;

	hi_tid   = ((u64) cm_id_priv->av.port->mad_agent->hi_tid) << 32;
	low_tid  = (u64) (cm_id_priv->id.local_id | (msg_seq << 30));
	return cpu_to_be64(hi_tid | low_tid);
}
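/*
 * TID layout (before the cpu_to_be64 swap): bits 63-32 carry the MAD
 * agent's hi_tid, bits 31-30 the cm_msg_sequence value (e.g.
 * CM_MSG_SEQUENCE_REQ), and bits 29-0 the local communication ID, so a
 * response MAD can be demultiplexed back to the owning cm_id.
 */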
static void cm_format_mad_hdr(struct ib_mad_hdr *hdr,
			      enum cm_msg_attr_id attr_id, u64 tid)
{
	hdr->base_version  = IB_MGMT_BASE_VERSION;
	hdr->mgmt_class	   = IB_MGMT_CLASS_CM;
	hdr->class_version = IB_CM_CLASS_VERSION;
	hdr->method	   = IB_MGMT_METHOD_SEND;
	hdr->attr_id	   = attr_id;
	hdr->tid	   = tid;
}
static void cm_format_req(struct cm_req_msg *req_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_cm_req_param *param)
{
	cm_format_mad_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_REQ));

	req_msg->local_comm_id = cm_id_priv->id.local_id;
	req_msg->service_id = param->service_id;
	req_msg->local_ca_guid = cm_id_priv->av.port->cm_dev->ca_guid;
	cm_req_set_local_qpn(req_msg, cpu_to_be32(param->qp_num));
	cm_req_set_resp_res(req_msg, param->responder_resources);
	cm_req_set_init_depth(req_msg, param->initiator_depth);
	cm_req_set_remote_resp_timeout(req_msg,
				       param->remote_cm_response_timeout);
	cm_req_set_qp_type(req_msg, param->qp_type);
	cm_req_set_flow_ctrl(req_msg, param->flow_control);
	cm_req_set_starting_psn(req_msg, cpu_to_be32(param->starting_psn));
	cm_req_set_local_resp_timeout(req_msg,
				      param->local_cm_response_timeout);
	cm_req_set_retry_count(req_msg, param->retry_count);
	req_msg->pkey = param->primary_path->pkey;
	cm_req_set_path_mtu(req_msg, param->primary_path->mtu);
	cm_req_set_rnr_retry_count(req_msg, param->rnr_retry_count);
	cm_req_set_max_cm_retries(req_msg, param->max_cm_retries);
	cm_req_set_srq(req_msg, param->srq);

	req_msg->primary_local_lid = param->primary_path->slid;
	req_msg->primary_remote_lid = param->primary_path->dlid;
	req_msg->primary_local_gid = param->primary_path->sgid;
	req_msg->primary_remote_gid = param->primary_path->dgid;
	cm_req_set_primary_flow_label(req_msg, param->primary_path->flow_label);
	cm_req_set_primary_packet_rate(req_msg, param->primary_path->rate);
	req_msg->primary_traffic_class = param->primary_path->traffic_class;
	req_msg->primary_hop_limit = param->primary_path->hop_limit;
	cm_req_set_primary_sl(req_msg, param->primary_path->sl);
	cm_req_set_primary_subnet_local(req_msg, 1); /* local only... */
	cm_req_set_primary_local_ack_timeout(req_msg,
		min(31, param->primary_path->packet_life_time + 1));

	if (param->alternate_path) {
		req_msg->alt_local_lid = param->alternate_path->slid;
		req_msg->alt_remote_lid = param->alternate_path->dlid;
		req_msg->alt_local_gid = param->alternate_path->sgid;
		req_msg->alt_remote_gid = param->alternate_path->dgid;
		cm_req_set_alt_flow_label(req_msg,
					  param->alternate_path->flow_label);
		cm_req_set_alt_packet_rate(req_msg, param->alternate_path->rate);
		req_msg->alt_traffic_class = param->alternate_path->traffic_class;
		req_msg->alt_hop_limit = param->alternate_path->hop_limit;
		cm_req_set_alt_sl(req_msg, param->alternate_path->sl);
		cm_req_set_alt_subnet_local(req_msg, 1); /* local only... */
		cm_req_set_alt_local_ack_timeout(req_msg,
			min(31, param->alternate_path->packet_life_time + 1));
	}

	if (param->private_data && param->private_data_len)
		memcpy(req_msg->private_data, param->private_data,
		       param->private_data_len);
}
static inline int cm_validate_req_param(struct ib_cm_req_param *param)
{
	/* peer-to-peer not supported */
	if (param->peer_to_peer)
		return -EINVAL;

	if (!param->primary_path)
		return -EINVAL;

	if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC)
		return -EINVAL;

	if (param->private_data &&
	    param->private_data_len > IB_CM_REQ_PRIVATE_DATA_SIZE)
		return -EINVAL;

	if (param->alternate_path &&
	    (param->alternate_path->pkey != param->primary_path->pkey ||
	     param->alternate_path->mtu != param->primary_path->mtu))
		return -EINVAL;

	return 0;
}
int ib_send_cm_req(struct ib_cm_id *cm_id,
		   struct ib_cm_req_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct ib_send_wr *bad_send_wr;
	struct cm_req_msg *req_msg;
	unsigned long flags;
	int ret;

	ret = cm_validate_req_param(param);
	if (ret)
		return ret;

	/* Verify that we're not in timewait. */
	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_IDLE) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = -EINVAL;
		goto out;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
							    id.local_id);
	if (IS_ERR(cm_id_priv->timewait_info)) {
		ret = PTR_ERR(cm_id_priv->timewait_info);
		goto out;
	}

	ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av);
	if (ret)
		goto error1;
	if (param->alternate_path) {
		ret = cm_init_av_by_path(param->alternate_path,
					 &cm_id_priv->alt_av);
		if (ret)
			goto error1;
	}
	cm_id->service_id = param->service_id;
	cm_id->service_mask = ~0ULL;
	cm_id_priv->timeout_ms = cm_convert_to_ms(
				    param->primary_path->packet_life_time) * 2 +
				 cm_convert_to_ms(
				    param->remote_cm_response_timeout);
	cm_id_priv->max_cm_retries = param->max_cm_retries;
	cm_id_priv->initiator_depth = param->initiator_depth;
	cm_id_priv->responder_resources = param->responder_resources;
	cm_id_priv->retry_count = param->retry_count;
	cm_id_priv->path_mtu = param->primary_path->mtu;

	ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg);
	if (ret)
		goto error1;

	req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad;
	cm_format_req(req_msg, cm_id_priv, param);
	cm_id_priv->tid = req_msg->hdr.tid;
	cm_id_priv->msg->send_wr.wr.ud.timeout_ms = cm_id_priv->timeout_ms;
	cm_id_priv->msg->context[1] = (void *) (unsigned long) IB_CM_REQ_SENT;

	cm_id_priv->local_qpn = cm_req_get_local_qpn(req_msg);
	cm_id_priv->rq_psn = cm_req_get_starting_psn(req_msg);
	cm_id_priv->local_ack_timeout =
				cm_req_get_primary_local_ack_timeout(req_msg);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
			       &cm_id_priv->msg->send_wr, &bad_send_wr);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto error2;
	}
	BUG_ON(cm_id->state != IB_CM_IDLE);
	cm_id->state = IB_CM_REQ_SENT;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error2:	cm_free_msg(cm_id_priv->msg);
error1:	kfree(cm_id_priv->timewait_info);
out:	return ret;
}
EXPORT_SYMBOL(ib_send_cm_req);
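/*
 * The REQ timeout computed above is two packet life times (one round trip
 * on the primary path) plus the remote CM's stated response timeout, each
 * converted from IBA time units to milliseconds by cm_convert_to_ms().
 */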
static int cm_issue_rej(struct cm_port *port,
			struct ib_mad_recv_wc *mad_recv_wc,
			enum ib_cm_rej_reason reason,
			enum cm_msg_response msg_rejected,
			void *ari, u8 ari_length)
{
	struct ib_mad_send_buf *msg = NULL;
	struct ib_send_wr *bad_send_wr;
	struct cm_rej_msg *rej_msg, *rcv_msg;
	int ret;

	ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
	if (ret)
		return ret;

	/* We just need common CM header information.  Cast to any message. */
	rcv_msg = (struct cm_rej_msg *) mad_recv_wc->recv_buf.mad;
	rej_msg = (struct cm_rej_msg *) msg->mad;

	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, rcv_msg->hdr.tid);
	rej_msg->remote_comm_id = rcv_msg->local_comm_id;
	rej_msg->local_comm_id = rcv_msg->remote_comm_id;
	cm_rej_set_msg_rejected(rej_msg, msg_rejected);
	rej_msg->reason = reason;

	if (ari && ari_length) {
		cm_rej_set_reject_info_len(rej_msg, ari_length);
		memcpy(rej_msg->ari, ari, ari_length);
	}

	ret = ib_post_send_mad(port->mad_agent, &msg->send_wr, &bad_send_wr);
	if (ret)
		cm_free_msg(msg);

	return ret;
}
static inline int cm_is_active_peer(u64 local_ca_guid, u64 remote_ca_guid,
				    u32 local_qpn, u32 remote_qpn)
{
	return (be64_to_cpu(local_ca_guid) > be64_to_cpu(remote_ca_guid) ||
		((local_ca_guid == remote_ca_guid) &&
		 (be32_to_cpu(local_qpn) > be32_to_cpu(remote_qpn))));
}
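/*
 * For peer-to-peer connections both sides send a REQ, so a stable
 * tie-break is needed: the side with the numerically larger CA GUID (or,
 * on a GUID tie, the larger QPN) is treated as the active peer.  Both
 * fields are compared in host byte order.
 */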
static inline void cm_format_paths_from_req(struct cm_req_msg *req_msg,
					    struct ib_sa_path_rec *primary_path,
					    struct ib_sa_path_rec *alt_path)
{
	memset(primary_path, 0, sizeof *primary_path);
	primary_path->dgid = req_msg->primary_local_gid;
	primary_path->sgid = req_msg->primary_remote_gid;
	primary_path->dlid = req_msg->primary_local_lid;
	primary_path->slid = req_msg->primary_remote_lid;
	primary_path->flow_label = cm_req_get_primary_flow_label(req_msg);
	primary_path->hop_limit = req_msg->primary_hop_limit;
	primary_path->traffic_class = req_msg->primary_traffic_class;
	primary_path->reversible = 1;
	primary_path->pkey = req_msg->pkey;
	primary_path->sl = cm_req_get_primary_sl(req_msg);
	primary_path->mtu_selector = IB_SA_EQ;
	primary_path->mtu = cm_req_get_path_mtu(req_msg);
	primary_path->rate_selector = IB_SA_EQ;
	primary_path->rate = cm_req_get_primary_packet_rate(req_msg);
	primary_path->packet_life_time_selector = IB_SA_EQ;
	primary_path->packet_life_time =
		cm_req_get_primary_local_ack_timeout(req_msg);
	primary_path->packet_life_time -= (primary_path->packet_life_time > 0);

	if (req_msg->alt_local_lid) {
		memset(alt_path, 0, sizeof *alt_path);
		alt_path->dgid = req_msg->alt_local_gid;
		alt_path->sgid = req_msg->alt_remote_gid;
		alt_path->dlid = req_msg->alt_local_lid;
		alt_path->slid = req_msg->alt_remote_lid;
		alt_path->flow_label = cm_req_get_alt_flow_label(req_msg);
		alt_path->hop_limit = req_msg->alt_hop_limit;
		alt_path->traffic_class = req_msg->alt_traffic_class;
		alt_path->reversible = 1;
		alt_path->pkey = req_msg->pkey;
		alt_path->sl = cm_req_get_alt_sl(req_msg);
		alt_path->mtu_selector = IB_SA_EQ;
		alt_path->mtu = cm_req_get_path_mtu(req_msg);
		alt_path->rate_selector = IB_SA_EQ;
		alt_path->rate = cm_req_get_alt_packet_rate(req_msg);
		alt_path->packet_life_time_selector = IB_SA_EQ;
		alt_path->packet_life_time =
			cm_req_get_alt_local_ack_timeout(req_msg);
		alt_path->packet_life_time -= (alt_path->packet_life_time > 0);
	}
}
static void cm_format_req_event(struct cm_work *work,
				struct cm_id_private *cm_id_priv,
				struct ib_cm_id *listen_id)
{
	struct cm_req_msg *req_msg;
	struct ib_cm_req_event_param *param;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.req_rcvd;
	param->listen_id = listen_id;
	param->device = cm_id_priv->av.port->mad_agent->device;
	param->port = cm_id_priv->av.port->port_num;
	param->primary_path = &work->path[0];
	if (req_msg->alt_local_lid)
		param->alternate_path = &work->path[1];
	else
		param->alternate_path = NULL;
	param->remote_ca_guid = req_msg->local_ca_guid;
	param->remote_qkey = be32_to_cpu(req_msg->local_qkey);
	param->remote_qpn = be32_to_cpu(cm_req_get_local_qpn(req_msg));
	param->qp_type = cm_req_get_qp_type(req_msg);
	param->starting_psn = be32_to_cpu(cm_req_get_starting_psn(req_msg));
	param->responder_resources = cm_req_get_init_depth(req_msg);
	param->initiator_depth = cm_req_get_resp_res(req_msg);
	param->local_cm_response_timeout =
					cm_req_get_remote_resp_timeout(req_msg);
	param->flow_control = cm_req_get_flow_ctrl(req_msg);
	param->remote_cm_response_timeout =
					cm_req_get_local_resp_timeout(req_msg);
	param->retry_count = cm_req_get_retry_count(req_msg);
	param->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
	param->srq = cm_req_get_srq(req_msg);
	work->cm_event.private_data = &req_msg->private_data;
}
static void cm_process_work(struct cm_id_private *cm_id_priv,
			    struct cm_work *work)
{
	unsigned long flags;
	int ret;

	/* We will typically only have the current event to report. */
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
	cm_free_work(work);

	while (!ret && !atomic_add_negative(-1, &cm_id_priv->work_count)) {
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		work = cm_dequeue_work(cm_id_priv);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		BUG_ON(!work);
		ret = cm_id_priv->id.cm_handler(&cm_id_priv->id,
						&work->cm_event);
		cm_free_work(work);
	}
	cm_deref_id(cm_id_priv);
	if (ret)
		ib_destroy_cm_id(&cm_id_priv->id);
}
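/*
 * work_count starts at -1 and is incremented once per queued event, so it
 * holds the number of pending events minus one.  cm_process_work() keeps
 * dispatching queued events until a decrement drives the count negative
 * again, meaning the queue has been drained by this caller.
 */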
static void cm_format_mra(struct cm_mra_msg *mra_msg,
			  struct cm_id_private *cm_id_priv,
			  enum cm_msg_response msg_mraed, u8 service_timeout,
			  const void *private_data, u8 private_data_len)
{
	cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid);
	cm_mra_set_msg_mraed(mra_msg, msg_mraed);
	mra_msg->local_comm_id = cm_id_priv->id.local_id;
	mra_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_mra_set_service_timeout(mra_msg, service_timeout);

	if (private_data && private_data_len)
		memcpy(mra_msg->private_data, private_data, private_data_len);
}
static void cm_format_rej(struct cm_rej_msg *rej_msg,
			  struct cm_id_private *cm_id_priv,
			  enum ib_cm_rej_reason reason,
			  void *ari,
			  u8 ari_length,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, cm_id_priv->tid);
	rej_msg->remote_comm_id = cm_id_priv->id.remote_id;

	switch(cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
		rej_msg->local_comm_id = 0;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
		break;
	case IB_CM_MRA_REQ_SENT:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
		break;
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REP);
		break;
	default:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_OTHER);
		break;
	}

	rej_msg->reason = reason;
	if (ari && ari_length) {
		cm_rej_set_reject_info_len(rej_msg, ari_length);
		memcpy(rej_msg->ari, ari, ari_length);
	}

	if (private_data && private_data_len)
		memcpy(rej_msg->private_data, private_data, private_data_len);
}
static void cm_dup_req_handler(struct cm_work *work,
			       struct cm_id_private *cm_id_priv)
{
	struct ib_mad_send_buf *msg = NULL;
	struct ib_send_wr *bad_send_wr;
	unsigned long flags;
	int ret;

	/* Quick state check to discard duplicate REQs. */
	if (cm_id_priv->id.state == IB_CM_REQ_RCVD)
		return;

	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
	if (ret)
		return;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_MRA_REQ_SENT:
		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REQ, cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
		break;
	case IB_CM_TIMEWAIT:
		cm_format_rej((struct cm_rej_msg *) msg->mad, cm_id_priv,
			      IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0);
		break;
	default:
		goto unlock;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent, &msg->send_wr,
			       &bad_send_wr);
	if (ret)
		goto free;
	return;

unlock:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
free:	cm_free_msg(msg);
}
static struct cm_id_private * cm_match_req(struct cm_work *work,
					   struct cm_id_private *cm_id_priv)
{
	struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv;
	struct cm_timewait_info *timewait_info;
	struct cm_req_msg *req_msg;
	unsigned long flags;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

	/* Check for duplicate REQ and stale connections. */
	spin_lock_irqsave(&cm.lock, flags);
	timewait_info = cm_insert_remote_id(cm_id_priv->timewait_info);
	if (!timewait_info)
		timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);

	if (timewait_info) {
		cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
					   timewait_info->work.remote_id);
		spin_unlock_irqrestore(&cm.lock, flags);
		if (cur_cm_id_priv) {
			cm_dup_req_handler(work, cur_cm_id_priv);
			cm_deref_id(cur_cm_id_priv);
		} else
			cm_issue_rej(work->port, work->mad_recv_wc,
				     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ,
				     NULL, 0);
		goto error;
	}

	/* Find matching listen request. */
	listen_cm_id_priv = cm_find_listen(req_msg->service_id);
	if (!listen_cm_id_priv) {
		spin_unlock_irqrestore(&cm.lock, flags);
		cm_issue_rej(work->port, work->mad_recv_wc,
			     IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ,
			     NULL, 0);
		goto error;
	}
	atomic_inc(&listen_cm_id_priv->refcount);
	atomic_inc(&cm_id_priv->refcount);
	cm_id_priv->id.state = IB_CM_REQ_RCVD;
	atomic_inc(&cm_id_priv->work_count);
	spin_unlock_irqrestore(&cm.lock, flags);
	return listen_cm_id_priv;

error:	cm_cleanup_timewait(cm_id_priv->timewait_info);
	return NULL;
}
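/*
 * A REQ is matched in two steps under cm.lock: the remote (comm ID, CA
 * GUID) and remote QPN are first inserted into the timewait trees, where
 * a collision means either a duplicate REQ (an active cm_id still exists)
 * or a stale connection (only a timewait entry remains); only then is the
 * service ID looked up among the registered listeners.
 */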
static int cm_req_handler(struct cm_work *work)
{
	struct ib_cm_id *cm_id;
	struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
	struct cm_req_msg *req_msg;
	int ret;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

	cm_id = ib_create_cm_id(NULL, NULL);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	cm_id_priv->id.remote_id = req_msg->local_comm_id;
	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
				&cm_id_priv->av);
	cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
							    id.local_id);
	if (IS_ERR(cm_id_priv->timewait_info)) {
		ret = PTR_ERR(cm_id_priv->timewait_info);
		goto error1;
	}
	cm_id_priv->timewait_info->work.remote_id = req_msg->local_comm_id;
	cm_id_priv->timewait_info->remote_ca_guid = req_msg->local_ca_guid;
	cm_id_priv->timewait_info->remote_qpn = cm_req_get_local_qpn(req_msg);

	listen_cm_id_priv = cm_match_req(work, cm_id_priv);
	if (!listen_cm_id_priv) {
		ret = -EINVAL;
		goto error2;
	}

	cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
	cm_id_priv->id.context = listen_cm_id_priv->id.context;
	cm_id_priv->id.service_id = req_msg->service_id;
	cm_id_priv->id.service_mask = ~0ULL;

	cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);
	ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av);
	if (ret)
		goto error3;
	if (req_msg->alt_local_lid) {
		ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av);
		if (ret)
			goto error3;
	}
	cm_id_priv->tid = req_msg->hdr.tid;
	cm_id_priv->timeout_ms = cm_convert_to_ms(
					cm_req_get_local_resp_timeout(req_msg));
	cm_id_priv->max_cm_retries = cm_req_get_max_cm_retries(req_msg);
	cm_id_priv->remote_qpn = cm_req_get_local_qpn(req_msg);
	cm_id_priv->initiator_depth = cm_req_get_resp_res(req_msg);
	cm_id_priv->responder_resources = cm_req_get_init_depth(req_msg);
	cm_id_priv->path_mtu = cm_req_get_path_mtu(req_msg);
	cm_id_priv->sq_psn = cm_req_get_starting_psn(req_msg);
	cm_id_priv->local_ack_timeout =
				cm_req_get_primary_local_ack_timeout(req_msg);
	cm_id_priv->retry_count = cm_req_get_retry_count(req_msg);
	cm_id_priv->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);

	cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
	cm_process_work(cm_id_priv, work);
	cm_deref_id(listen_cm_id_priv);
	return 0;

error3:	atomic_dec(&cm_id_priv->refcount);
	cm_deref_id(listen_cm_id_priv);
	cm_cleanup_timewait(cm_id_priv->timewait_info);
error2:	kfree(cm_id_priv->timewait_info);
error1:	ib_destroy_cm_id(&cm_id_priv->id);
	return ret;
}
static void cm_format_rep(struct cm_rep_msg *rep_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_cm_rep_param *param)
{
	cm_format_mad_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid);
	rep_msg->local_comm_id = cm_id_priv->id.local_id;
	rep_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_rep_set_local_qpn(rep_msg, cpu_to_be32(param->qp_num));
	cm_rep_set_starting_psn(rep_msg, cpu_to_be32(param->starting_psn));
	rep_msg->resp_resources = param->responder_resources;
	rep_msg->initiator_depth = param->initiator_depth;
	cm_rep_set_target_ack_delay(rep_msg, param->target_ack_delay);
	cm_rep_set_failover(rep_msg, param->failover_accepted);
	cm_rep_set_flow_ctrl(rep_msg, param->flow_control);
	cm_rep_set_rnr_retry_count(rep_msg, param->rnr_retry_count);
	cm_rep_set_srq(rep_msg, param->srq);
	rep_msg->local_ca_guid = cm_id_priv->av.port->cm_dev->ca_guid;

	if (param->private_data && param->private_data_len)
		memcpy(rep_msg->private_data, param->private_data,
		       param->private_data_len);
}

int ib_send_cm_rep(struct ib_cm_id *cm_id,
		   struct ib_cm_rep_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	struct cm_rep_msg *rep_msg;
	struct ib_send_wr *bad_send_wr;
	unsigned long flags;
	int ret;

	if (param->private_data &&
	    param->private_data_len > IB_CM_REP_PRIVATE_DATA_SIZE)
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_REQ_RCVD &&
	    cm_id->state != IB_CM_MRA_REQ_SENT) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	rep_msg = (struct cm_rep_msg *) msg->mad;
	cm_format_rep(rep_msg, cm_id_priv, param);
	msg->send_wr.wr.ud.timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_REP_SENT;

	ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
			       &msg->send_wr, &bad_send_wr);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->state = IB_CM_REP_SENT;
	cm_id_priv->msg = msg;
	cm_id_priv->initiator_depth = param->initiator_depth;
	cm_id_priv->responder_resources = param->responder_resources;
	cm_id_priv->rq_psn = cm_rep_get_starting_psn(rep_msg);
	cm_id_priv->local_qpn = cm_rep_get_local_qpn(rep_msg);

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_rep);
static void cm_format_rtu(struct cm_rtu_msg *rtu_msg,
			  struct cm_id_private *cm_id_priv,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&rtu_msg->hdr, CM_RTU_ATTR_ID, cm_id_priv->tid);
	rtu_msg->local_comm_id = cm_id_priv->id.local_id;
	rtu_msg->remote_comm_id = cm_id_priv->id.remote_id;

	if (private_data && private_data_len)
		memcpy(rtu_msg->private_data, private_data, private_data_len);
}

int ib_send_cm_rtu(struct ib_cm_id *cm_id,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	struct ib_send_wr *bad_send_wr;
	unsigned long flags;
	void *data;
	int ret;

	if (private_data && private_data_len > IB_CM_RTU_PRIVATE_DATA_SIZE)
		return -EINVAL;

	data = cm_copy_private_data(private_data, private_data_len);
	if (IS_ERR(data))
		return PTR_ERR(data);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_REP_RCVD &&
	    cm_id->state != IB_CM_MRA_REP_SENT) {
		ret = -EINVAL;
		goto error;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto error;

	cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
		      private_data, private_data_len);

	ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
			       &msg->send_wr, &bad_send_wr);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		kfree(data);
		return ret;
	}

	cm_id->state = IB_CM_ESTABLISHED;
	cm_set_private_data(cm_id_priv, data, private_data_len);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	kfree(data);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_rtu);
static void cm_format_rep_event(struct cm_work *work)
{
	struct cm_rep_msg *rep_msg;
	struct ib_cm_rep_event_param *param;

	rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.rep_rcvd;
	param->remote_ca_guid = rep_msg->local_ca_guid;
	param->remote_qkey = be32_to_cpu(rep_msg->local_qkey);
	param->remote_qpn = be32_to_cpu(cm_rep_get_local_qpn(rep_msg));
	param->starting_psn = be32_to_cpu(cm_rep_get_starting_psn(rep_msg));
	param->responder_resources = rep_msg->initiator_depth;
	param->initiator_depth = rep_msg->resp_resources;
	param->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
	param->failover_accepted = cm_rep_get_failover(rep_msg);
	param->flow_control = cm_rep_get_flow_ctrl(rep_msg);
	param->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
	param->srq = cm_rep_get_srq(rep_msg);
	work->cm_event.private_data = &rep_msg->private_data;
}
static void cm_dup_rep_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rep_msg *rep_msg;
	struct ib_mad_send_buf *msg = NULL;
	struct ib_send_wr *bad_send_wr;
	unsigned long flags;
	int ret;

	rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id,
				   rep_msg->local_comm_id);
	if (!cm_id_priv)
		return;

	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
	if (ret)
		goto deref;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state == IB_CM_ESTABLISHED)
		cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
	else if (cm_id_priv->id.state == IB_CM_MRA_REP_SENT)
		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REP, cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
	else
		goto unlock;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent, &msg->send_wr,
			       &bad_send_wr);
	if (ret)
		goto free;
	goto deref;

unlock:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
free:	cm_free_msg(msg);
deref:	cm_deref_id(cm_id_priv);
}
static int cm_rep_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rep_msg *rep_msg;
	unsigned long flags;
	int ret;

	rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id, 0);
	if (!cm_id_priv) {
		cm_dup_rep_handler(work);
		return -EINVAL;
	}

	cm_id_priv->timewait_info->work.remote_id = rep_msg->local_comm_id;
	cm_id_priv->timewait_info->remote_ca_guid = rep_msg->local_ca_guid;
	cm_id_priv->timewait_info->remote_qpn = cm_rep_get_local_qpn(rep_msg);

	spin_lock_irqsave(&cm.lock, flags);
	/* Check for duplicate REP. */
	if (cm_insert_remote_id(cm_id_priv->timewait_info)) {
		spin_unlock_irqrestore(&cm.lock, flags);
		ret = -EINVAL;
		goto error;
	}
	/* Check for a stale connection. */
	if (cm_insert_remote_qpn(cm_id_priv->timewait_info)) {
		spin_unlock_irqrestore(&cm.lock, flags);
		cm_issue_rej(work->port, work->mad_recv_wc,
			     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,
			     NULL, 0);
		ret = -EINVAL;
		goto error;
	}
	spin_unlock_irqrestore(&cm.lock, flags);

	cm_format_rep_event(work);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
		break;
	default:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = -EINVAL;
		goto error;
	}
	cm_id_priv->id.state = IB_CM_REP_RCVD;
	cm_id_priv->id.remote_id = rep_msg->local_comm_id;
	cm_id_priv->remote_qpn = cm_rep_get_local_qpn(rep_msg);
	cm_id_priv->initiator_depth = rep_msg->resp_resources;
	cm_id_priv->responder_resources = rep_msg->initiator_depth;
	cm_id_priv->sq_psn = cm_rep_get_starting_psn(rep_msg);
	cm_id_priv->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);

	/* todo: handle peer_to_peer */

	ib_cancel_mad(cm_id_priv->av.port->mad_agent,
		      (unsigned long) cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;

error:	cm_cleanup_timewait(cm_id_priv->timewait_info);
	cm_deref_id(cm_id_priv);
	return ret;
}
static int cm_establish_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	unsigned long flags;
	int ret;

	/* See comment in ib_cm_establish about lookup. */
	cm_id_priv = cm_acquire_id(work->local_id, work->remote_id);
	if (!cm_id_priv)
		return -EINVAL;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto out;
	}

	ib_cancel_mad(cm_id_priv->av.port->mad_agent,
		      (unsigned long) cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
static int cm_rtu_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rtu_msg *rtu_msg;
	unsigned long flags;
	int ret;

	rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(rtu_msg->remote_comm_id,
				   rtu_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL;

	work->cm_event.private_data = &rtu_msg->private_data;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_REP_SENT &&
	    cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto out;
	}
	cm_id_priv->id.state = IB_CM_ESTABLISHED;

	ib_cancel_mad(cm_id_priv->av.port->mad_agent,
		      (unsigned long) cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
static void cm_format_dreq(struct cm_dreq_msg *dreq_msg,
			   struct cm_id_private *cm_id_priv,
			   const void *private_data,
			   u8 private_data_len)
{
	cm_format_mad_hdr(&dreq_msg->hdr, CM_DREQ_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_DREQ));
	dreq_msg->local_comm_id = cm_id_priv->id.local_id;
	dreq_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_dreq_set_remote_qpn(dreq_msg, cm_id_priv->remote_qpn);

	if (private_data && private_data_len)
		memcpy(dreq_msg->private_data, private_data, private_data_len);
}
int ib_send_cm_dreq(struct ib_cm_id *cm_id,
		    const void *private_data,
		    u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	struct ib_send_wr *bad_send_wr;
	unsigned long flags;
	int ret;

	if (private_data && private_data_len > IB_CM_DREQ_PRIVATE_DATA_SIZE)
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_ESTABLISHED) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret) {
		cm_enter_timewait(cm_id_priv);
		goto out;
	}

	cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv,
		       private_data, private_data_len);
	msg->send_wr.wr.ud.timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_DREQ_SENT;

	ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
			       &msg->send_wr, &bad_send_wr);
	if (ret) {
		cm_enter_timewait(cm_id_priv);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->state = IB_CM_DREQ_SENT;
	cm_id_priv->msg = msg;
out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_dreq);
static void cm_format_drep(struct cm_drep_msg *drep_msg,
			   struct cm_id_private *cm_id_priv,
			   const void *private_data,
			   u8 private_data_len)
{
	cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, cm_id_priv->tid);
	drep_msg->local_comm_id = cm_id_priv->id.local_id;
	drep_msg->remote_comm_id = cm_id_priv->id.remote_id;

	if (private_data && private_data_len)
		memcpy(drep_msg->private_data, private_data, private_data_len);
}

int ib_send_cm_drep(struct ib_cm_id *cm_id,
		    const void *private_data,
		    u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	struct ib_send_wr *bad_send_wr;
	unsigned long flags;
	void *data;
	int ret;

	if (private_data && private_data_len > IB_CM_DREP_PRIVATE_DATA_SIZE)
		return -EINVAL;

	data = cm_copy_private_data(private_data, private_data_len);
	if (IS_ERR(data))
		return PTR_ERR(data);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_DREQ_RCVD) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		kfree(data);
		return -EINVAL;
	}

	cm_set_private_data(cm_id_priv, data, private_data_len);
	cm_enter_timewait(cm_id_priv);

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
		       private_data, private_data_len);

	ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent, &msg->send_wr,
			       &bad_send_wr);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_drep);
static int cm_dreq_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_dreq_msg *dreq_msg;
	struct ib_mad_send_buf *msg = NULL;
	struct ib_send_wr *bad_send_wr;
	unsigned long flags;
	int ret;

	dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
				   dreq_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL;

	work->cm_event.private_data = &dreq_msg->private_data;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->local_qpn != cm_dreq_get_remote_qpn(dreq_msg))
		goto unlock;

	switch (cm_id_priv->id.state) {
	case IB_CM_REP_SENT:
	case IB_CM_DREQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent,
			      (unsigned long) cm_id_priv->msg);
		break;
	case IB_CM_ESTABLISHED:
	case IB_CM_MRA_REP_RCVD:
		break;
	case IB_CM_TIMEWAIT:
		if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
			goto unlock;

		cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
			       cm_id_priv->private_data,
			       cm_id_priv->private_data_len);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);

		if (ib_post_send_mad(cm_id_priv->av.port->mad_agent,
				     &msg->send_wr, &bad_send_wr))
			cm_free_msg(msg);
		goto deref;
	default:
		goto unlock;
	}
	cm_id_priv->id.state = IB_CM_DREQ_RCVD;
	cm_id_priv->tid = dreq_msg->hdr.tid;
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;

unlock:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
deref:	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
static int cm_drep_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_drep_msg *drep_msg;
	unsigned long flags;
	int ret;

	drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(drep_msg->remote_comm_id,
				   drep_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL;

	work->cm_event.private_data = &drep_msg->private_data;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_DREQ_SENT &&
	    cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto out;
	}
	cm_enter_timewait(cm_id_priv);

	ib_cancel_mad(cm_id_priv->av.port->mad_agent,
		      (unsigned long) cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
int ib_send_cm_rej(struct ib_cm_id *cm_id,
		   enum ib_cm_rej_reason reason,
		   void *ari,
		   u8 ari_length,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	struct ib_send_wr *bad_send_wr;
	unsigned long flags;
	int ret;

	if ((private_data && private_data_len > IB_CM_REJ_PRIVATE_DATA_SIZE) ||
	    (ari && ari_length > IB_CM_REJ_ARI_LENGTH))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id->state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (!ret)
			cm_format_rej((struct cm_rej_msg *) msg->mad,
				      cm_id_priv, reason, ari, ari_length,
				      private_data, private_data_len);

		cm_reset_to_idle(cm_id_priv);
		break;
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (!ret)
			cm_format_rej((struct cm_rej_msg *) msg->mad,
				      cm_id_priv, reason, ari, ari_length,
				      private_data, private_data_len);

		cm_enter_timewait(cm_id_priv);
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

	if (ret)
		goto out;

	ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
			       &msg->send_wr, &bad_send_wr);
	if (ret)
		cm_free_msg(msg);

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_rej);
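/* REJ reception: translate the REJ MAD into an event and locate the cm_id. */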
static void cm_format_rej_event(struct cm_work *work)
{
	struct cm_rej_msg *rej_msg;
	struct ib_cm_rej_event_param *param;

	rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.rej_rcvd;
	param->ari = rej_msg->ari;
	param->ari_length = cm_rej_get_reject_info_len(rej_msg);
	param->reason = rej_msg->reason;
	work->cm_event.private_data = &rej_msg->private_data;
}
static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
{
	struct cm_timewait_info *timewait_info;
	struct cm_id_private *cm_id_priv;
	unsigned long flags;
	u32 remote_id;

	remote_id = rej_msg->local_comm_id;

	if (rej_msg->reason == IB_CM_REJ_TIMEOUT) {
		spin_lock_irqsave(&cm.lock, flags);
		timewait_info = cm_find_remote_id( *((u64 *) rej_msg->ari),
						  remote_id);
		if (!timewait_info) {
			spin_unlock_irqrestore(&cm.lock, flags);
			return NULL;
		}
		cm_id_priv = idr_find(&cm.local_id_table,
				      (int) timewait_info->work.local_id);
		if (cm_id_priv) {
			if (cm_id_priv->id.remote_id == remote_id)
				atomic_inc(&cm_id_priv->refcount);
			else
				cm_id_priv = NULL;
		}
		spin_unlock_irqrestore(&cm.lock, flags);
	} else if (cm_rej_get_msg_rejected(rej_msg) == CM_MSG_RESPONSE_REQ)
		cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, 0);
	else
		cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, remote_id);

	return cm_id_priv;
}
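/*
 * A REJ can arrive at almost any point of connection establishment: cancel
 * any outstanding MAD for the rejected message, then either reset the cm_id
 * to idle or park it in timewait (stale connections) before reporting the
 * event to the consumer.
 */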
static int cm_rej_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rej_msg *rej_msg;
	unsigned long flags;
	int ret;

	rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_rejected_id(rej_msg);
	if (!cm_id_priv)
		return -EINVAL;

	cm_format_rej_event(work);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent,
			      (unsigned long) cm_id_priv->msg);
		/* fall through */
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
		if (rej_msg->reason == IB_CM_REJ_STALE_CONN)
			cm_enter_timewait(cm_id_priv);
		else
			cm_reset_to_idle(cm_id_priv);
		break;
	case IB_CM_DREQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent,
			      (unsigned long) cm_id_priv->msg);
		/* fall through */
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_ESTABLISHED:
		cm_enter_timewait(cm_id_priv);
		break;
	default:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = -EINVAL;
		goto out;
	}

	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
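/*
 * ib_send_cm_mra() - ask the peer to extend its timeout while we process a
 * REQ, REP, or LAP.  The private data is cached so it can be resent with an
 * MRA generated for a duplicate message.
 */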
int ib_send_cm_mra(struct ib_cm_id *cm_id,
		   u8 service_timeout,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	struct ib_send_wr *bad_send_wr;
	void *data;
	unsigned long flags;
	int ret;

	if (private_data && private_data_len > IB_CM_MRA_PRIVATE_DATA_SIZE)
		return -EINVAL;

	data = cm_copy_private_data(private_data, private_data_len);
	if (IS_ERR(data))
		return PTR_ERR(data);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch(cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (ret)
			goto error1;

		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REQ, service_timeout,
			      private_data, private_data_len);
		ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
				       &msg->send_wr, &bad_send_wr);
		if (ret)
			goto error2;
		cm_id->state = IB_CM_MRA_REQ_SENT;
		break;
	case IB_CM_REP_RCVD:
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (ret)
			goto error1;

		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REP, service_timeout,
			      private_data, private_data_len);
		ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
				       &msg->send_wr, &bad_send_wr);
		if (ret)
			goto error2;
		cm_id->state = IB_CM_MRA_REP_SENT;
		break;
	case IB_CM_ESTABLISHED:
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (ret)
			goto error1;

		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_OTHER, service_timeout,
			      private_data, private_data_len);
		ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
				       &msg->send_wr, &bad_send_wr);
		if (ret)
			goto error2;
		cm_id->lap_state = IB_CM_MRA_LAP_SENT;
		break;
	default:
		ret = -EINVAL;
		goto error1;
	}
	cm_id_priv->service_timeout = service_timeout;
	cm_set_private_data(cm_id_priv, data, private_data_len);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error1:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	kfree(data);
	return ret;

error2:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	kfree(data);
	cm_free_msg(msg);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_mra);
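/*
 * MRA reception: an MRA for a REQ arrives before the remote comm ID is
 * known, so the lookup key depends on which message was MRAed.
 */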
static struct cm_id_private * cm_acquire_mraed_id(struct cm_mra_msg *mra_msg)
{
	switch (cm_mra_get_msg_mraed(mra_msg)) {
	case CM_MSG_RESPONSE_REQ:
		return cm_acquire_id(mra_msg->remote_comm_id, 0);
	case CM_MSG_RESPONSE_REP:
	case CM_MSG_RESPONSE_OTHER:
		return cm_acquire_id(mra_msg->remote_comm_id,
				     mra_msg->local_comm_id);
	default:
		return NULL;
	}
}
static int cm_mra_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_mra_msg *mra_msg;
	unsigned long flags;
	int timeout, ret;

	mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_mraed_id(mra_msg);
	if (!cm_id_priv)
		return -EINVAL;

	work->cm_event.private_data = &mra_msg->private_data;
	work->cm_event.param.mra_rcvd.service_timeout =
					cm_mra_get_service_timeout(mra_msg);
	timeout = cm_convert_to_ms(cm_mra_get_service_timeout(mra_msg)) +
		  cm_convert_to_ms(cm_id_priv->av.packet_life_time);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REQ ||
		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
				  (unsigned long) cm_id_priv->msg, timeout))
			goto out;
		cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD;
		break;
	case IB_CM_REP_SENT:
		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REP ||
		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
				  (unsigned long) cm_id_priv->msg, timeout))
			goto out;
		cm_id_priv->id.state = IB_CM_MRA_REP_RCVD;
		break;
	case IB_CM_ESTABLISHED:
		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_OTHER ||
		    cm_id_priv->id.lap_state != IB_CM_LAP_SENT ||
		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
				  (unsigned long) cm_id_priv->msg, timeout))
			goto out;
		cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD;
		break;
	default:
		goto out;
	}

	cm_id_priv->msg->context[1] = (void *) (unsigned long)
				      cm_id_priv->id.state;
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
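/*
 * Alternate path (APM) support: a LAP proposes a new path for an
 * established connection; an APR accepts or rejects it.
 */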
static void cm_format_lap(struct cm_lap_msg *lap_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_sa_path_rec *alternate_path,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&lap_msg->hdr, CM_LAP_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_LAP));
	lap_msg->local_comm_id = cm_id_priv->id.local_id;
	lap_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_lap_set_remote_qpn(lap_msg, cm_id_priv->remote_qpn);
	/* todo: need remote CM response timeout */
	cm_lap_set_remote_resp_timeout(lap_msg, 0x1F);
	lap_msg->alt_local_lid = alternate_path->slid;
	lap_msg->alt_remote_lid = alternate_path->dlid;
	lap_msg->alt_local_gid = alternate_path->sgid;
	lap_msg->alt_remote_gid = alternate_path->dgid;
	cm_lap_set_flow_label(lap_msg, alternate_path->flow_label);
	cm_lap_set_traffic_class(lap_msg, alternate_path->traffic_class);
	lap_msg->alt_hop_limit = alternate_path->hop_limit;
	cm_lap_set_packet_rate(lap_msg, alternate_path->rate);
	cm_lap_set_sl(lap_msg, alternate_path->sl);
	cm_lap_set_subnet_local(lap_msg, 1); /* local only... */
	cm_lap_set_local_ack_timeout(lap_msg,
		min(31, alternate_path->packet_life_time + 1));

	if (private_data && private_data_len)
		memcpy(lap_msg->private_data, private_data, private_data_len);
}
int ib_send_cm_lap(struct ib_cm_id *cm_id,
		   struct ib_sa_path_rec *alternate_path,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	struct ib_send_wr *bad_send_wr;
	unsigned long flags;
	int ret;

	if (private_data && private_data_len > IB_CM_LAP_PRIVATE_DATA_SIZE)
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_ESTABLISHED ||
	    cm_id->lap_state != IB_CM_LAP_IDLE) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	cm_format_lap((struct cm_lap_msg *) msg->mad, cm_id_priv,
		      alternate_path, private_data, private_data_len);
	msg->send_wr.wr.ud.timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_ESTABLISHED;

	ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
			       &msg->send_wr, &bad_send_wr);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->lap_state = IB_CM_LAP_SENT;
	cm_id_priv->msg = msg;

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_lap);
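/*
 * Rebuild a path record from a received LAP.  The LAP describes the path
 * from the sender's point of view, so local/remote fields are swapped.
 */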
static void cm_format_path_from_lap(struct ib_sa_path_rec *path,
				    struct cm_lap_msg *lap_msg)
{
	memset(path, 0, sizeof *path);
	path->dgid = lap_msg->alt_local_gid;
	path->sgid = lap_msg->alt_remote_gid;
	path->dlid = lap_msg->alt_local_lid;
	path->slid = lap_msg->alt_remote_lid;
	path->flow_label = cm_lap_get_flow_label(lap_msg);
	path->hop_limit = lap_msg->alt_hop_limit;
	path->traffic_class = cm_lap_get_traffic_class(lap_msg);
	path->reversible = 1;
	/* pkey is same as in REQ */
	path->sl = cm_lap_get_sl(lap_msg);
	path->mtu_selector = IB_SA_EQ;
	/* mtu is same as in REQ */
	path->rate_selector = IB_SA_EQ;
	path->rate = cm_lap_get_packet_rate(lap_msg);
	path->packet_life_time_selector = IB_SA_EQ;
	path->packet_life_time = cm_lap_get_local_ack_timeout(lap_msg);
	path->packet_life_time -= (path->packet_life_time > 0);
}
static int cm_lap_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_lap_msg *lap_msg;
	struct ib_cm_lap_event_param *param;
	struct ib_mad_send_buf *msg = NULL;
	struct ib_send_wr *bad_send_wr;
	unsigned long flags;
	int ret;

	/* todo: verify LAP request and send reject APR if invalid. */
	lap_msg = (struct cm_lap_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(lap_msg->remote_comm_id,
				   lap_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL;

	param = &work->cm_event.param.lap_rcvd;
	param->alternate_path = &work->path[0];
	cm_format_path_from_lap(param->alternate_path, lap_msg);
	work->cm_event.private_data = &lap_msg->private_data;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_ESTABLISHED)
		goto unlock;

	switch (cm_id_priv->id.lap_state) {
	case IB_CM_LAP_IDLE:
		break;
	case IB_CM_MRA_LAP_SENT:
		if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
			goto unlock;

		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_OTHER,
			      cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);

		if (ib_post_send_mad(cm_id_priv->av.port->mad_agent,
				     &msg->send_wr, &bad_send_wr))
			cm_free_msg(msg);
		goto deref;
	default:
		goto unlock;
	}

	cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
	cm_id_priv->tid = lap_msg->hdr.tid;
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;

unlock:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
deref:	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
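/* APR send path: report acceptance or rejection of a proposed alternate path. */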
static void cm_format_apr(struct cm_apr_msg *apr_msg,
			  struct cm_id_private *cm_id_priv,
			  enum ib_cm_apr_status status,
			  void *info,
			  u8 info_length,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&apr_msg->hdr, CM_APR_ATTR_ID, cm_id_priv->tid);
	apr_msg->local_comm_id = cm_id_priv->id.local_id;
	apr_msg->remote_comm_id = cm_id_priv->id.remote_id;
	apr_msg->ap_status = (u8) status;

	if (info && info_length) {
		apr_msg->info_length = info_length;
		memcpy(apr_msg->info, info, info_length);
	}

	if (private_data && private_data_len)
		memcpy(apr_msg->private_data, private_data, private_data_len);
}
int ib_send_cm_apr(struct ib_cm_id *cm_id,
		   enum ib_cm_apr_status status,
		   void *info,
		   u8 info_length,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	struct ib_send_wr *bad_send_wr;
	unsigned long flags;
	int ret;

	if ((private_data && private_data_len > IB_CM_APR_PRIVATE_DATA_SIZE) ||
	    (info && info_length > IB_CM_APR_INFO_LENGTH))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_ESTABLISHED ||
	    (cm_id->lap_state != IB_CM_LAP_RCVD &&
	     cm_id->lap_state != IB_CM_MRA_LAP_SENT)) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	cm_format_apr((struct cm_apr_msg *) msg->mad, cm_id_priv, status,
		      info, info_length, private_data, private_data_len);
	ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
			       &msg->send_wr, &bad_send_wr);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->lap_state = IB_CM_LAP_IDLE;
out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_apr);
static int cm_apr_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_apr_msg *apr_msg;
	unsigned long flags;
	int ret;

	apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(apr_msg->remote_comm_id,
				   apr_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL; /* Unmatched reply. */

	work->cm_event.param.apr_rcvd.ap_status = apr_msg->ap_status;
	work->cm_event.param.apr_rcvd.apr_info = &apr_msg->info;
	work->cm_event.param.apr_rcvd.info_len = apr_msg->info_length;
	work->cm_event.private_data = &apr_msg->private_data;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_ESTABLISHED ||
	    (cm_id_priv->id.lap_state != IB_CM_LAP_SENT &&
	     cm_id_priv->id.lap_state != IB_CM_MRA_LAP_RCVD)) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto out;
	}
	cm_id_priv->id.lap_state = IB_CM_LAP_IDLE;
	ib_cancel_mad(cm_id_priv->av.port->mad_agent,
		      (unsigned long) cm_id_priv->msg);
	cm_id_priv->msg = NULL;

	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
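/*
 * Timewait expiration: runs when the timewait work item fires.  If the
 * cm_id is still in timewait for the same remote QPN, return it to idle.
 */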
static int cm_timewait_handler(struct cm_work *work)
{
	struct cm_timewait_info *timewait_info;
	struct cm_id_private *cm_id_priv;
	unsigned long flags;
	int ret;

	timewait_info = (struct cm_timewait_info *)work;
	cm_cleanup_timewait(timewait_info);

	cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
				   timewait_info->work.remote_id);
	if (!cm_id_priv)
		return -EINVAL;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_TIMEWAIT ||
	    cm_id_priv->remote_qpn != timewait_info->remote_qpn) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto out;
	}
	cm_id_priv->id.state = IB_CM_IDLE;
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
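/*
 * SIDR (Service ID Resolution): a UD client queries a listening service
 * for its QPN and Q_Key instead of establishing a connection.
 */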
static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg,
			       struct cm_id_private *cm_id_priv,
			       struct ib_cm_sidr_req_param *param)
{
	cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_SIDR));
	sidr_req_msg->request_id = cm_id_priv->id.local_id;
	sidr_req_msg->pkey = param->pkey;
	sidr_req_msg->service_id = param->service_id;

	if (param->private_data && param->private_data_len)
		memcpy(sidr_req_msg->private_data, param->private_data,
		       param->private_data_len);
}
int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
			struct ib_cm_sidr_req_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	struct ib_send_wr *bad_send_wr;
	unsigned long flags;
	int ret;

	if (!param->path || (param->private_data &&
	     param->private_data_len > IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	ret = cm_init_av_by_path(param->path, &cm_id_priv->av);
	if (ret)
		goto out;

	cm_id->service_id = param->service_id;
	cm_id->service_mask = ~0ULL;
	cm_id_priv->timeout_ms = param->timeout_ms;
	cm_id_priv->max_cm_retries = param->max_cm_retries;
	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	cm_format_sidr_req((struct cm_sidr_req_msg *) msg->mad, cm_id_priv,
			   param);
	msg->send_wr.wr.ud.timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_SIDR_REQ_SENT;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state == IB_CM_IDLE)
		ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
				       &msg->send_wr, &bad_send_wr);
	else
		ret = -EINVAL;

	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		goto out;
	}
	cm_id->state = IB_CM_SIDR_REQ_SENT;
	cm_id_priv->msg = msg;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
out:
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_sidr_req);
static void cm_format_sidr_req_event(struct cm_work *work,
				     struct ib_cm_id *listen_id)
{
	struct cm_sidr_req_msg *sidr_req_msg;
	struct ib_cm_sidr_req_event_param *param;

	sidr_req_msg = (struct cm_sidr_req_msg *)
				work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.sidr_req_rcvd;
	param->pkey = sidr_req_msg->pkey;
	param->listen_id = listen_id;
	param->device = work->port->mad_agent->device;
	param->port = work->port->port_num;
	work->cm_event.private_data = &sidr_req_msg->private_data;
}
static int cm_sidr_req_handler(struct cm_work *work)
{
	struct ib_cm_id *cm_id;
	struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
	struct cm_sidr_req_msg *sidr_req_msg;
	struct ib_wc *wc;
	unsigned long flags;

	cm_id = ib_create_cm_id(NULL, NULL);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);
	cm_id_priv = container_of(cm_id, struct cm_id_private, id);

	/* Record SGID/SLID and request ID for lookup. */
	sidr_req_msg = (struct cm_sidr_req_msg *)
				work->mad_recv_wc->recv_buf.mad;
	wc = work->mad_recv_wc->wc;
	cm_id_priv->av.dgid.global.subnet_prefix = wc->slid;
	cm_id_priv->av.dgid.global.interface_id = 0;
	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
				&cm_id_priv->av);
	cm_id_priv->id.remote_id = sidr_req_msg->request_id;
	cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD;
	cm_id_priv->tid = sidr_req_msg->hdr.tid;
	atomic_inc(&cm_id_priv->work_count);

	spin_lock_irqsave(&cm.lock, flags);
	cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
	if (cur_cm_id_priv) {
		spin_unlock_irqrestore(&cm.lock, flags);
		goto out; /* Duplicate message. */
	}
	cur_cm_id_priv = cm_find_listen(sidr_req_msg->service_id);
	if (!cur_cm_id_priv) {
		rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
		spin_unlock_irqrestore(&cm.lock, flags);
		/* todo: reply with no match */
		goto out; /* No match. */
	}
	atomic_inc(&cur_cm_id_priv->refcount);
	spin_unlock_irqrestore(&cm.lock, flags);

	cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler;
	cm_id_priv->id.context = cur_cm_id_priv->id.context;
	cm_id_priv->id.service_id = sidr_req_msg->service_id;
	cm_id_priv->id.service_mask = ~0ULL;

	cm_format_sidr_req_event(work, &cur_cm_id_priv->id);
	cm_process_work(cm_id_priv, work);
	cm_deref_id(cur_cm_id_priv);
	return 0;
out:
	ib_destroy_cm_id(&cm_id_priv->id);
	return -EINVAL;
}
static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg,
			       struct cm_id_private *cm_id_priv,
			       struct ib_cm_sidr_rep_param *param)
{
	cm_format_mad_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID,
			  cm_id_priv->tid);
	sidr_rep_msg->request_id = cm_id_priv->id.remote_id;
	sidr_rep_msg->status = param->status;
	cm_sidr_rep_set_qpn(sidr_rep_msg, cpu_to_be32(param->qp_num));
	sidr_rep_msg->service_id = cm_id_priv->id.service_id;
	sidr_rep_msg->qkey = cpu_to_be32(param->qkey);

	if (param->info && param->info_length)
		memcpy(sidr_rep_msg->info, param->info, param->info_length);

	if (param->private_data && param->private_data_len)
		memcpy(sidr_rep_msg->private_data, param->private_data,
		       param->private_data_len);
}
int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
			struct ib_cm_sidr_rep_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	struct ib_send_wr *bad_send_wr;
	unsigned long flags;
	int ret;

	if ((param->info && param->info_length > IB_CM_SIDR_REP_INFO_LENGTH) ||
	    (param->private_data &&
	     param->private_data_len > IB_CM_SIDR_REP_PRIVATE_DATA_SIZE))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_SIDR_REQ_RCVD) {
		ret = -EINVAL;
		goto error;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto error;

	cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv,
			   param);
	ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
			       &msg->send_wr, &bad_send_wr);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}
	cm_id->state = IB_CM_IDLE;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	spin_lock_irqsave(&cm.lock, flags);
	rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
	spin_unlock_irqrestore(&cm.lock, flags);
	return 0;

error:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_sidr_rep);
static void cm_format_sidr_rep_event(struct cm_work *work)
{
	struct cm_sidr_rep_msg *sidr_rep_msg;
	struct ib_cm_sidr_rep_event_param *param;

	sidr_rep_msg = (struct cm_sidr_rep_msg *)
				work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.sidr_rep_rcvd;
	param->status = sidr_rep_msg->status;
	param->qkey = be32_to_cpu(sidr_rep_msg->qkey);
	param->qpn = be32_to_cpu(cm_sidr_rep_get_qpn(sidr_rep_msg));
	param->info = &sidr_rep_msg->info;
	param->info_len = sidr_rep_msg->info_length;
	work->cm_event.private_data = &sidr_rep_msg->private_data;
}
static int cm_sidr_rep_handler(struct cm_work *work)
{
	struct cm_sidr_rep_msg *sidr_rep_msg;
	struct cm_id_private *cm_id_priv;
	unsigned long flags;

	sidr_rep_msg = (struct cm_sidr_rep_msg *)
				work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(sidr_rep_msg->request_id, 0);
	if (!cm_id_priv)
		return -EINVAL; /* Unmatched reply. */

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_SIDR_REQ_SENT) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto out;
	}
	cm_id_priv->id.state = IB_CM_IDLE;
	ib_cancel_mad(cm_id_priv->av.port->mad_agent,
		      (unsigned long) cm_id_priv->msg);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	cm_format_sidr_rep_event(work);
	cm_process_work(cm_id_priv, work);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
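/*
 * Send completion error handling: a failed send is reported to the
 * consumer only if it still matches the cm_id's current message and state.
 */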
static void cm_process_send_error(struct ib_mad_send_buf *msg,
				  enum ib_wc_status wc_status)
{
	struct cm_id_private *cm_id_priv;
	struct ib_cm_event cm_event;
	enum ib_cm_state state;
	unsigned long flags;
	int ret;

	memset(&cm_event, 0, sizeof cm_event);
	cm_id_priv = msg->context[0];

	/* Discard old sends or ones without a response. */
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	state = (enum ib_cm_state) (unsigned long) msg->context[1];
	if (msg != cm_id_priv->msg || state != cm_id_priv->id.state)
		goto discard;

	switch (state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
		cm_reset_to_idle(cm_id_priv);
		cm_event.event = IB_CM_REQ_ERROR;
		break;
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		cm_reset_to_idle(cm_id_priv);
		cm_event.event = IB_CM_REP_ERROR;
		break;
	case IB_CM_DREQ_SENT:
		cm_enter_timewait(cm_id_priv);
		cm_event.event = IB_CM_DREQ_ERROR;
		break;
	case IB_CM_SIDR_REQ_SENT:
		cm_id_priv->id.state = IB_CM_IDLE;
		cm_event.event = IB_CM_SIDR_REQ_ERROR;
		break;
	default:
		goto discard;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	cm_event.param.send_status = wc_status;

	/* No other events can occur on the cm_id at this point. */
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &cm_event);
	cm_free_msg(msg);
	if (ret)
		ib_destroy_cm_id(&cm_id_priv->id);
	return;
discard:
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	cm_free_msg(msg);
}
static void cm_send_handler(struct ib_mad_agent *mad_agent,
			    struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_mad_send_buf *msg;

	msg = (struct ib_mad_send_buf *)(unsigned long)mad_send_wc->wr_id;

	switch (mad_send_wc->status) {
	case IB_WC_SUCCESS:
	case IB_WC_WR_FLUSH_ERR:
		cm_free_msg(msg);
		break;
	default:
		if (msg->context[0] && msg->context[1])
			cm_process_send_error(msg, mad_send_wc->status);
		else
			cm_free_msg(msg);
		break;
	}
}
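/* Work queue dispatch: route each queued CM event to its handler. */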
static void cm_work_handler(void *data)
{
	struct cm_work *work = data;
	int ret;

	switch (work->cm_event.event) {
	case IB_CM_REQ_RECEIVED:
		ret = cm_req_handler(work);
		break;
	case IB_CM_MRA_RECEIVED:
		ret = cm_mra_handler(work);
		break;
	case IB_CM_REJ_RECEIVED:
		ret = cm_rej_handler(work);
		break;
	case IB_CM_REP_RECEIVED:
		ret = cm_rep_handler(work);
		break;
	case IB_CM_RTU_RECEIVED:
		ret = cm_rtu_handler(work);
		break;
	case IB_CM_USER_ESTABLISHED:
		ret = cm_establish_handler(work);
		break;
	case IB_CM_DREQ_RECEIVED:
		ret = cm_dreq_handler(work);
		break;
	case IB_CM_DREP_RECEIVED:
		ret = cm_drep_handler(work);
		break;
	case IB_CM_SIDR_REQ_RECEIVED:
		ret = cm_sidr_req_handler(work);
		break;
	case IB_CM_SIDR_REP_RECEIVED:
		ret = cm_sidr_rep_handler(work);
		break;
	case IB_CM_LAP_RECEIVED:
		ret = cm_lap_handler(work);
		break;
	case IB_CM_APR_RECEIVED:
		ret = cm_apr_handler(work);
		break;
	case IB_CM_TIMEWAIT_EXIT:
		ret = cm_timewait_handler(work);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	if (ret)
		cm_free_work(work);
}
int ib_cm_establish(struct ib_cm_id *cm_id)
{
	struct cm_id_private *cm_id_priv;
	struct cm_work *work;
	unsigned long flags;
	int ret = 0;

	work = kmalloc(sizeof *work, GFP_ATOMIC);
	if (!work)
		return -ENOMEM;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id->state)
	{
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		cm_id->state = IB_CM_ESTABLISHED;
		break;
	case IB_CM_ESTABLISHED:
		ret = -EISCONN;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret) {
		kfree(work);
		goto out;
	}

	/*
	 * The CM worker thread may try to destroy the cm_id before it
	 * can execute this work item.  To prevent potential deadlock,
	 * we need to find the cm_id once we're in the context of the
	 * worker thread, rather than holding a reference on it.
	 */
	INIT_WORK(&work->work, cm_work_handler, work);
	work->local_id = cm_id->local_id;
	work->remote_id = cm_id->remote_id;
	work->mad_recv_wc = NULL;
	work->cm_event.event = IB_CM_USER_ESTABLISHED;
	queue_work(cm.wq, &work->work);
out:
	return ret;
}
EXPORT_SYMBOL(ib_cm_establish);
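/*
 * MAD receive path: map the attribute ID to a CM event, allocate a work
 * item (with room for any path records), and hand it to the workqueue.
 */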
static void cm_recv_handler(struct ib_mad_agent *mad_agent,
			    struct ib_mad_recv_wc *mad_recv_wc)
{
	struct cm_work *work;
	enum ib_cm_event_type event;
	int paths = 0;

	switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
	case CM_REQ_ATTR_ID:
		paths = 1 + (((struct cm_req_msg *) mad_recv_wc->recv_buf.mad)->
						    alt_local_lid != 0);
		event = IB_CM_REQ_RECEIVED;
		break;
	case CM_MRA_ATTR_ID:
		event = IB_CM_MRA_RECEIVED;
		break;
	case CM_REJ_ATTR_ID:
		event = IB_CM_REJ_RECEIVED;
		break;
	case CM_REP_ATTR_ID:
		event = IB_CM_REP_RECEIVED;
		break;
	case CM_RTU_ATTR_ID:
		event = IB_CM_RTU_RECEIVED;
		break;
	case CM_DREQ_ATTR_ID:
		event = IB_CM_DREQ_RECEIVED;
		break;
	case CM_DREP_ATTR_ID:
		event = IB_CM_DREP_RECEIVED;
		break;
	case CM_SIDR_REQ_ATTR_ID:
		event = IB_CM_SIDR_REQ_RECEIVED;
		break;
	case CM_SIDR_REP_ATTR_ID:
		event = IB_CM_SIDR_REP_RECEIVED;
		break;
	case CM_LAP_ATTR_ID:
		paths = 1;
		event = IB_CM_LAP_RECEIVED;
		break;
	case CM_APR_ATTR_ID:
		event = IB_CM_APR_RECEIVED;
		break;
	default:
		ib_free_recv_mad(mad_recv_wc);
		return;
	}

	work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
		       GFP_KERNEL);
	if (!work) {
		ib_free_recv_mad(mad_recv_wc);
		return;
	}

	INIT_WORK(&work->work, cm_work_handler, work);
	work->cm_event.event = event;
	work->mad_recv_wc = mad_recv_wc;
	work->port = (struct cm_port *)mad_agent->context;
	queue_work(cm.wq, &work->work);
}
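/*
 * QP attribute helpers: fill in the attributes a consumer needs to
 * transition its QP through INIT, RTR, and RTS for this connection.
 */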
static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
				struct ib_qp_attr *qp_attr,
				int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS |
				IB_QP_PKEY_INDEX | IB_QP_PORT;
		qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE;
		if (cm_id_priv->responder_resources)
			qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_WRITE |
						    IB_ACCESS_REMOTE_READ;
		qp_attr->pkey_index = cm_id_priv->av.pkey_index;
		qp_attr->port_num = cm_id_priv->av.port->port_num;
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
			       struct ib_qp_attr *qp_attr,
			       int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
				IB_QP_DEST_QPN | IB_QP_RQ_PSN |
				IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER;
		qp_attr->ah_attr = cm_id_priv->av.ah_attr;
		qp_attr->path_mtu = cm_id_priv->path_mtu;
		qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn);
		qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn);
		qp_attr->max_dest_rd_atomic = cm_id_priv->responder_resources;
		qp_attr->min_rnr_timer = 0;
		if (cm_id_priv->alt_av.ah_attr.dlid) {
			*qp_attr_mask |= IB_QP_ALT_PATH;
			qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
		}
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
			       struct ib_qp_attr *qp_attr,
			       int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
				IB_QP_RNR_RETRY | IB_QP_SQ_PSN |
				IB_QP_MAX_QP_RD_ATOMIC;
		qp_attr->timeout = cm_id_priv->local_ack_timeout;
		qp_attr->retry_cnt = cm_id_priv->retry_count;
		qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
		qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
		qp_attr->max_rd_atomic = cm_id_priv->initiator_depth;
		if (cm_id_priv->alt_av.ah_attr.dlid) {
			*qp_attr_mask |= IB_QP_PATH_MIG_STATE;
			qp_attr->path_mig_state = IB_MIG_REARM;
		}
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
		       struct ib_qp_attr *qp_attr,
		       int *qp_attr_mask)
{
	struct cm_id_private *cm_id_priv;
	int ret;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	switch (qp_attr->qp_state) {
	case IB_QPS_INIT:
		ret = cm_init_qp_init_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	case IB_QPS_RTR:
		ret = cm_init_qp_rtr_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	case IB_QPS_RTS:
		ret = cm_init_qp_rts_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(ib_cm_init_qp_attr);
static u64 cm_get_ca_guid(struct ib_device *device)
{
	struct ib_device_attr *device_attr;
	u64 guid;
	int ret;

	device_attr = kmalloc(sizeof *device_attr, GFP_KERNEL);
	if (!device_attr)
		return 0;

	ret = ib_query_device(device, device_attr);
	guid = ret ? 0 : device_attr->node_guid;
	kfree(device_attr);
	return guid;
}
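/*
 * Device registration: for each port, register a GSI MAD agent for the CM
 * management class and advertise CM support in the port capability mask.
 */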
static void cm_add_one(struct ib_device *device)
{
	struct cm_device *cm_dev;
	struct cm_port *port;
	struct ib_mad_reg_req reg_req = {
		.mgmt_class = IB_MGMT_CLASS_CM,
		.mgmt_class_version = IB_CM_CLASS_VERSION
	};
	struct ib_port_modify port_modify = {
		.set_port_cap_mask = IB_PORT_CM_SUP
	};
	unsigned long flags;
	int ret;
	u8 i;

	cm_dev = kmalloc(sizeof(*cm_dev) + sizeof(*port) *
			 device->phys_port_cnt, GFP_KERNEL);
	if (!cm_dev)
		return;

	cm_dev->device = device;
	cm_dev->ca_guid = cm_get_ca_guid(device);
	if (!cm_dev->ca_guid)
		goto error1;

	set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
	for (i = 1; i <= device->phys_port_cnt; i++) {
		port = &cm_dev->port[i-1];
		port->cm_dev = cm_dev;
		port->port_num = i;
		port->mad_agent = ib_register_mad_agent(device, i,
							IB_QPT_GSI,
							&reg_req,
							0,
							cm_send_handler,
							cm_recv_handler,
							port);
		if (IS_ERR(port->mad_agent))
			goto error2;

		ret = ib_modify_port(device, i, 0, &port_modify);
		if (ret)
			goto error3;
	}
	ib_set_client_data(device, &cm_client, cm_dev);

	write_lock_irqsave(&cm.device_lock, flags);
	list_add_tail(&cm_dev->list, &cm.device_list);
	write_unlock_irqrestore(&cm.device_lock, flags);
	return;

error3:
	ib_unregister_mad_agent(port->mad_agent);
error2:
	port_modify.set_port_cap_mask = 0;
	port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
	while (--i) {
		port = &cm_dev->port[i-1];
		ib_modify_port(device, port->port_num, 0, &port_modify);
		ib_unregister_mad_agent(port->mad_agent);
	}
error1:
	kfree(cm_dev);
}
static void cm_remove_one(struct ib_device *device)
{
	struct cm_device *cm_dev;
	struct cm_port *port;
	struct ib_port_modify port_modify = {
		.clr_port_cap_mask = IB_PORT_CM_SUP
	};
	unsigned long flags;
	int i;

	cm_dev = ib_get_client_data(device, &cm_client);
	if (!cm_dev)
		return;

	write_lock_irqsave(&cm.device_lock, flags);
	list_del(&cm_dev->list);
	write_unlock_irqrestore(&cm.device_lock, flags);

	for (i = 1; i <= device->phys_port_cnt; i++) {
		port = &cm_dev->port[i-1];
		ib_modify_port(device, port->port_num, 0, &port_modify);
		ib_unregister_mad_agent(port->mad_agent);
	}
	kfree(cm_dev);
}
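/* Module init/cleanup: set up the global tables and the CM workqueue. */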
static int __init ib_cm_init(void)
{
	int ret;

	memset(&cm, 0, sizeof cm);
	INIT_LIST_HEAD(&cm.device_list);
	rwlock_init(&cm.device_lock);
	spin_lock_init(&cm.lock);
	cm.listen_service_table = RB_ROOT;
	cm.listen_service_id = __constant_be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
	cm.remote_id_table = RB_ROOT;
	cm.remote_qp_table = RB_ROOT;
	cm.remote_sidr_table = RB_ROOT;
	idr_init(&cm.local_id_table);
	idr_pre_get(&cm.local_id_table, GFP_KERNEL);

	cm.wq = create_workqueue("ib_cm");
	if (!cm.wq)
		return -ENOMEM;

	ret = ib_register_client(&cm_client);
	if (ret)
		goto error;
	return 0;
error:
	destroy_workqueue(cm.wq);
	return ret;
}
static void __exit ib_cm_cleanup(void)
{
	flush_workqueue(cm.wq);
	destroy_workqueue(cm.wq);
	ib_unregister_client(&cm_client);
}

module_init(ib_cm_init);
module_exit(ib_cm_cleanup);