/*
 * Copyright (c) 2004, 2005 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/completion.h>

#include <rdma/iw_cm.h>
#include <rdma/ib_addr.h>

#include "iwcm.h"
MODULE_AUTHOR("Tom Tucker");
MODULE_DESCRIPTION("iWARP CM");
MODULE_LICENSE("Dual BSD/GPL");
static struct workqueue_struct *iwcm_wq;
struct iwcm_work {
        struct work_struct work;
        struct iwcm_id_private *cm_id;
        struct list_head list;
        struct iw_cm_event event;
        struct list_head free_list;
};
/*
 * The following services provide a mechanism for pre-allocating iwcm_work
 * elements. The design pre-allocates them based on the cm_id type:
 *      LISTENING IDS:  Get enough elements preallocated to handle the
 *                      listen backlog.
 *      ACTIVE IDS:     4: CONNECT_REPLY, ESTABLISHED, DISCONNECT, CLOSE
 *      PASSIVE IDS:    3: ESTABLISHED, DISCONNECT, CLOSE
 *
 * Allocating them in connect and listen avoids having to deal
 * with allocation failures on the event upcall from the provider (which
 * is called in the interrupt context).
 *
 * One exception is when creating the cm_id for incoming connection requests.
 * There are two cases:
 * 1) in the event upcall, cm_event_handler(), for a listening cm_id. If
 *    the backlog is exceeded, then no more connection request events will
 *    be processed. cm_event_handler() returns -ENOMEM in this case. It's up
 *    to the provider to reject the connection request.
 * 2) in the connection request workqueue handler, cm_conn_req_handler().
 *    If work elements cannot be allocated for the new connect request cm_id,
 *    then IWCM will call the provider reject method. This is ok since
 *    cm_conn_req_handler() runs in the workqueue thread context.
 */
static struct iwcm_work *get_work(struct iwcm_id_private *cm_id_priv)
{
        struct iwcm_work *work;

        if (list_empty(&cm_id_priv->work_free_list))
                return NULL;
        work = list_entry(cm_id_priv->work_free_list.next, struct iwcm_work,
                          free_list);
        list_del_init(&work->free_list);
        return work;
}
static void put_work(struct iwcm_work *work)
{
        list_add(&work->free_list, &work->cm_id->work_free_list);
}
static void dealloc_work_entries(struct iwcm_id_private *cm_id_priv)
{
        struct list_head *e, *tmp;

        list_for_each_safe(e, tmp, &cm_id_priv->work_free_list)
                kfree(list_entry(e, struct iwcm_work, free_list));
}
static int alloc_work_entries(struct iwcm_id_private *cm_id_priv, int count)
{
        struct iwcm_work *work;

        BUG_ON(!list_empty(&cm_id_priv->work_free_list));
        while (count--) {
                work = kmalloc(sizeof(struct iwcm_work), GFP_KERNEL);
                if (!work) {
                        dealloc_work_entries(cm_id_priv);
                        return -ENOMEM;
                }
                work->cm_id = cm_id_priv;
                INIT_LIST_HEAD(&work->list);
                put_work(work);
        }
        return 0;
}
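
/*
 * Life cycle of the pool, as used below: iw_cm_listen() fills it with
 * 'backlog' elements and iw_cm_connect() with 4 (see the table above);
 * the provider event upcall takes an element with get_work(), and
 * cm_work_handler() returns it with put_work() once the event has been
 * processed.
 */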
/*
 * Save private data from incoming connection requests to
 * iw_cm_event, so the low level driver doesn't have to. Adjust
 * the event ptr to point to the local copy.
 */
static int copy_private_data(struct iw_cm_event *event)
{
        void *p;

        p = kmemdup(event->private_data, event->private_data_len, GFP_ATOMIC);
        if (!p)
                return -ENOMEM;
        event->private_data = p;
        return 0;
}
static void free_cm_id(struct iwcm_id_private *cm_id_priv)
{
        dealloc_work_entries(cm_id_priv);
        kfree(cm_id_priv);
}
/*
 * Release a reference on cm_id. If the last reference is being
 * released, enable the waiting thread (in iw_destroy_cm_id) to
 * get woken up, and return 1 if a thread is already waiting.
 */
static int iwcm_deref_id(struct iwcm_id_private *cm_id_priv)
{
        BUG_ON(atomic_read(&cm_id_priv->refcount) == 0);
        if (atomic_dec_and_test(&cm_id_priv->refcount)) {
                BUG_ON(!list_empty(&cm_id_priv->work_list));
                complete(&cm_id_priv->destroy_comp);
                return 1;
        }

        return 0;
}
static void add_ref(struct iw_cm_id *cm_id)
{
        struct iwcm_id_private *cm_id_priv;
        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
        atomic_inc(&cm_id_priv->refcount);
}
static void rem_ref(struct iw_cm_id *cm_id)
{
        struct iwcm_id_private *cm_id_priv;
        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
        if (iwcm_deref_id(cm_id_priv) &&
            test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags)) {
                BUG_ON(!list_empty(&cm_id_priv->work_list));
                free_cm_id(cm_id_priv);
        }
}
static int cm_event_handler(struct iw_cm_id *cm_id, struct iw_cm_event *event);
struct iw_cm_id *iw_create_cm_id(struct ib_device *device,
                                 iw_cm_handler cm_handler,
                                 void *context)
{
        struct iwcm_id_private *cm_id_priv;

        cm_id_priv = kzalloc(sizeof(*cm_id_priv), GFP_KERNEL);
        if (!cm_id_priv)
                return ERR_PTR(-ENOMEM);

        cm_id_priv->state = IW_CM_STATE_IDLE;
        cm_id_priv->id.device = device;
        cm_id_priv->id.cm_handler = cm_handler;
        cm_id_priv->id.context = context;
        cm_id_priv->id.event_handler = cm_event_handler;
        cm_id_priv->id.add_ref = add_ref;
        cm_id_priv->id.rem_ref = rem_ref;
        spin_lock_init(&cm_id_priv->lock);
        atomic_set(&cm_id_priv->refcount, 1);
        init_waitqueue_head(&cm_id_priv->connect_wait);
        init_completion(&cm_id_priv->destroy_comp);
        INIT_LIST_HEAD(&cm_id_priv->work_list);
        INIT_LIST_HEAD(&cm_id_priv->work_free_list);

        return &cm_id_priv->id;
}
EXPORT_SYMBOL(iw_create_cm_id);
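
/*
 * Usage sketch (illustrative only, not part of this module): a client
 * creates a cm_id, supplying its own event handler and context. The
 * names my_event_handler and my_ctx below are hypothetical.
 *
 *      static int my_event_handler(struct iw_cm_id *id,
 *                                  struct iw_cm_event *event);
 *
 *      struct iw_cm_id *id;
 *
 *      id = iw_create_cm_id(device, my_event_handler, my_ctx);
 *      if (IS_ERR(id))
 *              return PTR_ERR(id);
 */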
static int iwcm_modify_qp_err(struct ib_qp *qp)
{
        struct ib_qp_attr qp_attr;

        if (!qp)
                return -EINVAL;

        qp_attr.qp_state = IB_QPS_ERR;
        return ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
}
/*
 * This is really the RDMAC CLOSING state. It is most similar to the
 * IB SQD QP state.
 */
static int iwcm_modify_qp_sqd(struct ib_qp *qp)
{
        struct ib_qp_attr qp_attr;

        BUG_ON(qp == NULL);
        qp_attr.qp_state = IB_QPS_SQD;
        return ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
}
/*
 * CM_ID <-- CLOSING
 *
 * Block if a passive or active connection is currently being processed. Then
 * process the event as follows:
 * - If we are ESTABLISHED, move to CLOSING and modify the QP state
 *   based on the abrupt flag
 * - If the connection is already in the CLOSING or IDLE state, the peer is
 *   disconnecting concurrently with us and we've already seen the
 *   DISCONNECT event -- ignore the request and return 0
 * - Disconnect on a listening endpoint returns -EINVAL
 */
int iw_cm_disconnect(struct iw_cm_id *cm_id, int abrupt)
{
        struct iwcm_id_private *cm_id_priv;
        unsigned long flags;
        int ret = 0;
        struct ib_qp *qp = NULL;

        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
        /* Wait if we're currently in a connect or accept downcall */
        wait_event(cm_id_priv->connect_wait,
                   !test_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags));

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id_priv->state) {
        case IW_CM_STATE_ESTABLISHED:
                cm_id_priv->state = IW_CM_STATE_CLOSING;

                /* QP could be NULL for user-mode client */
                if (cm_id_priv->qp)
                        qp = cm_id_priv->qp;
                else
                        ret = -EINVAL;
                break;
        case IW_CM_STATE_LISTEN:
                ret = -EINVAL;
                break;
        case IW_CM_STATE_CLOSING:
                /* remote peer closed first */
        case IW_CM_STATE_IDLE:
                /* accept or connect returned !0 */
                break;
        case IW_CM_STATE_CONN_RECV:
                /*
                 * App called disconnect before/without calling accept after
                 * connect_request event delivered.
                 */
                break;
        case IW_CM_STATE_CONN_SENT:
                /* Can only get here if wait above fails */
        default:
                BUG();
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        if (qp) {
                if (abrupt)
                        ret = iwcm_modify_qp_err(qp);
                else
                        ret = iwcm_modify_qp_sqd(qp);

                /*
                 * If both sides are disconnecting the QP could
                 * already be in ERR or SQD states
                 */
                ret = 0;
        }

        return ret;
}
EXPORT_SYMBOL(iw_cm_disconnect);
/*
 * CM_ID <-- DESTROYING
 *
 * Clean up all resources associated with the connection and release
 * the initial reference taken by iw_create_cm_id.
 */
static void destroy_cm_id(struct iw_cm_id *cm_id)
{
        struct iwcm_id_private *cm_id_priv;
        unsigned long flags;
        int ret;

        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
        /*
         * Wait if we're currently in a connect or accept downcall. A
         * listening endpoint should never block here.
         */
        wait_event(cm_id_priv->connect_wait,
                   !test_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags));

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id_priv->state) {
        case IW_CM_STATE_LISTEN:
                cm_id_priv->state = IW_CM_STATE_DESTROYING;
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                /* destroy the listening endpoint */
                ret = cm_id->device->iwcm->destroy_listen(cm_id);
                spin_lock_irqsave(&cm_id_priv->lock, flags);
                break;
        case IW_CM_STATE_ESTABLISHED:
                cm_id_priv->state = IW_CM_STATE_DESTROYING;
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                /* Abrupt close of the connection */
                (void)iwcm_modify_qp_err(cm_id_priv->qp);
                spin_lock_irqsave(&cm_id_priv->lock, flags);
                break;
        case IW_CM_STATE_IDLE:
        case IW_CM_STATE_CLOSING:
                cm_id_priv->state = IW_CM_STATE_DESTROYING;
                break;
        case IW_CM_STATE_CONN_RECV:
                /*
                 * App called destroy before/without calling accept after
                 * receiving connection request event notification or
                 * returned non zero from the event callback function.
                 * In either case, must tell the provider to reject.
                 */
                cm_id_priv->state = IW_CM_STATE_DESTROYING;
                break;
        case IW_CM_STATE_CONN_SENT:
        case IW_CM_STATE_DESTROYING:
        default:
                BUG();
                break;
        }
        if (cm_id_priv->qp) {
                cm_id_priv->id.device->iwcm->rem_ref(cm_id_priv->qp);
                cm_id_priv->qp = NULL;
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        (void)iwcm_deref_id(cm_id_priv);
}
/*
 * This function is only called by the application thread and cannot
 * be called by the event thread. The function will wait for all
 * references to be released on the cm_id and then kfree the cm_id
 * object.
 */
void iw_destroy_cm_id(struct iw_cm_id *cm_id)
{
        struct iwcm_id_private *cm_id_priv;

        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
        BUG_ON(test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags));

        destroy_cm_id(cm_id);

        wait_for_completion(&cm_id_priv->destroy_comp);

        free_cm_id(cm_id_priv);
}
EXPORT_SYMBOL(iw_destroy_cm_id);
/*
 * CM_ID <-- LISTEN
 *
 * Start listening for connect requests. Generates one CONNECT_REQUEST
 * event for each inbound connect request.
 */
int iw_cm_listen(struct iw_cm_id *cm_id, int backlog)
{
        struct iwcm_id_private *cm_id_priv;
        unsigned long flags;
        int ret;

        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);

        ret = alloc_work_entries(cm_id_priv, backlog);
        if (ret)
                return ret;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id_priv->state) {
        case IW_CM_STATE_IDLE:
                cm_id_priv->state = IW_CM_STATE_LISTEN;
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                ret = cm_id->device->iwcm->create_listen(cm_id, backlog);
                if (ret)
                        cm_id_priv->state = IW_CM_STATE_IDLE;
                spin_lock_irqsave(&cm_id_priv->lock, flags);
                break;
        default:
                ret = -EINVAL;
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        return ret;
}
EXPORT_SYMBOL(iw_cm_listen);
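
/*
 * Passive-side sketch (illustrative only): bind the cm_id to a local
 * address and listen. The sockaddr setup is hypothetical; iw_cm.h of
 * this vintage carries the address in cm_id->local_addr.
 *
 *      id->local_addr.sin_family = AF_INET;
 *      id->local_addr.sin_addr.s_addr = my_ip;
 *      id->local_addr.sin_port = htons(my_port);
 *      ret = iw_cm_listen(id, 8);
 *      if (ret)
 *              iw_destroy_cm_id(id);
 */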
/*
 * CM_ID <-- IDLE
 *
 * Rejects an inbound connection request. No events are generated.
 */
int iw_cm_reject(struct iw_cm_id *cm_id,
                 const void *private_data,
                 u8 private_data_len)
{
        struct iwcm_id_private *cm_id_priv;
        unsigned long flags;
        int ret;

        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
        set_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id_priv->state != IW_CM_STATE_CONN_RECV) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
                wake_up_all(&cm_id_priv->connect_wait);
                return -EINVAL;
        }
        cm_id_priv->state = IW_CM_STATE_IDLE;
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        ret = cm_id->device->iwcm->reject(cm_id, private_data,
                                          private_data_len);

        clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
        wake_up_all(&cm_id_priv->connect_wait);

        return ret;
}
EXPORT_SYMBOL(iw_cm_reject);
/*
 * CM_ID <-- ESTABLISHED
 *
 * Accepts an inbound connection request and generates an ESTABLISHED
 * event. Callers of iw_cm_disconnect and iw_destroy_cm_id will block
 * until the ESTABLISHED event is received from the provider.
 */
int iw_cm_accept(struct iw_cm_id *cm_id,
                 struct iw_cm_conn_param *iw_param)
{
        struct iwcm_id_private *cm_id_priv;
        struct ib_qp *qp;
        unsigned long flags;
        int ret;

        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
        set_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id_priv->state != IW_CM_STATE_CONN_RECV) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
                wake_up_all(&cm_id_priv->connect_wait);
                return -EINVAL;
        }
        /* Get the ib_qp given the QPN */
        qp = cm_id->device->iwcm->get_qp(cm_id->device, iw_param->qpn);
        if (!qp) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                return -EINVAL;
        }
        cm_id->device->iwcm->add_ref(qp);
        cm_id_priv->qp = qp;
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        ret = cm_id->device->iwcm->accept(cm_id, iw_param);
        if (ret) {
                /* An error on accept precludes provider events */
                BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_RECV);
                cm_id_priv->state = IW_CM_STATE_IDLE;
                spin_lock_irqsave(&cm_id_priv->lock, flags);
                if (cm_id_priv->qp) {
                        cm_id->device->iwcm->rem_ref(qp);
                        cm_id_priv->qp = NULL;
                }
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
                wake_up_all(&cm_id_priv->connect_wait);
        }

        return ret;
}
EXPORT_SYMBOL(iw_cm_accept);
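
/*
 * Sketch of a connect-request upcall (illustrative only): the client
 * handler accepts by filling an iw_cm_conn_param with the QP number
 * and IRD/ORD values. my_qp and the constant values are hypothetical.
 *
 *      static int my_event_handler(struct iw_cm_id *id,
 *                                  struct iw_cm_event *event)
 *      {
 *              struct iw_cm_conn_param param;
 *
 *              if (event->event == IW_CM_EVENT_CONNECT_REQUEST) {
 *                      param.qpn = my_qp->qp_num;
 *                      param.ird = 1;
 *                      param.ord = 1;
 *                      param.private_data = NULL;
 *                      param.private_data_len = 0;
 *                      return iw_cm_accept(id, &param);
 *              }
 *              return 0;
 *      }
 */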
/*
 * Active Side: CM_ID <-- CONN_SENT
 *
 * If successful, results in the generation of a CONNECT_REPLY
 * event. iw_cm_disconnect and iw_cm_destroy will block until the
 * CONNECT_REPLY event is received from the provider.
 */
int iw_cm_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
{
        struct iwcm_id_private *cm_id_priv;
        int ret;
        unsigned long flags;
        struct ib_qp *qp;

        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);

        ret = alloc_work_entries(cm_id_priv, 4);
        if (ret)
                return ret;

        set_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
        spin_lock_irqsave(&cm_id_priv->lock, flags);

        if (cm_id_priv->state != IW_CM_STATE_IDLE) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
                wake_up_all(&cm_id_priv->connect_wait);
                return -EINVAL;
        }

        /* Get the ib_qp given the QPN */
        qp = cm_id->device->iwcm->get_qp(cm_id->device, iw_param->qpn);
        if (!qp) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                return -EINVAL;
        }
        cm_id->device->iwcm->add_ref(qp);
        cm_id_priv->qp = qp;
        cm_id_priv->state = IW_CM_STATE_CONN_SENT;
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        ret = cm_id->device->iwcm->connect(cm_id, iw_param);
        if (ret) {
                spin_lock_irqsave(&cm_id_priv->lock, flags);
                if (cm_id_priv->qp) {
                        cm_id->device->iwcm->rem_ref(qp);
                        cm_id_priv->qp = NULL;
                }
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_SENT);
                cm_id_priv->state = IW_CM_STATE_IDLE;
                clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
                wake_up_all(&cm_id_priv->connect_wait);
        }

        return ret;
}
EXPORT_SYMBOL(iw_cm_connect);
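
/*
 * Active-side sketch (illustrative only): fill in the remote address
 * and connection parameters, then connect; the result arrives as an
 * IW_CM_EVENT_CONNECT_REPLY event in the client handler. The address
 * values and my_qp are hypothetical.
 *
 *      struct iw_cm_conn_param param = {
 *              .qpn = my_qp->qp_num,
 *              .ird = 1,
 *              .ord = 1,
 *      };
 *
 *      id->remote_addr.sin_family = AF_INET;
 *      id->remote_addr.sin_addr.s_addr = server_ip;
 *      id->remote_addr.sin_port = htons(server_port);
 *      ret = iw_cm_connect(id, &param);
 */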
/*
 * Passive Side: new CM_ID <-- CONN_RECV
 *
 * Handles an inbound connect request. The function creates a new
 * iw_cm_id to represent the new connection and inherits the client
 * callback function and other attributes from the listening parent.
 *
 * The work item contains a pointer to the listen_cm_id and the event. The
 * listen_cm_id contains the client cm_handler, context and
 * device. These are copied when the device is cloned. The event
 * contains the new four tuple.
 *
 * An error on the child should not affect the parent, so this
 * function does not return a value.
 */
static void cm_conn_req_handler(struct iwcm_id_private *listen_id_priv,
                                struct iw_cm_event *iw_event)
{
        unsigned long flags;
        struct iw_cm_id *cm_id;
        struct iwcm_id_private *cm_id_priv;
        int ret;

        /*
         * The provider should never generate a connection request
         * event with a bad status.
         */
        BUG_ON(iw_event->status);

        /*
         * We could be destroying the listening id. If so, ignore this
         * upcall.
         */
        spin_lock_irqsave(&listen_id_priv->lock, flags);
        if (listen_id_priv->state != IW_CM_STATE_LISTEN) {
                spin_unlock_irqrestore(&listen_id_priv->lock, flags);
                goto out;
        }
        spin_unlock_irqrestore(&listen_id_priv->lock, flags);

        cm_id = iw_create_cm_id(listen_id_priv->id.device,
                                listen_id_priv->id.cm_handler,
                                listen_id_priv->id.context);
        /* If the cm_id could not be created, ignore the request */
        if (IS_ERR(cm_id))
                goto out;

        cm_id->provider_data = iw_event->provider_data;
        cm_id->local_addr = iw_event->local_addr;
        cm_id->remote_addr = iw_event->remote_addr;

        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
        cm_id_priv->state = IW_CM_STATE_CONN_RECV;

        ret = alloc_work_entries(cm_id_priv, 3);
        if (ret) {
                iw_cm_reject(cm_id, NULL, 0);
                iw_destroy_cm_id(cm_id);
                goto out;
        }

        /* Call the client CM handler */
        ret = cm_id->cm_handler(cm_id, iw_event);
        if (ret) {
                iw_cm_reject(cm_id, NULL, 0);
                set_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
                destroy_cm_id(cm_id);
                if (atomic_read(&cm_id_priv->refcount) == 0)
                        free_cm_id(cm_id_priv);
        }

out:
        if (iw_event->private_data_len)
                kfree(iw_event->private_data);
}
/*
 * Passive Side: CM_ID <-- ESTABLISHED
 *
 * The provider generated an ESTABLISHED event which means that
 * the MPA negotiation has completed successfully and we are now in MPA
 * FPDU mode.
 *
 * This event can only be received in the CONN_RECV state. If the
 * remote peer closed, the ESTABLISHED event would be received followed
 * by the CLOSE event. If the app closes, it will block until we wake
 * it up after processing this event.
 */
static int cm_conn_est_handler(struct iwcm_id_private *cm_id_priv,
                               struct iw_cm_event *iw_event)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&cm_id_priv->lock, flags);

        /*
         * We clear the CONNECT_WAIT bit here to allow the callback
         * function to call iw_cm_disconnect. Calling iw_destroy_cm_id
         * from a callback handler is not allowed.
         */
        clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
        BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_RECV);
        cm_id_priv->state = IW_CM_STATE_ESTABLISHED;
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
        wake_up_all(&cm_id_priv->connect_wait);

        return ret;
}
/*
 * Active Side: CM_ID <-- ESTABLISHED
 *
 * The app has called connect and is waiting for the established event to
 * post its requests to the server. This event will wake up anyone
 * blocked in iw_cm_disconnect or iw_destroy_id.
 */
static int cm_conn_rep_handler(struct iwcm_id_private *cm_id_priv,
                               struct iw_cm_event *iw_event)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        /*
         * Clear the connect wait bit so a callback function calling
         * iw_cm_disconnect will not wait and deadlock this thread
         */
        clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
        BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_SENT);
        if (iw_event->status == IW_CM_EVENT_STATUS_ACCEPTED) {
                cm_id_priv->id.local_addr = iw_event->local_addr;
                cm_id_priv->id.remote_addr = iw_event->remote_addr;
                cm_id_priv->state = IW_CM_STATE_ESTABLISHED;
        } else {
                /* REJECTED or RESET */
                cm_id_priv->id.device->iwcm->rem_ref(cm_id_priv->qp);
                cm_id_priv->qp = NULL;
                cm_id_priv->state = IW_CM_STATE_IDLE;
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);

        if (iw_event->private_data_len)
                kfree(iw_event->private_data);

        /* Wake up waiters on connect complete */
        wake_up_all(&cm_id_priv->connect_wait);

        return ret;
}
/*
 * CM_ID <-- CLOSING
 *
 * If in the ESTABLISHED state, move to CLOSING.
 */
static void cm_disconnect_handler(struct iwcm_id_private *cm_id_priv,
                                  struct iw_cm_event *iw_event)
{
        unsigned long flags;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id_priv->state == IW_CM_STATE_ESTABLISHED)
                cm_id_priv->state = IW_CM_STATE_CLOSING;
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
}
/*
 * CM_ID <-- IDLE
 *
 * If in the ESTABLISHED or CLOSING states, the QP will have been
 * moved by the provider to the ERR state. Disassociate the CM_ID from
 * the QP, move to IDLE, and remove the 'connected' reference.
 *
 * If in some other state, the cm_id was destroyed asynchronously.
 * This is the last reference that will result in waking up
 * the app thread blocked in iw_destroy_cm_id.
 */
static int cm_close_handler(struct iwcm_id_private *cm_id_priv,
                            struct iw_cm_event *iw_event)
{
        unsigned long flags;
        int ret = 0;

        spin_lock_irqsave(&cm_id_priv->lock, flags);

        if (cm_id_priv->qp) {
                cm_id_priv->id.device->iwcm->rem_ref(cm_id_priv->qp);
                cm_id_priv->qp = NULL;
        }
        switch (cm_id_priv->state) {
        case IW_CM_STATE_ESTABLISHED:
        case IW_CM_STATE_CLOSING:
                cm_id_priv->state = IW_CM_STATE_IDLE;
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
                spin_lock_irqsave(&cm_id_priv->lock, flags);
                break;
        case IW_CM_STATE_DESTROYING:
                break;
        default:
                BUG();
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        return ret;
}
static int process_event(struct iwcm_id_private *cm_id_priv,
                         struct iw_cm_event *iw_event)
{
        int ret = 0;

        switch (iw_event->event) {
        case IW_CM_EVENT_CONNECT_REQUEST:
                cm_conn_req_handler(cm_id_priv, iw_event);
                break;
        case IW_CM_EVENT_CONNECT_REPLY:
                ret = cm_conn_rep_handler(cm_id_priv, iw_event);
                break;
        case IW_CM_EVENT_ESTABLISHED:
                ret = cm_conn_est_handler(cm_id_priv, iw_event);
                break;
        case IW_CM_EVENT_DISCONNECT:
                cm_disconnect_handler(cm_id_priv, iw_event);
                break;
        case IW_CM_EVENT_CLOSE:
                ret = cm_close_handler(cm_id_priv, iw_event);
                break;
        default:
                BUG();
        }

        return ret;
}
/*
 * Process events on the work_list for the cm_id. If the callback
 * function requests that the cm_id be deleted, a flag is set in the
 * cm_id flags to indicate that when the last reference is
 * removed, the cm_id is to be destroyed. This is necessary to
 * distinguish between an object that will be destroyed by the app
 * thread asleep on the destroy_comp list vs. an object destroyed
 * here synchronously when the last reference is removed.
 */
static void cm_work_handler(struct work_struct *_work)
{
        struct iwcm_work *work = container_of(_work, struct iwcm_work, work);
        struct iw_cm_event levent;
        struct iwcm_id_private *cm_id_priv = work->cm_id;
        unsigned long flags;
        int empty;
        int ret = 0;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        empty = list_empty(&cm_id_priv->work_list);
        while (!empty) {
                work = list_entry(cm_id_priv->work_list.next,
                                  struct iwcm_work, list);
                list_del_init(&work->list);
                empty = list_empty(&cm_id_priv->work_list);
                levent = work->event;
                put_work(work);
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);

                ret = process_event(cm_id_priv, &levent);
                if (ret) {
                        set_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
                        destroy_cm_id(&cm_id_priv->id);
                }
                BUG_ON(atomic_read(&cm_id_priv->refcount) == 0);
                if (iwcm_deref_id(cm_id_priv)) {
                        if (test_bit(IWCM_F_CALLBACK_DESTROY,
                                     &cm_id_priv->flags)) {
                                BUG_ON(!list_empty(&cm_id_priv->work_list));
                                free_cm_id(cm_id_priv);
                        }
                        return;
                }
                spin_lock_irqsave(&cm_id_priv->lock, flags);
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
}
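
/*
 * Note: because iwcm_wq is a single-threaded workqueue and the handler
 * above drains the whole per-cm_id work_list before returning, events
 * for a given cm_id reach the client handler in the order the provider
 * posted them.
 */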
/*
 * This function is called on interrupt context. Schedule events on
 * the iwcm_wq thread to allow callback functions to downcall into
 * the CM and/or block. Events are queued to a per-CM_ID
 * work_list. If this is the first event on the work_list, the work
 * element is also queued on the iwcm_wq thread.
 *
 * Each event holds a reference on the cm_id. Until the last posted
 * event has been delivered and processed, the cm_id cannot be
 * deleted.
 *
 * Returns:
 *            0 - the event was handled.
 *      -ENOMEM - the event was not handled due to lack of resources.
 */
static int cm_event_handler(struct iw_cm_id *cm_id,
                            struct iw_cm_event *iw_event)
{
        struct iwcm_work *work;
        struct iwcm_id_private *cm_id_priv;
        unsigned long flags;
        int ret = 0;

        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        work = get_work(cm_id_priv);
        if (!work) {
                ret = -ENOMEM;
                goto out;
        }

        INIT_WORK(&work->work, cm_work_handler);
        work->cm_id = cm_id_priv;
        work->event = *iw_event;

        if ((work->event.event == IW_CM_EVENT_CONNECT_REQUEST ||
             work->event.event == IW_CM_EVENT_CONNECT_REPLY) &&
            work->event.private_data_len) {
                ret = copy_private_data(&work->event);
                if (ret) {
                        put_work(work);
                        goto out;
                }
        }

        atomic_inc(&cm_id_priv->refcount);
        if (list_empty(&cm_id_priv->work_list)) {
                list_add_tail(&work->list, &cm_id_priv->work_list);
                queue_work(iwcm_wq, &work->work);
        } else
                list_add_tail(&work->list, &cm_id_priv->work_list);
out:
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return ret;
}
static int iwcm_init_qp_init_attr(struct iwcm_id_private *cm_id_priv,
                                  struct ib_qp_attr *qp_attr,
                                  int *qp_attr_mask)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id_priv->state) {
        case IW_CM_STATE_IDLE:
        case IW_CM_STATE_CONN_SENT:
        case IW_CM_STATE_CONN_RECV:
        case IW_CM_STATE_ESTABLISHED:
                *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
                qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE |
                                           IB_ACCESS_REMOTE_WRITE |
                                           IB_ACCESS_REMOTE_READ;
                ret = 0;
                break;
        default:
                ret = -EINVAL;
                break;
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return ret;
}
static int iwcm_init_qp_rts_attr(struct iwcm_id_private *cm_id_priv,
                                 struct ib_qp_attr *qp_attr,
                                 int *qp_attr_mask)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id_priv->state) {
        case IW_CM_STATE_IDLE:
        case IW_CM_STATE_CONN_SENT:
        case IW_CM_STATE_CONN_RECV:
        case IW_CM_STATE_ESTABLISHED:
                *qp_attr_mask = 0;
                ret = 0;
                break;
        default:
                ret = -EINVAL;
                break;
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return ret;
}
int iw_cm_init_qp_attr(struct iw_cm_id *cm_id,
                       struct ib_qp_attr *qp_attr,
                       int *qp_attr_mask)
{
        struct iwcm_id_private *cm_id_priv;
        int ret;

        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
        switch (qp_attr->qp_state) {
        case IB_QPS_INIT:
        case IB_QPS_RTR:
                ret = iwcm_init_qp_init_attr(cm_id_priv,
                                             qp_attr, qp_attr_mask);
                break;
        case IB_QPS_RTS:
                ret = iwcm_init_qp_rts_attr(cm_id_priv,
                                            qp_attr, qp_attr_mask);
                break;
        default:
                ret = -EINVAL;
                break;
        }
        return ret;
}
EXPORT_SYMBOL(iw_cm_init_qp_attr);
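
/*
 * Usage sketch (illustrative only): a client typically calls this
 * before transitioning its QP, merging the CM-required attributes
 * into the modify call. my_qp is hypothetical.
 *
 *      struct ib_qp_attr attr;
 *      int mask;
 *
 *      attr.qp_state = IB_QPS_INIT;
 *      ret = iw_cm_init_qp_attr(id, &attr, &mask);
 *      if (!ret)
 *              ret = ib_modify_qp(my_qp, &attr, mask);
 */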
static int __init iw_cm_init(void)
{
        iwcm_wq = create_singlethread_workqueue("iw_cm_wq");
        if (!iwcm_wq)
                return -ENOMEM;

        return 0;
}
static void __exit iw_cm_cleanup(void)
{
        destroy_workqueue(iwcm_wq);
}
module_init(iw_cm_init);
module_exit(iw_cm_cleanup);