/*
 * Copyright (c) 2004, 2005 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc.  All rights reserved.
 * Copyright (c) 2005 Open Grid Computing, Inc.  All rights reserved.
 * Copyright (c) 2005 Network Appliance, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/completion.h>

#include <rdma/iw_cm.h>
#include <rdma/ib_addr.h>

#include "iwcm.h"
MODULE_AUTHOR("Tom Tucker");
MODULE_DESCRIPTION("iWARP CM");
MODULE_LICENSE("Dual BSD/GPL");
static struct workqueue_struct *iwcm_wq;

struct iwcm_work {
        struct work_struct work;
        struct iwcm_id_private *cm_id;
        struct list_head list;
        struct iw_cm_event event;
        struct list_head free_list;
};
/*
 * The following services provide a mechanism for pre-allocating iwcm_work
 * elements.  The design pre-allocates them based on the cm_id type:
 *      LISTENING IDS:  Get enough elements preallocated to handle the
 *                      listen backlog.
 *      ACTIVE IDS:     4: CONNECT_REPLY, ESTABLISHED, DISCONNECT, CLOSE
 *      PASSIVE IDS:    3: ESTABLISHED, DISCONNECT, CLOSE
 *
 * Allocating them in connect and listen avoids having to deal
 * with allocation failures on the event upcall from the provider (which
 * is called in the interrupt context).
 *
 * One exception is when creating the cm_id for incoming connection requests.
 * There are two cases:
 * 1) in the event upcall, cm_event_handler(), for a listening cm_id.  If
 *    the backlog is exceeded, then no more connection request events will
 *    be processed.  cm_event_handler() returns -ENOMEM in this case.  It's up
 *    to the provider to reject the connection request.
 * 2) in the connection request workqueue handler, cm_conn_req_handler().
 *    If work elements cannot be allocated for the new connect request cm_id,
 *    then IWCM will call the provider reject method.  This is ok since
 *    cm_conn_req_handler() runs in the workqueue thread context.
 */
static struct iwcm_work *get_work(struct iwcm_id_private *cm_id_priv)
{
        struct iwcm_work *work;

        if (list_empty(&cm_id_priv->work_free_list))
                return NULL;
        work = list_entry(cm_id_priv->work_free_list.next, struct iwcm_work,
                          free_list);
        list_del_init(&work->free_list);
        return work;
}
static void put_work(struct iwcm_work *work)
{
        list_add(&work->free_list, &work->cm_id->work_free_list);
}
static void dealloc_work_entries(struct iwcm_id_private *cm_id_priv)
{
        struct list_head *e, *tmp;

        list_for_each_safe(e, tmp, &cm_id_priv->work_free_list)
                kfree(list_entry(e, struct iwcm_work, free_list));
}
static int alloc_work_entries(struct iwcm_id_private *cm_id_priv, int count)
{
        struct iwcm_work *work;

        BUG_ON(!list_empty(&cm_id_priv->work_free_list));
        while (count--) {
                work = kmalloc(sizeof(struct iwcm_work), GFP_KERNEL);
                if (!work) {
                        dealloc_work_entries(cm_id_priv);
                        return -ENOMEM;
                }
                work->cm_id = cm_id_priv;
                INIT_LIST_HEAD(&work->list);
                put_work(work);
        }
        return 0;
}
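
/*
 * Illustrative sketch (not part of the original file): how the helpers
 * above pair up over a cm_id's lifetime.  The function name is made up;
 * error handling and the cm_id_priv->lock (held by the real callers of
 * get_work()/put_work()) are elided.
 */
#if 0
static void iwcm_work_pool_example(struct iwcm_id_private *cm_id_priv)
{
        struct iwcm_work *work;

        /* Setup path (process context): reserve 4 elements for an
         * active-side id: CONNECT_REPLY, ESTABLISHED, DISCONNECT, CLOSE. */
        if (alloc_work_entries(cm_id_priv, 4))
                return;

        /* Event upcall (interrupt context): take a pre-allocated element
         * instead of calling kmalloc().  get_work() returns NULL when the
         * reserve is exhausted, which cm_event_handler() maps to -ENOMEM. */
        work = get_work(cm_id_priv);
        if (work)
                put_work(work);         /* returned after the event is processed */

        /* Teardown: free whatever remains on the free list. */
        dealloc_work_entries(cm_id_priv);
}
#endif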
/*
 * Save private data from incoming connection requests to
 * iw_cm_event, so the low level driver doesn't have to.  Adjust
 * the event ptr to point to the local copy.
 */
static int copy_private_data(struct iw_cm_event *event)
{
        void *p;

        p = kmemdup(event->private_data, event->private_data_len, GFP_ATOMIC);
        if (!p)
                return -ENOMEM;
        event->private_data = p;
        return 0;
}
static void free_cm_id(struct iwcm_id_private *cm_id_priv)
{
        dealloc_work_entries(cm_id_priv);
        kfree(cm_id_priv);
}
/*
 * Release a reference on cm_id. If the last reference is being
 * released, enable the waiting thread (in iw_destroy_cm_id) to
 * get woken up, and return 1 if a thread is already waiting.
 */
static int iwcm_deref_id(struct iwcm_id_private *cm_id_priv)
{
        BUG_ON(atomic_read(&cm_id_priv->refcount) == 0);
        if (atomic_dec_and_test(&cm_id_priv->refcount)) {
                BUG_ON(!list_empty(&cm_id_priv->work_list));
                complete(&cm_id_priv->destroy_comp);
                return 1;
        }

        return 0;
}
static void add_ref(struct iw_cm_id *cm_id)
{
        struct iwcm_id_private *cm_id_priv;

        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
        atomic_inc(&cm_id_priv->refcount);
}
static void rem_ref(struct iw_cm_id *cm_id)
{
        struct iwcm_id_private *cm_id_priv;

        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
        if (iwcm_deref_id(cm_id_priv) &&
            test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags)) {
                BUG_ON(!list_empty(&cm_id_priv->work_list));
                free_cm_id(cm_id_priv);
        }
}
static int cm_event_handler(struct iw_cm_id *cm_id, struct iw_cm_event *event);
struct iw_cm_id *iw_create_cm_id(struct ib_device *device,
                                 iw_cm_handler cm_handler,
                                 void *context)
{
        struct iwcm_id_private *cm_id_priv;

        cm_id_priv = kzalloc(sizeof(*cm_id_priv), GFP_KERNEL);
        if (!cm_id_priv)
                return ERR_PTR(-ENOMEM);

        cm_id_priv->state = IW_CM_STATE_IDLE;
        cm_id_priv->id.device = device;
        cm_id_priv->id.cm_handler = cm_handler;
        cm_id_priv->id.context = context;
        cm_id_priv->id.event_handler = cm_event_handler;
        cm_id_priv->id.add_ref = add_ref;
        cm_id_priv->id.rem_ref = rem_ref;
        spin_lock_init(&cm_id_priv->lock);
        atomic_set(&cm_id_priv->refcount, 1);
        init_waitqueue_head(&cm_id_priv->connect_wait);
        init_completion(&cm_id_priv->destroy_comp);
        INIT_LIST_HEAD(&cm_id_priv->work_list);
        INIT_LIST_HEAD(&cm_id_priv->work_free_list);

        return &cm_id_priv->id;
}
EXPORT_SYMBOL(iw_create_cm_id);
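
/*
 * Illustrative example (not part of the original file): minimal use of
 * iw_create_cm_id()/iw_destroy_cm_id() by a hypothetical kernel client.
 * my_cm_handler and my_create_example are made-up names; a real handler
 * would switch on event->event.
 */
#if 0
static int my_cm_handler(struct iw_cm_id *cm_id, struct iw_cm_event *event)
{
        /* Runs on the iwcm_wq thread; it may call iw_cm_accept() or
         * iw_cm_reject(), but must not call iw_destroy_cm_id() -- it
         * returns non-zero instead to have the IWCM destroy the id. */
        return 0;
}

static void my_create_example(struct ib_device *device, void *my_ctx)
{
        struct iw_cm_id *cm_id;

        cm_id = iw_create_cm_id(device, my_cm_handler, my_ctx);
        if (IS_ERR(cm_id))
                return;         /* the only failure is -ENOMEM */

        /* ... iw_cm_listen() or iw_cm_connect() would go here ... */

        /* Blocks until all event references are dropped, then frees it. */
        iw_destroy_cm_id(cm_id);
}
#endif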
static int iwcm_modify_qp_err(struct ib_qp *qp)
{
        struct ib_qp_attr qp_attr;

        if (!qp)
                return -EINVAL;

        qp_attr.qp_state = IB_QPS_ERR;
        return ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
}
/*
 * This is really the RDMAC CLOSING state. It is most similar to the
 * IB SQD QP state.
 */
static int iwcm_modify_qp_sqd(struct ib_qp *qp)
{
        struct ib_qp_attr qp_attr;

        BUG_ON(qp == NULL);
        qp_attr.qp_state = IB_QPS_SQD;
        return ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
}
/*
 * CM_ID <-- CLOSING
 *
 * Block if a passive or active connection is currently being processed. Then
 * process the event as follows:
 * - If we are ESTABLISHED, move to CLOSING and modify the QP state
 *   based on the abrupt flag
 * - If the connection is already in the CLOSING or IDLE state, the peer is
 *   disconnecting concurrently with us and we've already seen the
 *   DISCONNECT event -- ignore the request and return 0
 * - Disconnect on a listening endpoint returns -EINVAL
 */
int iw_cm_disconnect(struct iw_cm_id *cm_id, int abrupt)
{
        struct iwcm_id_private *cm_id_priv;
        unsigned long flags;
        int ret = 0;
        struct ib_qp *qp = NULL;

        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
        /* Wait if we're currently in a connect or accept downcall */
        wait_event(cm_id_priv->connect_wait,
                   !test_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags));

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id_priv->state) {
        case IW_CM_STATE_ESTABLISHED:
                cm_id_priv->state = IW_CM_STATE_CLOSING;

                /* QP could be <nul> for user-mode client */
                if (cm_id_priv->qp)
                        qp = cm_id_priv->qp;
                else
                        ret = -EINVAL;
                break;
        case IW_CM_STATE_LISTEN:
                ret = -EINVAL;
                break;
        case IW_CM_STATE_CLOSING:
                /* remote peer closed first */
        case IW_CM_STATE_IDLE:
                /* accept or connect returned !0 */
                break;
        case IW_CM_STATE_CONN_RECV:
                /*
                 * App called disconnect before/without calling accept after
                 * connect_request event delivered.
                 */
                break;
        case IW_CM_STATE_CONN_SENT:
                /* Can only get here if wait above fails */
        default:
                BUG();
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        if (qp) {
                if (abrupt)
                        ret = iwcm_modify_qp_err(qp);
                else
                        ret = iwcm_modify_qp_sqd(qp);

                /*
                 * If both sides are disconnecting the QP could
                 * already be in ERR or SQD states
                 */
                ret = 0;
        }

        return ret;
}
EXPORT_SYMBOL(iw_cm_disconnect);
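
/*
 * Illustrative example (not part of the original file): a hypothetical
 * caller choosing between the graceful (SQD) and abrupt (ERR) forms of
 * iw_cm_disconnect() described above.  my_disconnect_example is a
 * made-up name.
 */
#if 0
static void my_disconnect_example(struct iw_cm_id *cm_id, int peer_misbehaved)
{
        /* abrupt != 0 moves the QP to ERR; abrupt == 0 attempts an orderly
         * close via SQD.  0 is returned if the peer is already
         * disconnecting; -EINVAL on a listening endpoint. */
        (void)iw_cm_disconnect(cm_id, peer_misbehaved ? 1 : 0);
}
#endif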
/*
 * CM_ID <-- DESTROYING
 *
 * Clean up all resources associated with the connection and release
 * the initial reference taken by iw_create_cm_id.
 */
static void destroy_cm_id(struct iw_cm_id *cm_id)
{
        struct iwcm_id_private *cm_id_priv;
        unsigned long flags;
        int ret;

        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
        /*
         * Wait if we're currently in a connect or accept downcall. A
         * listening endpoint should never block here.
         */
        wait_event(cm_id_priv->connect_wait,
                   !test_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags));

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id_priv->state) {
        case IW_CM_STATE_LISTEN:
                cm_id_priv->state = IW_CM_STATE_DESTROYING;
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                /* destroy the listening endpoint */
                ret = cm_id->device->iwcm->destroy_listen(cm_id);
                spin_lock_irqsave(&cm_id_priv->lock, flags);
                break;
        case IW_CM_STATE_ESTABLISHED:
                cm_id_priv->state = IW_CM_STATE_DESTROYING;
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                /* Abrupt close of the connection */
                (void)iwcm_modify_qp_err(cm_id_priv->qp);
                spin_lock_irqsave(&cm_id_priv->lock, flags);
                break;
        case IW_CM_STATE_IDLE:
        case IW_CM_STATE_CLOSING:
                cm_id_priv->state = IW_CM_STATE_DESTROYING;
                break;
        case IW_CM_STATE_CONN_RECV:
                /*
                 * App called destroy before/without calling accept after
                 * receiving connection request event notification or
                 * returned non zero from the event callback function.
                 * In either case, must tell the provider to reject.
                 */
                cm_id_priv->state = IW_CM_STATE_DESTROYING;
                break;
        case IW_CM_STATE_CONN_SENT:
        case IW_CM_STATE_DESTROYING:
        default:
                BUG();
                break;
        }
        if (cm_id_priv->qp) {
                cm_id_priv->id.device->iwcm->rem_ref(cm_id_priv->qp);
                cm_id_priv->qp = NULL;
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        (void)iwcm_deref_id(cm_id_priv);
}
/*
 * This function is only called by the application thread and cannot
 * be called by the event thread. The function will wait for all
 * references to be released on the cm_id and then kfree the cm_id
 * object.
 */
void iw_destroy_cm_id(struct iw_cm_id *cm_id)
{
        struct iwcm_id_private *cm_id_priv;

        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
        BUG_ON(test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags));

        destroy_cm_id(cm_id);

        wait_for_completion(&cm_id_priv->destroy_comp);

        free_cm_id(cm_id_priv);
}
EXPORT_SYMBOL(iw_destroy_cm_id);
/*
 * CM_ID <-- LISTEN
 *
 * Start listening for connect requests. Generates one CONNECT_REQUEST
 * event for each inbound connect request.
 */
int iw_cm_listen(struct iw_cm_id *cm_id, int backlog)
{
        struct iwcm_id_private *cm_id_priv;
        unsigned long flags;
        int ret;

        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);

        ret = alloc_work_entries(cm_id_priv, backlog);
        if (ret)
                return ret;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id_priv->state) {
        case IW_CM_STATE_IDLE:
                cm_id_priv->state = IW_CM_STATE_LISTEN;
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                ret = cm_id->device->iwcm->create_listen(cm_id, backlog);
                if (ret)
                        cm_id_priv->state = IW_CM_STATE_IDLE;
                spin_lock_irqsave(&cm_id_priv->lock, flags);
                break;
        default:
                ret = -EINVAL;
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        return ret;
}
EXPORT_SYMBOL(iw_cm_listen);
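
/*
 * Illustrative example (not part of the original file): a hypothetical
 * passive-side setup, reusing the made-up my_cm_handler from the earlier
 * example.  iw_cm_listen() pre-allocates one work element per backlog
 * slot, so the backlog bounds queued CONNECT_REQUEST events.
 */
#if 0
static struct iw_cm_id *my_listen_example(struct ib_device *device)
{
        struct iw_cm_id *cm_id;
        int ret;

        cm_id = iw_create_cm_id(device, my_cm_handler, NULL);
        if (IS_ERR(cm_id))
                return cm_id;

        /* How the local address is bound is provider-specific and is
         * omitted here. */
        ret = iw_cm_listen(cm_id, 8);
        if (ret) {
                iw_destroy_cm_id(cm_id);
                return ERR_PTR(ret);
        }
        return cm_id;
}
#endif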
/*
 * CM_ID <-- IDLE
 *
 * Rejects an inbound connection request. No events are generated.
 */
int iw_cm_reject(struct iw_cm_id *cm_id,
                 const void *private_data,
                 u8 private_data_len)
{
        struct iwcm_id_private *cm_id_priv;
        unsigned long flags;
        int ret;

        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
        set_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id_priv->state != IW_CM_STATE_CONN_RECV) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
                wake_up_all(&cm_id_priv->connect_wait);
                return -EINVAL;
        }
        cm_id_priv->state = IW_CM_STATE_IDLE;
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        ret = cm_id->device->iwcm->reject(cm_id, private_data,
                                          private_data_len);

        clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
        wake_up_all(&cm_id_priv->connect_wait);

        return ret;
}
EXPORT_SYMBOL(iw_cm_reject);
/*
 * CM_ID <-- ESTABLISHED
 *
 * Accepts an inbound connection request and generates an ESTABLISHED
 * event. Callers of iw_cm_disconnect and iw_destroy_cm_id will block
 * until the ESTABLISHED event is received from the provider.
 */
int iw_cm_accept(struct iw_cm_id *cm_id,
                 struct iw_cm_conn_param *iw_param)
{
        struct iwcm_id_private *cm_id_priv;
        struct ib_qp *qp;
        unsigned long flags;
        int ret;

        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
        set_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id_priv->state != IW_CM_STATE_CONN_RECV) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
                wake_up_all(&cm_id_priv->connect_wait);
                return -EINVAL;
        }
        /* Get the ib_qp given the QPN */
        qp = cm_id->device->iwcm->get_qp(cm_id->device, iw_param->qpn);
        if (!qp) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                return -EINVAL;
        }
        cm_id->device->iwcm->add_ref(qp);
        cm_id_priv->qp = qp;
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        ret = cm_id->device->iwcm->accept(cm_id, iw_param);
        if (ret) {
                /* An error on accept precludes provider events */
                BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_RECV);
                cm_id_priv->state = IW_CM_STATE_IDLE;
                spin_lock_irqsave(&cm_id_priv->lock, flags);
                if (cm_id_priv->qp) {
                        cm_id->device->iwcm->rem_ref(qp);
                        cm_id_priv->qp = NULL;
                }
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
                wake_up_all(&cm_id_priv->connect_wait);
        }

        return ret;
}
EXPORT_SYMBOL(iw_cm_accept);
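
/*
 * Illustrative example (not part of the original file): accepting an
 * inbound request from within the client's event handler, as a
 * hypothetical handler might do on IW_CM_EVENT_CONNECT_REQUEST.
 * my_conn_req_example and my_qpn are made-up; the QPN would come from a
 * QP the client created beforehand.
 */
#if 0
static int my_conn_req_example(struct iw_cm_id *cm_id,
                               struct iw_cm_event *event, u32 my_qpn)
{
        struct iw_cm_conn_param param = {
                .qpn = my_qpn,
                .ord = 1,
                .ird = 1,
        };

        /* Returning non-zero from the handler instead would make the IWCM
         * reject the request and destroy this cm_id. */
        return iw_cm_accept(cm_id, &param);
}
#endif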
/*
 * Active Side: CM_ID <-- CONN_SENT
 *
 * If successful, results in the generation of a CONNECT_REPLY
 * event. iw_cm_disconnect and iw_cm_destroy will block until the
 * CONNECT_REPLY event is received from the provider.
 */
int iw_cm_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
{
        struct iwcm_id_private *cm_id_priv;
        int ret;
        unsigned long flags;
        struct ib_qp *qp;

        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);

        ret = alloc_work_entries(cm_id_priv, 4);
        if (ret)
                return ret;

        set_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
        spin_lock_irqsave(&cm_id_priv->lock, flags);

        if (cm_id_priv->state != IW_CM_STATE_IDLE) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
                wake_up_all(&cm_id_priv->connect_wait);
                return -EINVAL;
        }

        /* Get the ib_qp given the QPN */
        qp = cm_id->device->iwcm->get_qp(cm_id->device, iw_param->qpn);
        if (!qp) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                return -EINVAL;
        }
        cm_id->device->iwcm->add_ref(qp);
        cm_id_priv->qp = qp;
        cm_id_priv->state = IW_CM_STATE_CONN_SENT;
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        ret = cm_id->device->iwcm->connect(cm_id, iw_param);
        if (ret) {
                spin_lock_irqsave(&cm_id_priv->lock, flags);
                if (cm_id_priv->qp) {
                        cm_id->device->iwcm->rem_ref(qp);
                        cm_id_priv->qp = NULL;
                }
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_SENT);
                cm_id_priv->state = IW_CM_STATE_IDLE;
                clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
                wake_up_all(&cm_id_priv->connect_wait);
        }

        return ret;
}
EXPORT_SYMBOL(iw_cm_connect);
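
/*
 * Illustrative example (not part of the original file): a hypothetical
 * active-side connect.  Note that the CONNECT_REPLY event, not the
 * return value of iw_cm_connect(), reports whether the peer accepted.
 * my_connect_example and its parameters are made-up names.
 */
#if 0
static int my_connect_example(struct iw_cm_id *cm_id, u32 my_qpn,
                              const void *pdata, u8 pdata_len)
{
        struct iw_cm_conn_param param = {
                .private_data     = pdata,
                .private_data_len = pdata_len,
                .ord              = 1,
                .ird              = 1,
                .qpn              = my_qpn,
        };

        /* Moves the cm_id to CONN_SENT; disconnect/destroy block until
         * the provider posts the CONNECT_REPLY event. */
        return iw_cm_connect(cm_id, &param);
}
#endif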
/*
 * Passive Side: new CM_ID <-- CONN_RECV
 *
 * Handles an inbound connect request. The function creates a new
 * iw_cm_id to represent the new connection and inherits the client
 * callback function and other attributes from the listening parent.
 *
 * The work item contains a pointer to the listen_cm_id and the event. The
 * listen_cm_id contains the client cm_handler, context and
 * device. These are copied when the device is cloned. The event
 * contains the new four tuple.
 *
 * An error on the child should not affect the parent, so this
 * function does not return a value.
 */
static void cm_conn_req_handler(struct iwcm_id_private *listen_id_priv,
                                struct iw_cm_event *iw_event)
{
        unsigned long flags;
        struct iw_cm_id *cm_id;
        struct iwcm_id_private *cm_id_priv;
        int ret;

        /*
         * The provider should never generate a connection request
         * event with a bad status.
         */
        BUG_ON(iw_event->status);

        /*
         * We could be destroying the listening id. If so, ignore this
         * upcall.
         */
        spin_lock_irqsave(&listen_id_priv->lock, flags);
        if (listen_id_priv->state != IW_CM_STATE_LISTEN) {
                spin_unlock_irqrestore(&listen_id_priv->lock, flags);
                goto out;
        }
        spin_unlock_irqrestore(&listen_id_priv->lock, flags);

        cm_id = iw_create_cm_id(listen_id_priv->id.device,
                                listen_id_priv->id.cm_handler,
                                listen_id_priv->id.context);
        /* If the cm_id could not be created, ignore the request */
        if (IS_ERR(cm_id))
                goto out;

        cm_id->provider_data = iw_event->provider_data;
        cm_id->local_addr = iw_event->local_addr;
        cm_id->remote_addr = iw_event->remote_addr;

        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
        cm_id_priv->state = IW_CM_STATE_CONN_RECV;

        ret = alloc_work_entries(cm_id_priv, 3);
        if (ret) {
                iw_cm_reject(cm_id, NULL, 0);
                iw_destroy_cm_id(cm_id);
                goto out;
        }

        /* Call the client CM handler */
        ret = cm_id->cm_handler(cm_id, iw_event);
        if (ret) {
                iw_cm_reject(cm_id, NULL, 0);
                set_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
                destroy_cm_id(cm_id);
                if (atomic_read(&cm_id_priv->refcount) == 0)
                        free_cm_id(cm_id_priv);
        }

out:
        if (iw_event->private_data_len)
                kfree(iw_event->private_data);
}
/*
 * Passive Side: CM_ID <-- ESTABLISHED
 *
 * The provider generated an ESTABLISHED event which means that
 * the MPA negotiation has completed successfully and we are now in MPA
 * FPDU mode.
 *
 * This event can only be received in the CONN_RECV state. If the
 * remote peer closed, the ESTABLISHED event would be received followed
 * by the CLOSE event. If the app closes, it will block until we wake
 * it up after processing this event.
 */
static int cm_conn_est_handler(struct iwcm_id_private *cm_id_priv,
                               struct iw_cm_event *iw_event)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&cm_id_priv->lock, flags);

        /*
         * We clear the CONNECT_WAIT bit here to allow the callback
         * function to call iw_cm_disconnect. Calling iw_destroy_cm_id
         * from a callback handler is not allowed.
         */
        clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
        BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_RECV);
        cm_id_priv->state = IW_CM_STATE_ESTABLISHED;
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
        wake_up_all(&cm_id_priv->connect_wait);

        return ret;
}
/*
 * Active Side: CM_ID <-- ESTABLISHED
 *
 * The app has called connect and is waiting for the established event to
 * post its requests to the server. This event will wake up anyone
 * blocked in iw_cm_disconnect or iw_destroy_id.
 */
static int cm_conn_rep_handler(struct iwcm_id_private *cm_id_priv,
                               struct iw_cm_event *iw_event)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        /*
         * Clear the connect wait bit so a callback function calling
         * iw_cm_disconnect will not wait and deadlock this thread
         */
        clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
        BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_SENT);
        if (iw_event->status == IW_CM_EVENT_STATUS_ACCEPTED) {
                cm_id_priv->id.local_addr = iw_event->local_addr;
                cm_id_priv->id.remote_addr = iw_event->remote_addr;
                cm_id_priv->state = IW_CM_STATE_ESTABLISHED;
        } else {
                /* REJECTED or RESET */
                cm_id_priv->id.device->iwcm->rem_ref(cm_id_priv->qp);
                cm_id_priv->qp = NULL;
                cm_id_priv->state = IW_CM_STATE_IDLE;
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);

        if (iw_event->private_data_len)
                kfree(iw_event->private_data);

        /* Wake up waiters on connect complete */
        wake_up_all(&cm_id_priv->connect_wait);

        return ret;
}
/*
 * CM_ID <-- CLOSING
 *
 * If in the ESTABLISHED state, move to CLOSING.
 */
static void cm_disconnect_handler(struct iwcm_id_private *cm_id_priv,
                                  struct iw_cm_event *iw_event)
{
        unsigned long flags;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id_priv->state == IW_CM_STATE_ESTABLISHED)
                cm_id_priv->state = IW_CM_STATE_CLOSING;
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
}
/*
 * CM_ID <-- IDLE
 *
 * If in the ESTABLISHED or CLOSING states, the QP will have been
 * moved by the provider to the ERR state. Disassociate the CM_ID from
 * the QP, move to IDLE, and remove the 'connected' reference.
 *
 * If in some other state, the cm_id was destroyed asynchronously.
 * This is the last reference that will result in waking up
 * the app thread blocked in iw_destroy_cm_id.
 */
static int cm_close_handler(struct iwcm_id_private *cm_id_priv,
                            struct iw_cm_event *iw_event)
{
        unsigned long flags;
        int ret = 0;

        spin_lock_irqsave(&cm_id_priv->lock, flags);

        if (cm_id_priv->qp) {
                cm_id_priv->id.device->iwcm->rem_ref(cm_id_priv->qp);
                cm_id_priv->qp = NULL;
        }
        switch (cm_id_priv->state) {
        case IW_CM_STATE_ESTABLISHED:
        case IW_CM_STATE_CLOSING:
                cm_id_priv->state = IW_CM_STATE_IDLE;
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
                spin_lock_irqsave(&cm_id_priv->lock, flags);
                break;
        case IW_CM_STATE_DESTROYING:
                break;
        default:
                BUG();
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        return ret;
}
static int process_event(struct iwcm_id_private *cm_id_priv,
                         struct iw_cm_event *iw_event)
{
        int ret = 0;

        switch (iw_event->event) {
        case IW_CM_EVENT_CONNECT_REQUEST:
                cm_conn_req_handler(cm_id_priv, iw_event);
                break;
        case IW_CM_EVENT_CONNECT_REPLY:
                ret = cm_conn_rep_handler(cm_id_priv, iw_event);
                break;
        case IW_CM_EVENT_ESTABLISHED:
                ret = cm_conn_est_handler(cm_id_priv, iw_event);
                break;
        case IW_CM_EVENT_DISCONNECT:
                cm_disconnect_handler(cm_id_priv, iw_event);
                break;
        case IW_CM_EVENT_CLOSE:
                ret = cm_close_handler(cm_id_priv, iw_event);
                break;
        default:
                BUG();
        }

        return ret;
}
/*
 * Process events on the work_list for the cm_id. If the callback
 * function requests that the cm_id be deleted, a flag is set in the
 * cm_id flags to indicate that when the last reference is
 * removed, the cm_id is to be destroyed. This is necessary to
 * distinguish between an object that will be destroyed by the app
 * thread asleep on the destroy_comp list vs. an object destroyed
 * here synchronously when the last reference is removed.
 */
static void cm_work_handler(struct work_struct *_work)
{
        struct iwcm_work *work = container_of(_work, struct iwcm_work, work);
        struct iw_cm_event levent;
        struct iwcm_id_private *cm_id_priv = work->cm_id;
        unsigned long flags;
        int empty;
        int ret = 0;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        empty = list_empty(&cm_id_priv->work_list);
        while (!empty) {
                work = list_entry(cm_id_priv->work_list.next,
                                  struct iwcm_work, list);
                list_del_init(&work->list);
                empty = list_empty(&cm_id_priv->work_list);
                levent = work->event;
                put_work(work);
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);

                ret = process_event(cm_id_priv, &levent);
                if (ret) {
                        set_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
                        destroy_cm_id(&cm_id_priv->id);
                }
                BUG_ON(atomic_read(&cm_id_priv->refcount) == 0);
                if (iwcm_deref_id(cm_id_priv)) {
                        if (test_bit(IWCM_F_CALLBACK_DESTROY,
                                     &cm_id_priv->flags)) {
                                BUG_ON(!list_empty(&cm_id_priv->work_list));
                                free_cm_id(cm_id_priv);
                        }
                        return;
                }
                spin_lock_irqsave(&cm_id_priv->lock, flags);
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
}
/*
 * This function is called on interrupt context. Schedule events on
 * the iwcm_wq thread to allow callback functions to downcall into
 * the CM and/or block.  Events are queued to a per-CM_ID
 * work_list. If this is the first event on the work_list, the work
 * element is also queued on the iwcm_wq thread.
 *
 * Each event holds a reference on the cm_id. Until the last posted
 * event has been delivered and processed, the cm_id cannot be
 * deleted.
 *
 * Returns:
 *            0 - the event was handled.
 *      -ENOMEM - the event was not handled due to lack of resources.
 */
static int cm_event_handler(struct iw_cm_id *cm_id,
                            struct iw_cm_event *iw_event)
{
        struct iwcm_work *work;
        struct iwcm_id_private *cm_id_priv;
        unsigned long flags;
        int ret = 0;

        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        work = get_work(cm_id_priv);
        if (!work) {
                ret = -ENOMEM;
                goto out;
        }

        INIT_WORK(&work->work, cm_work_handler);
        work->cm_id = cm_id_priv;
        work->event = *iw_event;

        if ((work->event.event == IW_CM_EVENT_CONNECT_REQUEST ||
             work->event.event == IW_CM_EVENT_CONNECT_REPLY) &&
            work->event.private_data_len) {
                ret = copy_private_data(&work->event);
                if (ret) {
                        put_work(work);
                        goto out;
                }
        }

        atomic_inc(&cm_id_priv->refcount);
        if (list_empty(&cm_id_priv->work_list)) {
                list_add_tail(&work->list, &cm_id_priv->work_list);
                queue_work(iwcm_wq, &work->work);
        } else
                list_add_tail(&work->list, &cm_id_priv->work_list);
out:
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return ret;
}
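
/*
 * Illustrative example (not part of the original file): how a
 * hypothetical provider driver would deliver an event.  cm_id->event_handler
 * points at cm_event_handler() above, so this may run in interrupt
 * context; -ENOMEM means the pre-allocated work elements ran out and the
 * provider must handle the drop (e.g. reject a connection request).
 */
#if 0
static void my_provider_upcall_example(struct iw_cm_id *cm_id)
{
        struct iw_cm_event event = {
                .event  = IW_CM_EVENT_DISCONNECT,
                .status = 0,
        };

        if (cm_id->event_handler(cm_id, &event))
                printk(KERN_WARNING "iw_cm event dropped: no work elements\n");
}
#endif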
static int iwcm_init_qp_init_attr(struct iwcm_id_private *cm_id_priv,
                                  struct ib_qp_attr *qp_attr,
                                  int *qp_attr_mask)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id_priv->state) {
        case IW_CM_STATE_IDLE:
        case IW_CM_STATE_CONN_SENT:
        case IW_CM_STATE_CONN_RECV:
        case IW_CM_STATE_ESTABLISHED:
                *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
                qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE |
                                           IB_ACCESS_REMOTE_WRITE |
                                           IB_ACCESS_REMOTE_READ;
                ret = 0;
                break;
        default:
                ret = -EINVAL;
                break;
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return ret;
}
static int iwcm_init_qp_rts_attr(struct iwcm_id_private *cm_id_priv,
                                 struct ib_qp_attr *qp_attr,
                                 int *qp_attr_mask)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id_priv->state) {
        case IW_CM_STATE_IDLE:
        case IW_CM_STATE_CONN_SENT:
        case IW_CM_STATE_CONN_RECV:
        case IW_CM_STATE_ESTABLISHED:
                *qp_attr_mask = 0;
                ret = 0;
                break;
        default:
                ret = -EINVAL;
                break;
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return ret;
}
int iw_cm_init_qp_attr(struct iw_cm_id *cm_id,
                       struct ib_qp_attr *qp_attr,
                       int *qp_attr_mask)
{
        struct iwcm_id_private *cm_id_priv;
        int ret;

        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
        switch (qp_attr->qp_state) {
        case IB_QPS_INIT:
        case IB_QPS_RTR:
                ret = iwcm_init_qp_init_attr(cm_id_priv,
                                             qp_attr, qp_attr_mask);
                break;
        case IB_QPS_RTS:
                ret = iwcm_init_qp_rts_attr(cm_id_priv,
                                            qp_attr, qp_attr_mask);
                break;
        default:
                ret = -EINVAL;
                break;
        }
        return ret;
}
EXPORT_SYMBOL(iw_cm_init_qp_attr);
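
/*
 * Illustrative example (not part of the original file): a hypothetical
 * client using iw_cm_init_qp_attr() to fill in the attributes for a QP
 * transition and then applying them with ib_modify_qp().  The function
 * name is made up.
 */
#if 0
static int my_qp_to_rts_example(struct iw_cm_id *cm_id, struct ib_qp *qp)
{
        struct ib_qp_attr qp_attr;
        int qp_attr_mask;
        int ret;

        /* Ask the IWCM which attributes apply for the RTS transition. */
        qp_attr.qp_state = IB_QPS_RTS;
        ret = iw_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
        if (ret)
                return ret;

        return ib_modify_qp(qp, &qp_attr, qp_attr_mask);
}
#endif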
static int __init iw_cm_init(void)
{
        iwcm_wq = create_singlethread_workqueue("iw_cm_wq");
        if (!iwcm_wq)
                return -ENOMEM;

        return 0;
}

static void __exit iw_cm_cleanup(void)
{
        destroy_workqueue(iwcm_wq);
}

module_init(iw_cm_init);
module_exit(iw_cm_cleanup);