/*
 * Copyright (c) 2004, 2005 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/module.h>

#include <rdma/iw_cm.h>
#include <rdma/ib_addr.h>

#include "iwcm.h"

MODULE_AUTHOR("Tom Tucker");
MODULE_DESCRIPTION("iWARP CM");
MODULE_LICENSE("Dual BSD/GPL");

static struct workqueue_struct *iwcm_wq;

struct iwcm_work {
        struct work_struct work;
        struct iwcm_id_private *cm_id;
        struct list_head list;
        struct iw_cm_event event;
        struct list_head free_list;
};

/*
 * The following services provide a mechanism for pre-allocating iwcm_work
 * elements. The design pre-allocates them based on the cm_id type:
 *      LISTENING IDS:  Get enough elements preallocated to handle the
 *                      listen backlog.
 *      ACTIVE IDS:     4: CONNECT_REPLY, ESTABLISHED, DISCONNECT, CLOSE
 *      PASSIVE IDS:    3: ESTABLISHED, DISCONNECT, CLOSE
 *
 * Allocating them in connect and listen avoids having to deal
 * with allocation failures on the event upcall from the provider (which
 * is called in interrupt context).
 *
 * One exception is when creating the cm_id for incoming connection requests.
 * There are two cases:
 * 1) in the event upcall, cm_event_handler(), for a listening cm_id. If
 *    the backlog is exceeded, then no more connection request events will
 *    be processed. cm_event_handler() returns -ENOMEM in this case. It's up
 *    to the provider to reject the connection request.
 * 2) in the connection request workqueue handler, cm_conn_req_handler().
 *    If work elements cannot be allocated for the new connect request cm_id,
 *    then IWCM will call the provider reject method. This is ok since
 *    cm_conn_req_handler() runs in the workqueue thread context.
 */

static struct iwcm_work *get_work(struct iwcm_id_private *cm_id_priv)
{
        struct iwcm_work *work;

        if (list_empty(&cm_id_priv->work_free_list))
                return NULL;
        work = list_entry(cm_id_priv->work_free_list.next, struct iwcm_work,
                          free_list);
        list_del_init(&work->free_list);
        return work;
}

static void put_work(struct iwcm_work *work)
{
        list_add(&work->free_list, &work->cm_id->work_free_list);
}

static void dealloc_work_entries(struct iwcm_id_private *cm_id_priv)
{
        struct list_head *e, *tmp;

        list_for_each_safe(e, tmp, &cm_id_priv->work_free_list)
                kfree(list_entry(e, struct iwcm_work, free_list));
}

static int alloc_work_entries(struct iwcm_id_private *cm_id_priv, int count)
{
        struct iwcm_work *work;

        BUG_ON(!list_empty(&cm_id_priv->work_free_list));
        while (count--) {
                work = kmalloc(sizeof(struct iwcm_work), GFP_KERNEL);
                if (!work) {
                        dealloc_work_entries(cm_id_priv);
                        return -ENOMEM;
                }
                work->cm_id = cm_id_priv;
                INIT_LIST_HEAD(&work->list);
                put_work(work);
        }
        return 0;
}

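/*
 * Illustrative note (hypothetical call sequence, not part of this module):
 * the event path pairs get_work() with put_work(), so the free list never
 * grows beyond what alloc_work_entries() reserved. A caller in process
 * context reserves the worst-case event count up front, e.g. for an
 * active-side ID:
 *
 *      if (alloc_work_entries(cm_id_priv, 4))
 *              return -ENOMEM;
 *
 * after which the interrupt-context upcall can always obtain an element
 * with get_work() for CONNECT_REPLY, ESTABLISHED, DISCONNECT and CLOSE.
 */
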
/*
 * Save private data from incoming connection requests to
 * iw_cm_event, so the low level driver doesn't have to. Adjust
 * the event ptr to point to the local copy.
 */
static int copy_private_data(struct iw_cm_event *event)
{
        void *p;

        p = kmemdup(event->private_data, event->private_data_len, GFP_ATOMIC);
        if (!p)
                return -ENOMEM;
        event->private_data = p;
        return 0;
}

static void free_cm_id(struct iwcm_id_private *cm_id_priv)
{
        dealloc_work_entries(cm_id_priv);
        kfree(cm_id_priv);
}

/*
 * Release a reference on cm_id. If the last reference is being
 * released, enable the waiting thread (in iw_destroy_cm_id) to
 * get woken up, and return 1 if a thread is already waiting.
 */
static int iwcm_deref_id(struct iwcm_id_private *cm_id_priv)
{
        BUG_ON(atomic_read(&cm_id_priv->refcount) == 0);
        if (atomic_dec_and_test(&cm_id_priv->refcount)) {
                BUG_ON(!list_empty(&cm_id_priv->work_list));
                complete(&cm_id_priv->destroy_comp);
                return 1;
        }
        return 0;
}

static void add_ref(struct iw_cm_id *cm_id)
{
        struct iwcm_id_private *cm_id_priv;
        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
        atomic_inc(&cm_id_priv->refcount);
}

static void rem_ref(struct iw_cm_id *cm_id)
{
        struct iwcm_id_private *cm_id_priv;
        int cb_destroy;

        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);

        /*
         * Test bit before deref in case the cm_id gets freed on another
         * thread.
         */
        cb_destroy = test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
        if (iwcm_deref_id(cm_id_priv) && cb_destroy) {
                BUG_ON(!list_empty(&cm_id_priv->work_list));
                free_cm_id(cm_id_priv);
        }
}

static int cm_event_handler(struct iw_cm_id *cm_id, struct iw_cm_event *event);

struct iw_cm_id *iw_create_cm_id(struct ib_device *device,
                                 iw_cm_handler cm_handler,
                                 void *context)
{
        struct iwcm_id_private *cm_id_priv;

        cm_id_priv = kzalloc(sizeof(*cm_id_priv), GFP_KERNEL);
        if (!cm_id_priv)
                return ERR_PTR(-ENOMEM);

        cm_id_priv->state = IW_CM_STATE_IDLE;
        cm_id_priv->id.device = device;
        cm_id_priv->id.cm_handler = cm_handler;
        cm_id_priv->id.context = context;
        cm_id_priv->id.event_handler = cm_event_handler;
        cm_id_priv->id.add_ref = add_ref;
        cm_id_priv->id.rem_ref = rem_ref;
        spin_lock_init(&cm_id_priv->lock);
        atomic_set(&cm_id_priv->refcount, 1);
        init_waitqueue_head(&cm_id_priv->connect_wait);
        init_completion(&cm_id_priv->destroy_comp);
        INIT_LIST_HEAD(&cm_id_priv->work_list);
        INIT_LIST_HEAD(&cm_id_priv->work_free_list);

        return &cm_id_priv->id;
}
EXPORT_SYMBOL(iw_create_cm_id);

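/*
 * Usage sketch (hypothetical consumer code, not part of this module): a
 * kernel ULP supplies its own handler and context, checks for ERR_PTR, and
 * eventually releases the ID with iw_destroy_cm_id():
 *
 *      static int my_cm_handler(struct iw_cm_id *id,
 *                               struct iw_cm_event *event)
 *      {
 *              return 0;       (dispatch on event->event in real code)
 *      }
 *      ...
 *      cm_id = iw_create_cm_id(device, my_cm_handler, my_ctx);
 *      if (IS_ERR(cm_id))
 *              return PTR_ERR(cm_id);
 */
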
static int iwcm_modify_qp_err(struct ib_qp *qp)
{
        struct ib_qp_attr qp_attr;

        if (!qp)
                return -EINVAL;

        qp_attr.qp_state = IB_QPS_ERR;
        return ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
}

/*
 * This is really the RDMAC CLOSING state. It is most similar to the
 * IB SQD QP state.
 */
static int iwcm_modify_qp_sqd(struct ib_qp *qp)
{
        struct ib_qp_attr qp_attr;

        BUG_ON(qp == NULL);
        qp_attr.qp_state = IB_QPS_SQD;
        return ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
}

/*
 * CM_ID <-- CLOSING
 *
 * Block if a passive or active connection is currently being processed. Then
 * process the event as follows:
 * - If we are ESTABLISHED, move to CLOSING and modify the QP state
 *   based on the abrupt flag
 * - If the connection is already in the CLOSING or IDLE state, the peer is
 *   disconnecting concurrently with us and we've already seen the
 *   DISCONNECT event -- ignore the request and return 0
 * - Disconnect on a listening endpoint returns -EINVAL
 */
int iw_cm_disconnect(struct iw_cm_id *cm_id, int abrupt)
{
        struct iwcm_id_private *cm_id_priv;
        unsigned long flags;
        int ret = 0;
        struct ib_qp *qp = NULL;

        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
        /* Wait if we're currently in a connect or accept downcall */
        wait_event(cm_id_priv->connect_wait,
                   !test_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags));

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id_priv->state) {
        case IW_CM_STATE_ESTABLISHED:
                cm_id_priv->state = IW_CM_STATE_CLOSING;

                /* QP could be <nul> for user-mode client */
                if (cm_id_priv->qp)
                        qp = cm_id_priv->qp;
                else
                        ret = -EINVAL;
                break;
        case IW_CM_STATE_LISTEN:
                ret = -EINVAL;
                break;
        case IW_CM_STATE_CLOSING:
                /* remote peer closed first */
        case IW_CM_STATE_IDLE:
                /* accept or connect returned !0 */
                break;
        case IW_CM_STATE_CONN_RECV:
                /*
                 * App called disconnect before/without calling accept after
                 * connect_request event delivered.
                 */
                break;
        case IW_CM_STATE_CONN_SENT:
                /* Can only get here if wait above fails */
        default:
                BUG();
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        if (qp) {
                if (abrupt)
                        ret = iwcm_modify_qp_err(qp);
                else
                        ret = iwcm_modify_qp_sqd(qp);

                /*
                 * If both sides are disconnecting the QP could
                 * already be in ERR or SQD states.
                 */
                ret = 0;
        }

        return ret;
}
EXPORT_SYMBOL(iw_cm_disconnect);

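/*
 * For illustration (hypothetical caller): abrupt == 0 requests a graceful
 * close (QP moved to SQD, the RDMAC CLOSING state), while abrupt != 0
 * forces an abortive close (QP moved to ERR):
 *
 *      ret = iw_cm_disconnect(cm_id, 0);       (graceful)
 *      ret = iw_cm_disconnect(cm_id, 1);       (abrupt)
 *
 * Either call blocks until any in-progress connect or accept downcall on
 * this cm_id has completed.
 */
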
/*
 * CM_ID <-- DESTROYING
 *
 * Clean up all resources associated with the connection and release
 * the initial reference taken by iw_create_cm_id.
 */
static void destroy_cm_id(struct iw_cm_id *cm_id)
{
        struct iwcm_id_private *cm_id_priv;
        unsigned long flags;

        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
        /*
         * Wait if we're currently in a connect or accept downcall. A
         * listening endpoint should never block here.
         */
        wait_event(cm_id_priv->connect_wait,
                   !test_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags));

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id_priv->state) {
        case IW_CM_STATE_LISTEN:
                cm_id_priv->state = IW_CM_STATE_DESTROYING;
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                /* destroy the listening endpoint */
                cm_id->device->iwcm->destroy_listen(cm_id);
                spin_lock_irqsave(&cm_id_priv->lock, flags);
                break;
        case IW_CM_STATE_ESTABLISHED:
                cm_id_priv->state = IW_CM_STATE_DESTROYING;
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                /* Abrupt close of the connection */
                (void)iwcm_modify_qp_err(cm_id_priv->qp);
                spin_lock_irqsave(&cm_id_priv->lock, flags);
                break;
        case IW_CM_STATE_IDLE:
        case IW_CM_STATE_CLOSING:
                cm_id_priv->state = IW_CM_STATE_DESTROYING;
                break;
        case IW_CM_STATE_CONN_RECV:
                /*
                 * App called destroy before/without calling accept after
                 * receiving connection request event notification or
                 * returned non zero from the event callback function.
                 * In either case, must tell the provider to reject.
                 */
                cm_id_priv->state = IW_CM_STATE_DESTROYING;
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                cm_id->device->iwcm->reject(cm_id, NULL, 0);
                spin_lock_irqsave(&cm_id_priv->lock, flags);
                break;
        case IW_CM_STATE_CONN_SENT:
        case IW_CM_STATE_DESTROYING:
        default:
                BUG();
                break;
        }
        if (cm_id_priv->qp) {
                cm_id_priv->id.device->iwcm->rem_ref(cm_id_priv->qp);
                cm_id_priv->qp = NULL;
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        (void)iwcm_deref_id(cm_id_priv);
}

/*
 * This function is only called by the application thread and cannot
 * be called by the event thread. The function will wait for all
 * references to be released on the cm_id and then kfree the cm_id
 * object.
 */
void iw_destroy_cm_id(struct iw_cm_id *cm_id)
{
        struct iwcm_id_private *cm_id_priv;

        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
        BUG_ON(test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags));

        destroy_cm_id(cm_id);

        wait_for_completion(&cm_id_priv->destroy_comp);

        free_cm_id(cm_id_priv);
}
EXPORT_SYMBOL(iw_destroy_cm_id);

/*
 * CM_ID <-- LISTEN
 *
 * Start listening for connect requests. Generates one CONNECT_REQUEST
 * event for each inbound connect request.
 */
int iw_cm_listen(struct iw_cm_id *cm_id, int backlog)
{
        struct iwcm_id_private *cm_id_priv;
        unsigned long flags;
        int ret;

        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);

        ret = alloc_work_entries(cm_id_priv, backlog);
        if (ret)
                return ret;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id_priv->state) {
        case IW_CM_STATE_IDLE:
                cm_id_priv->state = IW_CM_STATE_LISTEN;
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                ret = cm_id->device->iwcm->create_listen(cm_id, backlog);
                if (ret)
                        cm_id_priv->state = IW_CM_STATE_IDLE;
                spin_lock_irqsave(&cm_id_priv->lock, flags);
                break;
        default:
                ret = -EINVAL;
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        return ret;
}
EXPORT_SYMBOL(iw_cm_listen);

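/*
 * Usage sketch (hypothetical passive-side consumer; my_sockaddr and the
 * backlog value are illustrative): fill in the local address on the cm_id,
 * then listen; each inbound connect request yields an
 * IW_CM_EVENT_CONNECT_REQUEST upcall on a new child cm_id:
 *
 *      cm_id->local_addr = my_sockaddr;
 *      ret = iw_cm_listen(cm_id, 16);
 *      if (ret)
 *              iw_destroy_cm_id(cm_id);
 */
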
/*
 * CM_ID <-- IDLE
 *
 * Rejects an inbound connection request. No events are generated.
 */
int iw_cm_reject(struct iw_cm_id *cm_id,
                 const void *private_data,
                 u8 private_data_len)
{
        struct iwcm_id_private *cm_id_priv;
        unsigned long flags;
        int ret;

        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
        set_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id_priv->state != IW_CM_STATE_CONN_RECV) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
                wake_up_all(&cm_id_priv->connect_wait);
                return -EINVAL;
        }
        cm_id_priv->state = IW_CM_STATE_IDLE;
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        ret = cm_id->device->iwcm->reject(cm_id, private_data,
                                          private_data_len);

        clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
        wake_up_all(&cm_id_priv->connect_wait);

        return ret;
}
EXPORT_SYMBOL(iw_cm_reject);

/*
 * CM_ID <-- ESTABLISHED
 *
 * Accepts an inbound connection request and generates an ESTABLISHED
 * event. Callers of iw_cm_disconnect and iw_destroy_cm_id will block
 * until the ESTABLISHED event is received from the provider.
 */
int iw_cm_accept(struct iw_cm_id *cm_id,
                 struct iw_cm_conn_param *iw_param)
{
        struct iwcm_id_private *cm_id_priv;
        struct ib_qp *qp;
        unsigned long flags;
        int ret;

        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
        set_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id_priv->state != IW_CM_STATE_CONN_RECV) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
                wake_up_all(&cm_id_priv->connect_wait);
                return -EINVAL;
        }
        /* Get the ib_qp given the QPN */
        qp = cm_id->device->iwcm->get_qp(cm_id->device, iw_param->qpn);
        if (!qp) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
                wake_up_all(&cm_id_priv->connect_wait);
                return -EINVAL;
        }
        cm_id->device->iwcm->add_ref(qp);
        cm_id_priv->qp = qp;
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        ret = cm_id->device->iwcm->accept(cm_id, iw_param);
        if (ret) {
                /* An error on accept precludes provider events */
                BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_RECV);
                cm_id_priv->state = IW_CM_STATE_IDLE;
                spin_lock_irqsave(&cm_id_priv->lock, flags);
                if (cm_id_priv->qp) {
                        cm_id->device->iwcm->rem_ref(qp);
                        cm_id_priv->qp = NULL;
                }
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
                wake_up_all(&cm_id_priv->connect_wait);
        }

        return ret;
}
EXPORT_SYMBOL(iw_cm_accept);

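/*
 * Usage sketch (hypothetical handler code; my_qp, my_data and the ord/ird
 * values are illustrative): accept is normally issued for the child cm_id
 * delivered with IW_CM_EVENT_CONNECT_REQUEST, passing the QP number the
 * ULP created plus any MPA private data:
 *
 *      struct iw_cm_conn_param param = {
 *              .qpn              = my_qp->qp_num,
 *              .ord              = 16,
 *              .ird              = 16,
 *              .private_data     = &my_data,
 *              .private_data_len = sizeof(my_data),
 *      };
 *      ret = iw_cm_accept(child_cm_id, &param);
 */
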
/*
 * Active Side: CM_ID <-- CONN_SENT
 *
 * If successful, results in the generation of a CONNECT_REPLY
 * event. iw_cm_disconnect and iw_cm_destroy will block until the
 * CONNECT_REPLY event is received from the provider.
 */
int iw_cm_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
{
        struct iwcm_id_private *cm_id_priv;
        int ret;
        unsigned long flags;
        struct ib_qp *qp;

        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);

        ret = alloc_work_entries(cm_id_priv, 4);
        if (ret)
                return ret;

        set_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
        spin_lock_irqsave(&cm_id_priv->lock, flags);

        if (cm_id_priv->state != IW_CM_STATE_IDLE) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
                wake_up_all(&cm_id_priv->connect_wait);
                return -EINVAL;
        }

        /* Get the ib_qp given the QPN */
        qp = cm_id->device->iwcm->get_qp(cm_id->device, iw_param->qpn);
        if (!qp) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
                wake_up_all(&cm_id_priv->connect_wait);
                return -EINVAL;
        }
        cm_id->device->iwcm->add_ref(qp);
        cm_id_priv->qp = qp;
        cm_id_priv->state = IW_CM_STATE_CONN_SENT;
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        ret = cm_id->device->iwcm->connect(cm_id, iw_param);
        if (ret) {
                spin_lock_irqsave(&cm_id_priv->lock, flags);
                if (cm_id_priv->qp) {
                        cm_id->device->iwcm->rem_ref(qp);
                        cm_id_priv->qp = NULL;
                }
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_SENT);
                cm_id_priv->state = IW_CM_STATE_IDLE;
                clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
                wake_up_all(&cm_id_priv->connect_wait);
        }

        return ret;
}
EXPORT_SYMBOL(iw_cm_connect);

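/*
 * Usage sketch (hypothetical active-side consumer; my_qp and the ord/ird
 * values are illustrative): after filling in the four-tuple on the cm_id
 * and creating a QP, the ULP kicks off MPA negotiation and learns the
 * outcome via IW_CM_EVENT_CONNECT_REPLY:
 *
 *      struct iw_cm_conn_param param = {
 *              .qpn = my_qp->qp_num,
 *              .ord = 16,
 *              .ird = 16,
 *      };
 *      ret = iw_cm_connect(cm_id, &param);
 */
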
/*
 * Passive Side: new CM_ID <-- CONN_RECV
 *
 * Handles an inbound connect request. The function creates a new
 * iw_cm_id to represent the new connection and inherits the client
 * callback function and other attributes from the listening parent.
 *
 * The work item contains a pointer to the listen_cm_id and the event. The
 * listen_cm_id contains the client cm_handler, context and
 * device. These are copied when the device is cloned. The event
 * contains the new four tuple.
 *
 * An error on the child should not affect the parent, so this
 * function does not return a value.
 */
static void cm_conn_req_handler(struct iwcm_id_private *listen_id_priv,
                                struct iw_cm_event *iw_event)
{
        unsigned long flags;
        struct iw_cm_id *cm_id;
        struct iwcm_id_private *cm_id_priv;
        int ret;

        /*
         * The provider should never generate a connection request
         * event with a bad status.
         */
        BUG_ON(iw_event->status);

        cm_id = iw_create_cm_id(listen_id_priv->id.device,
                                listen_id_priv->id.cm_handler,
                                listen_id_priv->id.context);
        /* If the cm_id could not be created, ignore the request */
        if (IS_ERR(cm_id))
                goto out;

        cm_id->provider_data = iw_event->provider_data;
        cm_id->local_addr = iw_event->local_addr;
        cm_id->remote_addr = iw_event->remote_addr;

        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
        cm_id_priv->state = IW_CM_STATE_CONN_RECV;

        /*
         * We could be destroying the listening id. If so, ignore this
         * upcall.
         */
        spin_lock_irqsave(&listen_id_priv->lock, flags);
        if (listen_id_priv->state != IW_CM_STATE_LISTEN) {
                spin_unlock_irqrestore(&listen_id_priv->lock, flags);
                iw_cm_reject(cm_id, NULL, 0);
                iw_destroy_cm_id(cm_id);
                goto out;
        }
        spin_unlock_irqrestore(&listen_id_priv->lock, flags);

        ret = alloc_work_entries(cm_id_priv, 3);
        if (ret) {
                iw_cm_reject(cm_id, NULL, 0);
                iw_destroy_cm_id(cm_id);
                goto out;
        }

        /* Call the client CM handler */
        ret = cm_id->cm_handler(cm_id, iw_event);
        if (ret) {
                iw_cm_reject(cm_id, NULL, 0);
                set_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
                destroy_cm_id(cm_id);
                if (atomic_read(&cm_id_priv->refcount) == 0)
                        free_cm_id(cm_id_priv);
        }

out:
        if (iw_event->private_data_len)
                kfree(iw_event->private_data);
}

/*
 * Passive Side: CM_ID <-- ESTABLISHED
 *
 * The provider generated an ESTABLISHED event which means that
 * the MPA negotiation has completed successfully and we are now in MPA
 * FPDU mode.
 *
 * This event can only be received in the CONN_RECV state. If the
 * remote peer closed, the ESTABLISHED event would be received followed
 * by the CLOSE event. If the app closes, it will block until we wake
 * it up after processing this event.
 */
static int cm_conn_est_handler(struct iwcm_id_private *cm_id_priv,
                               struct iw_cm_event *iw_event)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&cm_id_priv->lock, flags);

        /*
         * We clear the CONNECT_WAIT bit here to allow the callback
         * function to call iw_cm_disconnect. Calling iw_destroy_cm_id
         * from a callback handler is not allowed.
         */
        clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
        BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_RECV);
        cm_id_priv->state = IW_CM_STATE_ESTABLISHED;
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
        wake_up_all(&cm_id_priv->connect_wait);

        return ret;
}

/*
 * Active Side: CM_ID <-- ESTABLISHED
 *
 * The app has called connect and is waiting for the established event to
 * post its requests to the server. This event will wake up anyone
 * blocked in iw_cm_disconnect or iw_destroy_id.
 */
static int cm_conn_rep_handler(struct iwcm_id_private *cm_id_priv,
                               struct iw_cm_event *iw_event)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        /*
         * Clear the connect wait bit so a callback function calling
         * iw_cm_disconnect will not wait and deadlock this thread.
         */
        clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
        BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_SENT);
        if (iw_event->status == 0) {
                cm_id_priv->id.local_addr = iw_event->local_addr;
                cm_id_priv->id.remote_addr = iw_event->remote_addr;
                cm_id_priv->state = IW_CM_STATE_ESTABLISHED;
        } else {
                /* REJECTED or RESET */
                cm_id_priv->id.device->iwcm->rem_ref(cm_id_priv->qp);
                cm_id_priv->qp = NULL;
                cm_id_priv->state = IW_CM_STATE_IDLE;
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);

        if (iw_event->private_data_len)
                kfree(iw_event->private_data);

        /* Wake up waiters on connect complete */
        wake_up_all(&cm_id_priv->connect_wait);

        return ret;
}

/*
 * CM_ID <-- CLOSING
 *
 * If in the ESTABLISHED state, move to CLOSING.
 */
static void cm_disconnect_handler(struct iwcm_id_private *cm_id_priv,
                                  struct iw_cm_event *iw_event)
{
        unsigned long flags;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id_priv->state == IW_CM_STATE_ESTABLISHED)
                cm_id_priv->state = IW_CM_STATE_CLOSING;
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
}

/*
 * CM_ID <-- IDLE
 *
 * If in the ESTABLISHED or CLOSING states, the QP will have been
 * moved by the provider to the ERR state. Disassociate the CM_ID from
 * the QP, move to IDLE, and remove the 'connected' reference.
 *
 * If in some other state, the cm_id was destroyed asynchronously.
 * This is the last reference that will result in waking up
 * the app thread blocked in iw_destroy_cm_id.
 */
static int cm_close_handler(struct iwcm_id_private *cm_id_priv,
                            struct iw_cm_event *iw_event)
{
        unsigned long flags;
        int ret = 0;

        spin_lock_irqsave(&cm_id_priv->lock, flags);

        if (cm_id_priv->qp) {
                cm_id_priv->id.device->iwcm->rem_ref(cm_id_priv->qp);
                cm_id_priv->qp = NULL;
        }
        switch (cm_id_priv->state) {
        case IW_CM_STATE_ESTABLISHED:
        case IW_CM_STATE_CLOSING:
                cm_id_priv->state = IW_CM_STATE_IDLE;
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
                spin_lock_irqsave(&cm_id_priv->lock, flags);
                break;
        case IW_CM_STATE_DESTROYING:
                break;
        default:
                BUG();
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        return ret;
}

static int process_event(struct iwcm_id_private *cm_id_priv,
                         struct iw_cm_event *iw_event)
{
        int ret = 0;

        switch (iw_event->event) {
        case IW_CM_EVENT_CONNECT_REQUEST:
                cm_conn_req_handler(cm_id_priv, iw_event);
                break;
        case IW_CM_EVENT_CONNECT_REPLY:
                ret = cm_conn_rep_handler(cm_id_priv, iw_event);
                break;
        case IW_CM_EVENT_ESTABLISHED:
                ret = cm_conn_est_handler(cm_id_priv, iw_event);
                break;
        case IW_CM_EVENT_DISCONNECT:
                cm_disconnect_handler(cm_id_priv, iw_event);
                break;
        case IW_CM_EVENT_CLOSE:
                ret = cm_close_handler(cm_id_priv, iw_event);
                break;
        default:
                BUG();
        }

        return ret;
}

/*
 * Process events on the work_list for the cm_id. If the callback
 * function requests that the cm_id be deleted, a flag is set in the
 * cm_id flags to indicate that when the last reference is
 * removed, the cm_id is to be destroyed. This is necessary to
 * distinguish between an object that will be destroyed by the app
 * thread asleep on the destroy_comp list vs. an object destroyed
 * here synchronously when the last reference is removed.
 */
static void cm_work_handler(struct work_struct *_work)
{
        struct iwcm_work *work = container_of(_work, struct iwcm_work, work);
        struct iw_cm_event levent;
        struct iwcm_id_private *cm_id_priv = work->cm_id;
        unsigned long flags;
        int empty;
        int ret = 0;
        int destroy_id;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        empty = list_empty(&cm_id_priv->work_list);
        while (!empty) {
                work = list_entry(cm_id_priv->work_list.next,
                                  struct iwcm_work, list);
                list_del_init(&work->list);
                empty = list_empty(&cm_id_priv->work_list);
                levent = work->event;
                put_work(work);
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);

                ret = process_event(cm_id_priv, &levent);
                if (ret) {
                        set_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
                        destroy_cm_id(&cm_id_priv->id);
                }
                BUG_ON(atomic_read(&cm_id_priv->refcount) == 0);
                destroy_id = test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
                if (iwcm_deref_id(cm_id_priv)) {
                        if (destroy_id) {
                                BUG_ON(!list_empty(&cm_id_priv->work_list));
                                free_cm_id(cm_id_priv);
                        }
                        return;
                }
                spin_lock_irqsave(&cm_id_priv->lock, flags);
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
}

/*
 * This function is called on interrupt context. Schedule events on
 * the iwcm_wq thread to allow callback functions to downcall into
 * the CM and/or block. Events are queued to a per-CM_ID
 * work_list. If this is the first event on the work_list, the work
 * element is also queued on the iwcm_wq thread.
 *
 * Each event holds a reference on the cm_id. Until the last posted
 * event has been delivered and processed, the cm_id cannot be
 * deleted.
 *
 * Returns:
 *            0 - the event was handled.
 *      -ENOMEM - the event was not handled due to lack of resources.
 */
static int cm_event_handler(struct iw_cm_id *cm_id,
                            struct iw_cm_event *iw_event)
{
        struct iwcm_work *work;
        struct iwcm_id_private *cm_id_priv;
        unsigned long flags;
        int ret = 0;

        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        work = get_work(cm_id_priv);
        if (!work) {
                ret = -ENOMEM;
                goto out;
        }

        INIT_WORK(&work->work, cm_work_handler);
        work->cm_id = cm_id_priv;
        work->event = *iw_event;

        if ((work->event.event == IW_CM_EVENT_CONNECT_REQUEST ||
             work->event.event == IW_CM_EVENT_CONNECT_REPLY) &&
            work->event.private_data_len) {
                ret = copy_private_data(&work->event);
                if (ret) {
                        put_work(work);
                        goto out;
                }
        }

        atomic_inc(&cm_id_priv->refcount);
        if (list_empty(&cm_id_priv->work_list)) {
                list_add_tail(&work->list, &cm_id_priv->work_list);
                queue_work(iwcm_wq, &work->work);
        } else
                list_add_tail(&work->list, &cm_id_priv->work_list);
out:
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return ret;
}

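/*
 * Provider-side sketch (hypothetical driver code, not part of this module):
 * the provider invokes the handler installed by iw_create_cm_id(), possibly
 * from interrupt context, and must itself reject the connection request if
 * the upcall returns -ENOMEM (work element pool exhausted, i.e. the listen
 * backlog was exceeded):
 *
 *      struct iw_cm_event event = {
 *              .event  = IW_CM_EVENT_CONNECT_REQUEST,
 *              .status = 0,
 *      };
 *      (fill in addresses, private data and provider_data here)
 *      if (cm_id->event_handler(cm_id, &event) == -ENOMEM)
 *              (reject the request in the provider)
 */
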
static int iwcm_init_qp_init_attr(struct iwcm_id_private *cm_id_priv,
                                  struct ib_qp_attr *qp_attr,
                                  int *qp_attr_mask)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id_priv->state) {
        case IW_CM_STATE_IDLE:
        case IW_CM_STATE_CONN_SENT:
        case IW_CM_STATE_CONN_RECV:
        case IW_CM_STATE_ESTABLISHED:
                *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
                qp_attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE |
                                           IB_ACCESS_REMOTE_READ;
                ret = 0;
                break;
        default:
                ret = -EINVAL;
                break;
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return ret;
}

static int iwcm_init_qp_rts_attr(struct iwcm_id_private *cm_id_priv,
                                 struct ib_qp_attr *qp_attr,
                                 int *qp_attr_mask)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id_priv->state) {
        case IW_CM_STATE_IDLE:
        case IW_CM_STATE_CONN_SENT:
        case IW_CM_STATE_CONN_RECV:
        case IW_CM_STATE_ESTABLISHED:
                *qp_attr_mask = 0;
                ret = 0;
                break;
        default:
                ret = -EINVAL;
                break;
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return ret;
}

int iw_cm_init_qp_attr(struct iw_cm_id *cm_id,
                       struct ib_qp_attr *qp_attr,
                       int *qp_attr_mask)
{
        struct iwcm_id_private *cm_id_priv;
        int ret;

        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
        switch (qp_attr->qp_state) {
        case IB_QPS_INIT:
        case IB_QPS_RTR:
                ret = iwcm_init_qp_init_attr(cm_id_priv,
                                             qp_attr, qp_attr_mask);
                break;
        case IB_QPS_RTS:
                ret = iwcm_init_qp_rts_attr(cm_id_priv,
                                            qp_attr, qp_attr_mask);
                break;
        default:
                ret = -EINVAL;
                break;
        }
        return ret;
}
EXPORT_SYMBOL(iw_cm_init_qp_attr);

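/*
 * Usage sketch (hypothetical consumer): the returned attributes feed
 * directly into ib_modify_qp() when the ULP walks its QP through the
 * INIT/RTR/RTS transitions:
 *
 *      qp_attr.qp_state = IB_QPS_RTS;
 *      ret = iw_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
 *      if (!ret)
 *              ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
 */
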
static int __init iw_cm_init(void)
{
        iwcm_wq = create_singlethread_workqueue("iw_cm_wq");
        if (!iwcm_wq)
                return -ENOMEM;

        return 0;
}

static void __exit iw_cm_cleanup(void)
{
        destroy_workqueue(iwcm_wq);
}

module_init(iw_cm_init);
module_exit(iw_cm_cleanup);