drivers/infiniband/core/cm.c
1 /*
2 * Copyright (c) 2004-2007 Intel Corporation. All rights reserved.
3 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
4 * Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved.
5 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
13 * Redistribution and use in source and binary forms, with or
14 * without modification, are permitted provided that the following
15 * conditions are met:
17 * - Redistributions of source code must retain the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer.
21 * - Redistributions in binary form must reproduce the above
22 * copyright notice, this list of conditions and the following
23 * disclaimer in the documentation and/or other materials
24 * provided with the distribution.
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 * SOFTWARE.
36 #include <linux/completion.h>
37 #include <linux/dma-mapping.h>
38 #include <linux/device.h>
39 #include <linux/module.h>
40 #include <linux/err.h>
41 #include <linux/idr.h>
42 #include <linux/interrupt.h>
43 #include <linux/random.h>
44 #include <linux/rbtree.h>
45 #include <linux/spinlock.h>
46 #include <linux/slab.h>
47 #include <linux/sysfs.h>
48 #include <linux/workqueue.h>
49 #include <linux/kdev_t.h>
50 #include <linux/etherdevice.h>
52 #include <rdma/ib_cache.h>
53 #include <rdma/ib_cm.h>
54 #include "cm_msgs.h"
56 MODULE_AUTHOR("Sean Hefty");
57 MODULE_DESCRIPTION("InfiniBand CM");
58 MODULE_LICENSE("Dual BSD/GPL");
60 static const char * const ibcm_rej_reason_strs[] = {
61 [IB_CM_REJ_NO_QP] = "no QP",
62 [IB_CM_REJ_NO_EEC] = "no EEC",
63 [IB_CM_REJ_NO_RESOURCES] = "no resources",
64 [IB_CM_REJ_TIMEOUT] = "timeout",
65 [IB_CM_REJ_UNSUPPORTED] = "unsupported",
66 [IB_CM_REJ_INVALID_COMM_ID] = "invalid comm ID",
67 [IB_CM_REJ_INVALID_COMM_INSTANCE] = "invalid comm instance",
68 [IB_CM_REJ_INVALID_SERVICE_ID] = "invalid service ID",
69 [IB_CM_REJ_INVALID_TRANSPORT_TYPE] = "invalid transport type",
70 [IB_CM_REJ_STALE_CONN] = "stale conn",
71 [IB_CM_REJ_RDC_NOT_EXIST] = "RDC not exist",
72 [IB_CM_REJ_INVALID_GID] = "invalid GID",
73 [IB_CM_REJ_INVALID_LID] = "invalid LID",
74 [IB_CM_REJ_INVALID_SL] = "invalid SL",
75 [IB_CM_REJ_INVALID_TRAFFIC_CLASS] = "invalid traffic class",
76 [IB_CM_REJ_INVALID_HOP_LIMIT] = "invalid hop limit",
77 [IB_CM_REJ_INVALID_PACKET_RATE] = "invalid packet rate",
78 [IB_CM_REJ_INVALID_ALT_GID] = "invalid alt GID",
79 [IB_CM_REJ_INVALID_ALT_LID] = "invalid alt LID",
80 [IB_CM_REJ_INVALID_ALT_SL] = "invalid alt SL",
81 [IB_CM_REJ_INVALID_ALT_TRAFFIC_CLASS] = "invalid alt traffic class",
82 [IB_CM_REJ_INVALID_ALT_HOP_LIMIT] = "invalid alt hop limit",
83 [IB_CM_REJ_INVALID_ALT_PACKET_RATE] = "invalid alt packet rate",
84 [IB_CM_REJ_PORT_CM_REDIRECT] = "port CM redirect",
85 [IB_CM_REJ_PORT_REDIRECT] = "port redirect",
86 [IB_CM_REJ_INVALID_MTU] = "invalid MTU",
87 [IB_CM_REJ_INSUFFICIENT_RESP_RESOURCES] = "insufficient resp resources",
88 [IB_CM_REJ_CONSUMER_DEFINED] = "consumer defined",
89 [IB_CM_REJ_INVALID_RNR_RETRY] = "invalid RNR retry",
90 [IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID] = "duplicate local comm ID",
91 [IB_CM_REJ_INVALID_CLASS_VERSION] = "invalid class version",
92 [IB_CM_REJ_INVALID_FLOW_LABEL] = "invalid flow label",
93 [IB_CM_REJ_INVALID_ALT_FLOW_LABEL] = "invalid alt flow label",
96 const char *__attribute_const__ ibcm_reject_msg(int reason)
98 size_t index = reason;
100 if (index < ARRAY_SIZE(ibcm_rej_reason_strs) &&
101 ibcm_rej_reason_strs[index])
102 return ibcm_rej_reason_strs[index];
103 else
104 return "unrecognized reason";
106 EXPORT_SYMBOL(ibcm_reject_msg);
108 static void cm_add_one(struct ib_device *device);
109 static void cm_remove_one(struct ib_device *device, void *client_data);
111 static struct ib_client cm_client = {
112 .name = "cm",
113 .add = cm_add_one,
114 .remove = cm_remove_one
117 static struct ib_cm {
118 spinlock_t lock;
119 struct list_head device_list;
120 rwlock_t device_lock;
121 struct rb_root listen_service_table;
122 u64 listen_service_id;
123 /* struct rb_root peer_service_table; todo: fix peer to peer */
124 struct rb_root remote_qp_table;
125 struct rb_root remote_id_table;
126 struct rb_root remote_sidr_table;
127 struct idr local_id_table;
128 __be32 random_id_operand;
129 struct list_head timewait_list;
130 struct workqueue_struct *wq;
131 /* Synchronize on CM port state changes */
132 spinlock_t state_lock;
133 } cm;
135 /* Counter indexes ordered by attribute ID */
136 enum {
137 CM_REQ_COUNTER,
138 CM_MRA_COUNTER,
139 CM_REJ_COUNTER,
140 CM_REP_COUNTER,
141 CM_RTU_COUNTER,
142 CM_DREQ_COUNTER,
143 CM_DREP_COUNTER,
144 CM_SIDR_REQ_COUNTER,
145 CM_SIDR_REP_COUNTER,
146 CM_LAP_COUNTER,
147 CM_APR_COUNTER,
148 CM_ATTR_COUNT,
149 CM_ATTR_ID_OFFSET = 0x0010,
152 enum {
153 CM_XMIT,
154 CM_XMIT_RETRIES,
155 CM_RECV,
156 CM_RECV_DUPLICATES,
157 CM_COUNTER_GROUPS
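/* Each name slot is sized to fit the longest group name, "cm_rx_duplicates". */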
160 static char const counter_group_names[CM_COUNTER_GROUPS]
161 [sizeof("cm_rx_duplicates")] = {
162 "cm_tx_msgs", "cm_tx_retries",
163 "cm_rx_msgs", "cm_rx_duplicates"
166 struct cm_counter_group {
167 struct kobject obj;
168 atomic_long_t counter[CM_ATTR_COUNT];
171 struct cm_counter_attribute {
172 struct attribute attr;
173 int index;
176 #define CM_COUNTER_ATTR(_name, _index) \
177 struct cm_counter_attribute cm_##_name##_counter_attr = { \
178 .attr = { .name = __stringify(_name), .mode = 0444 }, \
179 .index = _index \
182 static CM_COUNTER_ATTR(req, CM_REQ_COUNTER);
183 static CM_COUNTER_ATTR(mra, CM_MRA_COUNTER);
184 static CM_COUNTER_ATTR(rej, CM_REJ_COUNTER);
185 static CM_COUNTER_ATTR(rep, CM_REP_COUNTER);
186 static CM_COUNTER_ATTR(rtu, CM_RTU_COUNTER);
187 static CM_COUNTER_ATTR(dreq, CM_DREQ_COUNTER);
188 static CM_COUNTER_ATTR(drep, CM_DREP_COUNTER);
189 static CM_COUNTER_ATTR(sidr_req, CM_SIDR_REQ_COUNTER);
190 static CM_COUNTER_ATTR(sidr_rep, CM_SIDR_REP_COUNTER);
191 static CM_COUNTER_ATTR(lap, CM_LAP_COUNTER);
192 static CM_COUNTER_ATTR(apr, CM_APR_COUNTER);
194 static struct attribute *cm_counter_default_attrs[] = {
195 &cm_req_counter_attr.attr,
196 &cm_mra_counter_attr.attr,
197 &cm_rej_counter_attr.attr,
198 &cm_rep_counter_attr.attr,
199 &cm_rtu_counter_attr.attr,
200 &cm_dreq_counter_attr.attr,
201 &cm_drep_counter_attr.attr,
202 &cm_sidr_req_counter_attr.attr,
203 &cm_sidr_rep_counter_attr.attr,
204 &cm_lap_counter_attr.attr,
205 &cm_apr_counter_attr.attr,
206 NULL
209 struct cm_port {
210 struct cm_device *cm_dev;
211 struct ib_mad_agent *mad_agent;
212 struct kobject port_obj;
213 u8 port_num;
214 struct list_head cm_priv_prim_list;
215 struct list_head cm_priv_altr_list;
216 struct cm_counter_group counter_group[CM_COUNTER_GROUPS];
219 struct cm_device {
220 struct list_head list;
221 struct ib_device *ib_device;
222 struct device *device;
223 u8 ack_delay;
224 int going_down;
225 struct cm_port *port[0];
228 struct cm_av {
229 struct cm_port *port;
230 union ib_gid dgid;
231 struct rdma_ah_attr ah_attr;
232 u16 pkey_index;
233 u8 timeout;
236 struct cm_work {
237 struct delayed_work work;
238 struct list_head list;
239 struct cm_port *port;
240 struct ib_mad_recv_wc *mad_recv_wc; /* Received MADs */
241 __be32 local_id; /* Established / timewait */
242 __be32 remote_id;
243 struct ib_cm_event cm_event;
244 struct sa_path_rec path[0];
247 struct cm_timewait_info {
248 struct cm_work work; /* Must be first. */
249 struct list_head list;
250 struct rb_node remote_qp_node;
251 struct rb_node remote_id_node;
252 __be64 remote_ca_guid;
253 __be32 remote_qpn;
254 u8 inserted_remote_qp;
255 u8 inserted_remote_id;
258 struct cm_id_private {
259 struct ib_cm_id id;
261 struct rb_node service_node;
262 struct rb_node sidr_id_node;
263 spinlock_t lock; /* Do not acquire inside cm.lock */
264 struct completion comp;
265 atomic_t refcount;
266 /* Number of clients sharing this ib_cm_id. Only valid for listeners.
267 * Protected by the cm.lock spinlock. */
268 int listen_sharecount;
270 struct ib_mad_send_buf *msg;
271 struct cm_timewait_info *timewait_info;
272 /* todo: use alternate port on send failure */
273 struct cm_av av;
274 struct cm_av alt_av;
276 void *private_data;
277 __be64 tid;
278 __be32 local_qpn;
279 __be32 remote_qpn;
280 enum ib_qp_type qp_type;
281 __be32 sq_psn;
282 __be32 rq_psn;
283 int timeout_ms;
284 enum ib_mtu path_mtu;
285 __be16 pkey;
286 u8 private_data_len;
287 u8 max_cm_retries;
288 u8 peer_to_peer;
289 u8 responder_resources;
290 u8 initiator_depth;
291 u8 retry_count;
292 u8 rnr_retry_count;
293 u8 service_timeout;
294 u8 target_ack_delay;
296 struct list_head prim_list;
297 struct list_head altr_list;
298 /* Indicates that the send port's MAD agent is registered and the av is set */
299 int prim_send_port_not_ready;
300 int altr_send_port_not_ready;
302 struct list_head work_list;
303 atomic_t work_count;
306 static void cm_work_handler(struct work_struct *work);
308 static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
310 if (atomic_dec_and_test(&cm_id_priv->refcount))
311 complete(&cm_id_priv->comp);
314 static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
315 struct ib_mad_send_buf **msg)
317 struct ib_mad_agent *mad_agent;
318 struct ib_mad_send_buf *m;
319 struct ib_ah *ah;
320 struct cm_av *av;
321 unsigned long flags, flags2;
322 int ret = 0;
324 /* don't let the port be released till the agent is down */
325 spin_lock_irqsave(&cm.state_lock, flags2);
326 spin_lock_irqsave(&cm.lock, flags);
327 if (!cm_id_priv->prim_send_port_not_ready)
328 av = &cm_id_priv->av;
329 else if (!cm_id_priv->altr_send_port_not_ready &&
330 (cm_id_priv->alt_av.port))
331 av = &cm_id_priv->alt_av;
332 else {
333 pr_info("%s: not valid CM id\n", __func__);
334 ret = -ENODEV;
335 spin_unlock_irqrestore(&cm.lock, flags);
336 goto out;
338 spin_unlock_irqrestore(&cm.lock, flags);
339 /* Make sure the port hasn't released the MAD agent yet */
340 mad_agent = cm_id_priv->av.port->mad_agent;
341 if (!mad_agent) {
342 pr_info("%s: not a valid MAD agent\n", __func__);
343 ret = -ENODEV;
344 goto out;
346 ah = rdma_create_ah(mad_agent->qp->pd, &av->ah_attr);
347 if (IS_ERR(ah)) {
348 ret = PTR_ERR(ah);
349 goto out;
352 m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
353 av->pkey_index,
354 0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
355 GFP_ATOMIC,
356 IB_MGMT_BASE_VERSION);
357 if (IS_ERR(m)) {
358 rdma_destroy_ah(ah);
359 ret = PTR_ERR(m);
360 goto out;
363 /* Timeout set by caller if response is expected. */
364 m->ah = ah;
365 m->retries = cm_id_priv->max_cm_retries;
367 atomic_inc(&cm_id_priv->refcount);
368 m->context[0] = cm_id_priv;
369 *msg = m;
371 out:
372 spin_unlock_irqrestore(&cm.state_lock, flags2);
373 return ret;
376 static struct ib_mad_send_buf *cm_alloc_response_msg_no_ah(struct cm_port *port,
377 struct ib_mad_recv_wc *mad_recv_wc)
379 return ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index,
380 0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
381 GFP_ATOMIC,
382 IB_MGMT_BASE_VERSION);
385 static int cm_create_response_msg_ah(struct cm_port *port,
386 struct ib_mad_recv_wc *mad_recv_wc,
387 struct ib_mad_send_buf *msg)
389 struct ib_ah *ah;
391 ah = ib_create_ah_from_wc(port->mad_agent->qp->pd, mad_recv_wc->wc,
392 mad_recv_wc->recv_buf.grh, port->port_num);
393 if (IS_ERR(ah))
394 return PTR_ERR(ah);
396 msg->ah = ah;
397 return 0;
400 static void cm_free_msg(struct ib_mad_send_buf *msg)
402 if (msg->ah)
403 rdma_destroy_ah(msg->ah);
404 if (msg->context[0])
405 cm_deref_id(msg->context[0]);
406 ib_free_send_mad(msg);
409 static int cm_alloc_response_msg(struct cm_port *port,
410 struct ib_mad_recv_wc *mad_recv_wc,
411 struct ib_mad_send_buf **msg)
413 struct ib_mad_send_buf *m;
414 int ret;
416 m = cm_alloc_response_msg_no_ah(port, mad_recv_wc);
417 if (IS_ERR(m))
418 return PTR_ERR(m);
420 ret = cm_create_response_msg_ah(port, mad_recv_wc, m);
421 if (ret) {
422 cm_free_msg(m);
423 return ret;
426 *msg = m;
427 return 0;
430 static void * cm_copy_private_data(const void *private_data,
431 u8 private_data_len)
433 void *data;
435 if (!private_data || !private_data_len)
436 return NULL;
438 data = kmemdup(private_data, private_data_len, GFP_KERNEL);
439 if (!data)
440 return ERR_PTR(-ENOMEM);
442 return data;
445 static void cm_set_private_data(struct cm_id_private *cm_id_priv,
446 void *private_data, u8 private_data_len)
448 if (cm_id_priv->private_data && cm_id_priv->private_data_len)
449 kfree(cm_id_priv->private_data);
451 cm_id_priv->private_data = private_data;
452 cm_id_priv->private_data_len = private_data_len;
455 static int cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
456 struct ib_grh *grh, struct cm_av *av)
458 av->port = port;
459 av->pkey_index = wc->pkey_index;
460 return ib_init_ah_attr_from_wc(port->cm_dev->ib_device,
461 port->port_num, wc,
462 grh, &av->ah_attr);
465 static int cm_init_av_by_path(struct sa_path_rec *path, struct cm_av *av,
466 struct cm_id_private *cm_id_priv)
468 struct cm_device *cm_dev;
469 struct cm_port *port = NULL;
470 unsigned long flags;
471 int ret;
472 u8 p;
473 struct net_device *ndev = ib_get_ndev_from_path(path);
475 read_lock_irqsave(&cm.device_lock, flags);
476 list_for_each_entry(cm_dev, &cm.device_list, list) {
477 if (!ib_find_cached_gid(cm_dev->ib_device, &path->sgid,
478 sa_conv_pathrec_to_gid_type(path),
479 ndev, &p, NULL)) {
480 port = cm_dev->port[p-1];
481 break;
484 read_unlock_irqrestore(&cm.device_lock, flags);
486 if (ndev)
487 dev_put(ndev);
489 if (!port)
490 return -EINVAL;
492 ret = ib_find_cached_pkey(cm_dev->ib_device, port->port_num,
493 be16_to_cpu(path->pkey), &av->pkey_index);
494 if (ret)
495 return ret;
497 av->port = port;
498 ret = ib_init_ah_attr_from_path(cm_dev->ib_device, port->port_num, path,
499 &av->ah_attr);
500 if (ret)
501 return ret;
503 av->timeout = path->packet_life_time + 1;
505 spin_lock_irqsave(&cm.lock, flags);
506 if (&cm_id_priv->av == av)
507 list_add_tail(&cm_id_priv->prim_list, &port->cm_priv_prim_list);
508 else if (&cm_id_priv->alt_av == av)
509 list_add_tail(&cm_id_priv->altr_list, &port->cm_priv_altr_list);
510 else
511 ret = -EINVAL;
513 spin_unlock_irqrestore(&cm.lock, flags);
515 return ret;
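/*
 * Allocate a local communication ID.  The IDR-assigned value is XORed with
 * cm.random_id_operand so that the comm IDs used on the wire do not follow
 * an easily guessable sequence.
 */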
518 static int cm_alloc_id(struct cm_id_private *cm_id_priv)
520 unsigned long flags;
521 int id;
523 idr_preload(GFP_KERNEL);
524 spin_lock_irqsave(&cm.lock, flags);
526 id = idr_alloc_cyclic(&cm.local_id_table, cm_id_priv, 0, 0, GFP_NOWAIT);
528 spin_unlock_irqrestore(&cm.lock, flags);
529 idr_preload_end();
531 cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand;
532 return id < 0 ? id : 0;
535 static void cm_free_id(__be32 local_id)
537 spin_lock_irq(&cm.lock);
538 idr_remove(&cm.local_id_table,
539 (__force int) (local_id ^ cm.random_id_operand));
540 spin_unlock_irq(&cm.lock);
543 static struct cm_id_private * cm_get_id(__be32 local_id, __be32 remote_id)
545 struct cm_id_private *cm_id_priv;
547 cm_id_priv = idr_find(&cm.local_id_table,
548 (__force int) (local_id ^ cm.random_id_operand));
549 if (cm_id_priv) {
550 if (cm_id_priv->id.remote_id == remote_id)
551 atomic_inc(&cm_id_priv->refcount);
552 else
553 cm_id_priv = NULL;
556 return cm_id_priv;
559 static struct cm_id_private * cm_acquire_id(__be32 local_id, __be32 remote_id)
561 struct cm_id_private *cm_id_priv;
563 spin_lock_irq(&cm.lock);
564 cm_id_priv = cm_get_id(local_id, remote_id);
565 spin_unlock_irq(&cm.lock);
567 return cm_id_priv;
571 * Trivial helpers to strip endian annotation and compare; the
572 * endianness doesn't actually matter since we just need a stable
573 * order for the RB tree.
575 static int be32_lt(__be32 a, __be32 b)
577 return (__force u32) a < (__force u32) b;
580 static int be32_gt(__be32 a, __be32 b)
582 return (__force u32) a > (__force u32) b;
585 static int be64_lt(__be64 a, __be64 b)
587 return (__force u64) a < (__force u64) b;
590 static int be64_gt(__be64 a, __be64 b)
592 return (__force u64) a > (__force u64) b;
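/*
 * Insert a listening cm_id into the global listen service tree
 * (cm.listen_service_table).  A new listen collides with an existing one
 * when both are on the same device and their service IDs match under the
 * registered masks; in that case the existing entry is returned and nothing
 * is inserted.
 */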
595 static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
597 struct rb_node **link = &cm.listen_service_table.rb_node;
598 struct rb_node *parent = NULL;
599 struct cm_id_private *cur_cm_id_priv;
600 __be64 service_id = cm_id_priv->id.service_id;
601 __be64 service_mask = cm_id_priv->id.service_mask;
603 while (*link) {
604 parent = *link;
605 cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
606 service_node);
607 if ((cur_cm_id_priv->id.service_mask & service_id) ==
608 (service_mask & cur_cm_id_priv->id.service_id) &&
609 (cm_id_priv->id.device == cur_cm_id_priv->id.device))
610 return cur_cm_id_priv;
612 if (cm_id_priv->id.device < cur_cm_id_priv->id.device)
613 link = &(*link)->rb_left;
614 else if (cm_id_priv->id.device > cur_cm_id_priv->id.device)
615 link = &(*link)->rb_right;
616 else if (be64_lt(service_id, cur_cm_id_priv->id.service_id))
617 link = &(*link)->rb_left;
618 else if (be64_gt(service_id, cur_cm_id_priv->id.service_id))
619 link = &(*link)->rb_right;
620 else
621 link = &(*link)->rb_right;
623 rb_link_node(&cm_id_priv->service_node, parent, link);
624 rb_insert_color(&cm_id_priv->service_node, &cm.listen_service_table);
625 return NULL;
628 static struct cm_id_private * cm_find_listen(struct ib_device *device,
629 __be64 service_id)
631 struct rb_node *node = cm.listen_service_table.rb_node;
632 struct cm_id_private *cm_id_priv;
634 while (node) {
635 cm_id_priv = rb_entry(node, struct cm_id_private, service_node);
636 if ((cm_id_priv->id.service_mask & service_id) ==
637 cm_id_priv->id.service_id &&
638 (cm_id_priv->id.device == device))
639 return cm_id_priv;
641 if (device < cm_id_priv->id.device)
642 node = node->rb_left;
643 else if (device > cm_id_priv->id.device)
644 node = node->rb_right;
645 else if (be64_lt(service_id, cm_id_priv->id.service_id))
646 node = node->rb_left;
647 else if (be64_gt(service_id, cm_id_priv->id.service_id))
648 node = node->rb_right;
649 else
650 node = node->rb_right;
652 return NULL;
655 static struct cm_timewait_info * cm_insert_remote_id(struct cm_timewait_info
656 *timewait_info)
658 struct rb_node **link = &cm.remote_id_table.rb_node;
659 struct rb_node *parent = NULL;
660 struct cm_timewait_info *cur_timewait_info;
661 __be64 remote_ca_guid = timewait_info->remote_ca_guid;
662 __be32 remote_id = timewait_info->work.remote_id;
664 while (*link) {
665 parent = *link;
666 cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
667 remote_id_node);
668 if (be32_lt(remote_id, cur_timewait_info->work.remote_id))
669 link = &(*link)->rb_left;
670 else if (be32_gt(remote_id, cur_timewait_info->work.remote_id))
671 link = &(*link)->rb_right;
672 else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
673 link = &(*link)->rb_left;
674 else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
675 link = &(*link)->rb_right;
676 else
677 return cur_timewait_info;
679 timewait_info->inserted_remote_id = 1;
680 rb_link_node(&timewait_info->remote_id_node, parent, link);
681 rb_insert_color(&timewait_info->remote_id_node, &cm.remote_id_table);
682 return NULL;
685 static struct cm_timewait_info * cm_find_remote_id(__be64 remote_ca_guid,
686 __be32 remote_id)
688 struct rb_node *node = cm.remote_id_table.rb_node;
689 struct cm_timewait_info *timewait_info;
691 while (node) {
692 timewait_info = rb_entry(node, struct cm_timewait_info,
693 remote_id_node);
694 if (be32_lt(remote_id, timewait_info->work.remote_id))
695 node = node->rb_left;
696 else if (be32_gt(remote_id, timewait_info->work.remote_id))
697 node = node->rb_right;
698 else if (be64_lt(remote_ca_guid, timewait_info->remote_ca_guid))
699 node = node->rb_left;
700 else if (be64_gt(remote_ca_guid, timewait_info->remote_ca_guid))
701 node = node->rb_right;
702 else
703 return timewait_info;
705 return NULL;
708 static struct cm_timewait_info * cm_insert_remote_qpn(struct cm_timewait_info
709 *timewait_info)
711 struct rb_node **link = &cm.remote_qp_table.rb_node;
712 struct rb_node *parent = NULL;
713 struct cm_timewait_info *cur_timewait_info;
714 __be64 remote_ca_guid = timewait_info->remote_ca_guid;
715 __be32 remote_qpn = timewait_info->remote_qpn;
717 while (*link) {
718 parent = *link;
719 cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
720 remote_qp_node);
721 if (be32_lt(remote_qpn, cur_timewait_info->remote_qpn))
722 link = &(*link)->rb_left;
723 else if (be32_gt(remote_qpn, cur_timewait_info->remote_qpn))
724 link = &(*link)->rb_right;
725 else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
726 link = &(*link)->rb_left;
727 else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
728 link = &(*link)->rb_right;
729 else
730 return cur_timewait_info;
732 timewait_info->inserted_remote_qp = 1;
733 rb_link_node(&timewait_info->remote_qp_node, parent, link);
734 rb_insert_color(&timewait_info->remote_qp_node, &cm.remote_qp_table);
735 return NULL;
738 static struct cm_id_private * cm_insert_remote_sidr(struct cm_id_private
739 *cm_id_priv)
741 struct rb_node **link = &cm.remote_sidr_table.rb_node;
742 struct rb_node *parent = NULL;
743 struct cm_id_private *cur_cm_id_priv;
744 union ib_gid *port_gid = &cm_id_priv->av.dgid;
745 __be32 remote_id = cm_id_priv->id.remote_id;
747 while (*link) {
748 parent = *link;
749 cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
750 sidr_id_node);
751 if (be32_lt(remote_id, cur_cm_id_priv->id.remote_id))
752 link = &(*link)->rb_left;
753 else if (be32_gt(remote_id, cur_cm_id_priv->id.remote_id))
754 link = &(*link)->rb_right;
755 else {
756 int cmp;
757 cmp = memcmp(port_gid, &cur_cm_id_priv->av.dgid,
758 sizeof *port_gid);
759 if (cmp < 0)
760 link = &(*link)->rb_left;
761 else if (cmp > 0)
762 link = &(*link)->rb_right;
763 else
764 return cur_cm_id_priv;
767 rb_link_node(&cm_id_priv->sidr_id_node, parent, link);
768 rb_insert_color(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
769 return NULL;
772 static void cm_reject_sidr_req(struct cm_id_private *cm_id_priv,
773 enum ib_cm_sidr_status status)
775 struct ib_cm_sidr_rep_param param;
777 memset(&param, 0, sizeof param);
778 param.status = status;
779 ib_send_cm_sidr_rep(&cm_id_priv->id, &param);
782 struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
783 ib_cm_handler cm_handler,
784 void *context)
786 struct cm_id_private *cm_id_priv;
787 int ret;
789 cm_id_priv = kzalloc(sizeof *cm_id_priv, GFP_KERNEL);
790 if (!cm_id_priv)
791 return ERR_PTR(-ENOMEM);
793 cm_id_priv->id.state = IB_CM_IDLE;
794 cm_id_priv->id.device = device;
795 cm_id_priv->id.cm_handler = cm_handler;
796 cm_id_priv->id.context = context;
797 cm_id_priv->id.remote_cm_qpn = 1;
798 ret = cm_alloc_id(cm_id_priv);
799 if (ret)
800 goto error;
802 spin_lock_init(&cm_id_priv->lock);
803 init_completion(&cm_id_priv->comp);
804 INIT_LIST_HEAD(&cm_id_priv->work_list);
805 INIT_LIST_HEAD(&cm_id_priv->prim_list);
806 INIT_LIST_HEAD(&cm_id_priv->altr_list);
807 atomic_set(&cm_id_priv->work_count, -1);
808 atomic_set(&cm_id_priv->refcount, 1);
809 return &cm_id_priv->id;
811 error:
812 kfree(cm_id_priv);
813 return ERR_PTR(-ENOMEM);
815 EXPORT_SYMBOL(ib_create_cm_id);
817 static struct cm_work * cm_dequeue_work(struct cm_id_private *cm_id_priv)
819 struct cm_work *work;
821 if (list_empty(&cm_id_priv->work_list))
822 return NULL;
824 work = list_entry(cm_id_priv->work_list.next, struct cm_work, list);
825 list_del(&work->list);
826 return work;
829 static void cm_free_work(struct cm_work *work)
831 if (work->mad_recv_wc)
832 ib_free_recv_mad(work->mad_recv_wc);
833 kfree(work);
836 static inline int cm_convert_to_ms(int iba_time)
838 /* approximate conversion to ms from 4.096us x 2^iba_time */
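	/*
	 * e.g. an IBA time of 20 represents 4.096us * 2^20 ~= 4.3s;
	 * this returns 1 << (20 - 8) = 4096 ms (~4.1s).
	 */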
839 return 1 << max(iba_time - 8, 0);
843 * calculate: 4.096x2^ack_timeout = 4.096x2^ack_delay + 2x4.096x2^life_time
844 * Because of how ack_timeout is stored, adding one doubles the timeout.
845 * To avoid large timeouts, select the max(ack_delay, life_time + 1), and
846 * increment it (round up) only if the other is within 50%.
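 *
 * For example, ca_ack_delay = 14 and packet_life_time = 14 gives an initial
 * ack_timeout of 15, which is then rounded up to 16 because ca_ack_delay is
 * within 50% (i.e. ca_ack_delay >= ack_timeout - 1).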
848 static u8 cm_ack_timeout(u8 ca_ack_delay, u8 packet_life_time)
850 int ack_timeout = packet_life_time + 1;
852 if (ack_timeout >= ca_ack_delay)
853 ack_timeout += (ca_ack_delay >= (ack_timeout - 1));
854 else
855 ack_timeout = ca_ack_delay +
856 (ack_timeout >= (ca_ack_delay - 1));
858 return min(31, ack_timeout);
861 static void cm_cleanup_timewait(struct cm_timewait_info *timewait_info)
863 if (timewait_info->inserted_remote_id) {
864 rb_erase(&timewait_info->remote_id_node, &cm.remote_id_table);
865 timewait_info->inserted_remote_id = 0;
868 if (timewait_info->inserted_remote_qp) {
869 rb_erase(&timewait_info->remote_qp_node, &cm.remote_qp_table);
870 timewait_info->inserted_remote_qp = 0;
874 static struct cm_timewait_info * cm_create_timewait_info(__be32 local_id)
876 struct cm_timewait_info *timewait_info;
878 timewait_info = kzalloc(sizeof *timewait_info, GFP_KERNEL);
879 if (!timewait_info)
880 return ERR_PTR(-ENOMEM);
882 timewait_info->work.local_id = local_id;
883 INIT_DELAYED_WORK(&timewait_info->work.work, cm_work_handler);
884 timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
885 return timewait_info;
888 static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
890 int wait_time;
891 unsigned long flags;
892 struct cm_device *cm_dev;
894 cm_dev = ib_get_client_data(cm_id_priv->id.device, &cm_client);
895 if (!cm_dev)
896 return;
898 spin_lock_irqsave(&cm.lock, flags);
899 cm_cleanup_timewait(cm_id_priv->timewait_info);
900 list_add_tail(&cm_id_priv->timewait_info->list, &cm.timewait_list);
901 spin_unlock_irqrestore(&cm.lock, flags);
904 * The cm_id could be destroyed by the user before we exit timewait.
905 * To protect against this, we search for the cm_id after exiting
906 * timewait before notifying the user that we've exited timewait.
908 cm_id_priv->id.state = IB_CM_TIMEWAIT;
909 wait_time = cm_convert_to_ms(cm_id_priv->av.timeout);
911 /* Check if the device started its remove_one */
912 spin_lock_irqsave(&cm.lock, flags);
913 if (!cm_dev->going_down)
914 queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
915 msecs_to_jiffies(wait_time));
916 spin_unlock_irqrestore(&cm.lock, flags);
918 cm_id_priv->timewait_info = NULL;
921 static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
923 unsigned long flags;
925 cm_id_priv->id.state = IB_CM_IDLE;
926 if (cm_id_priv->timewait_info) {
927 spin_lock_irqsave(&cm.lock, flags);
928 cm_cleanup_timewait(cm_id_priv->timewait_info);
929 spin_unlock_irqrestore(&cm.lock, flags);
930 kfree(cm_id_priv->timewait_info);
931 cm_id_priv->timewait_info = NULL;
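/*
 * Tear down a cm_id.  Depending on the current state this cancels any
 * outstanding MAD, sends a REJ, DREQ or DREP as needed, removes the id from
 * the lookup tables, and finally frees it once all references and queued
 * work have been drained.
 */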
935 static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
937 struct cm_id_private *cm_id_priv;
938 struct cm_work *work;
940 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
941 retest:
942 spin_lock_irq(&cm_id_priv->lock);
943 switch (cm_id->state) {
944 case IB_CM_LISTEN:
945 spin_unlock_irq(&cm_id_priv->lock);
947 spin_lock_irq(&cm.lock);
948 if (--cm_id_priv->listen_sharecount > 0) {
949 /* The id is still shared. */
950 cm_deref_id(cm_id_priv);
951 spin_unlock_irq(&cm.lock);
952 return;
954 rb_erase(&cm_id_priv->service_node, &cm.listen_service_table);
955 spin_unlock_irq(&cm.lock);
956 break;
957 case IB_CM_SIDR_REQ_SENT:
958 cm_id->state = IB_CM_IDLE;
959 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
960 spin_unlock_irq(&cm_id_priv->lock);
961 break;
962 case IB_CM_SIDR_REQ_RCVD:
963 spin_unlock_irq(&cm_id_priv->lock);
964 cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT);
965 spin_lock_irq(&cm.lock);
966 if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node))
967 rb_erase(&cm_id_priv->sidr_id_node,
968 &cm.remote_sidr_table);
969 spin_unlock_irq(&cm.lock);
970 break;
971 case IB_CM_REQ_SENT:
972 case IB_CM_MRA_REQ_RCVD:
973 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
974 spin_unlock_irq(&cm_id_priv->lock);
975 ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT,
976 &cm_id_priv->id.device->node_guid,
977 sizeof cm_id_priv->id.device->node_guid,
978 NULL, 0);
979 break;
980 case IB_CM_REQ_RCVD:
981 if (err == -ENOMEM) {
982 /* Do not send a reject; this allows future retries. */
983 cm_reset_to_idle(cm_id_priv);
984 spin_unlock_irq(&cm_id_priv->lock);
985 } else {
986 spin_unlock_irq(&cm_id_priv->lock);
987 ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
988 NULL, 0, NULL, 0);
990 break;
991 case IB_CM_REP_SENT:
992 case IB_CM_MRA_REP_RCVD:
993 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
994 /* Fall through */
995 case IB_CM_MRA_REQ_SENT:
996 case IB_CM_REP_RCVD:
997 case IB_CM_MRA_REP_SENT:
998 spin_unlock_irq(&cm_id_priv->lock);
999 ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
1000 NULL, 0, NULL, 0);
1001 break;
1002 case IB_CM_ESTABLISHED:
1003 spin_unlock_irq(&cm_id_priv->lock);
1004 if (cm_id_priv->qp_type == IB_QPT_XRC_TGT)
1005 break;
1006 ib_send_cm_dreq(cm_id, NULL, 0);
1007 goto retest;
1008 case IB_CM_DREQ_SENT:
1009 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
1010 cm_enter_timewait(cm_id_priv);
1011 spin_unlock_irq(&cm_id_priv->lock);
1012 break;
1013 case IB_CM_DREQ_RCVD:
1014 spin_unlock_irq(&cm_id_priv->lock);
1015 ib_send_cm_drep(cm_id, NULL, 0);
1016 break;
1017 default:
1018 spin_unlock_irq(&cm_id_priv->lock);
1019 break;
1022 spin_lock_irq(&cm.lock);
1023 if (!list_empty(&cm_id_priv->altr_list) &&
1024 (!cm_id_priv->altr_send_port_not_ready))
1025 list_del(&cm_id_priv->altr_list);
1026 if (!list_empty(&cm_id_priv->prim_list) &&
1027 (!cm_id_priv->prim_send_port_not_ready))
1028 list_del(&cm_id_priv->prim_list);
1029 spin_unlock_irq(&cm.lock);
1031 cm_free_id(cm_id->local_id);
1032 cm_deref_id(cm_id_priv);
1033 wait_for_completion(&cm_id_priv->comp);
1034 while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
1035 cm_free_work(work);
1036 kfree(cm_id_priv->private_data);
1037 kfree(cm_id_priv);
1040 void ib_destroy_cm_id(struct ib_cm_id *cm_id)
1042 cm_destroy_id(cm_id, 0);
1044 EXPORT_SYMBOL(ib_destroy_cm_id);
1047 * __ib_cm_listen - Initiates listening on the specified service ID for
1048 * connection and service ID resolution requests.
1049 * @cm_id: Connection identifier associated with the listen request.
1050 * @service_id: Service identifier matched against incoming connection
1051 * and service ID resolution requests. The service ID should be specified
1052 * in network-byte order. If set to IB_CM_ASSIGN_SERVICE_ID, the CM will
1053 * assign a service ID to the caller.
1054 * @service_mask: Mask applied to service ID used to listen across a
1055 * range of service IDs. If set to 0, the service ID is matched
1056 * exactly. This parameter is ignored if %service_id is set to
1057 * IB_CM_ASSIGN_SERVICE_ID.
1059 static int __ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id,
1060 __be64 service_mask)
1062 struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
1063 int ret = 0;
1065 service_mask = service_mask ? service_mask : ~cpu_to_be64(0);
1066 service_id &= service_mask;
1067 if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
1068 (service_id != IB_CM_ASSIGN_SERVICE_ID))
1069 return -EINVAL;
1071 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1072 if (cm_id->state != IB_CM_IDLE)
1073 return -EINVAL;
1075 cm_id->state = IB_CM_LISTEN;
1076 ++cm_id_priv->listen_sharecount;
1078 if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
1079 cm_id->service_id = cpu_to_be64(cm.listen_service_id++);
1080 cm_id->service_mask = ~cpu_to_be64(0);
1081 } else {
1082 cm_id->service_id = service_id;
1083 cm_id->service_mask = service_mask;
1085 cur_cm_id_priv = cm_insert_listen(cm_id_priv);
1087 if (cur_cm_id_priv) {
1088 cm_id->state = IB_CM_IDLE;
1089 --cm_id_priv->listen_sharecount;
1090 ret = -EBUSY;
1092 return ret;
1095 int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask)
1097 unsigned long flags;
1098 int ret;
1100 spin_lock_irqsave(&cm.lock, flags);
1101 ret = __ib_cm_listen(cm_id, service_id, service_mask);
1102 spin_unlock_irqrestore(&cm.lock, flags);
1104 return ret;
1106 EXPORT_SYMBOL(ib_cm_listen);
1109 * Create a new listening ib_cm_id and listen on the given service ID.
1111 * If there's an existing ID listening on that same device and service ID,
1112 * return it.
1114 * @device: Device associated with the cm_id. All related communication will
1115 * be associated with the specified device.
1116 * @cm_handler: Callback invoked to notify the user of CM events.
1117 * @service_id: Service identifier matched against incoming connection
1118 * and service ID resolution requests. The service ID should be specified
1119 * in network-byte order. If set to IB_CM_ASSIGN_SERVICE_ID, the CM will
1120 * assign a service ID to the caller.
1122 * Callers should call ib_destroy_cm_id when done with the listener ID.
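 *
 * Illustrative usage (hypothetical caller, not part of this file):
 *
 *	id = ib_cm_insert_listen(device, my_handler, cpu_to_be64(0x1234));
 *	if (IS_ERR(id))
 *		return PTR_ERR(id);
 *	...
 *	ib_destroy_cm_id(id);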
1124 struct ib_cm_id *ib_cm_insert_listen(struct ib_device *device,
1125 ib_cm_handler cm_handler,
1126 __be64 service_id)
1128 struct cm_id_private *cm_id_priv;
1129 struct ib_cm_id *cm_id;
1130 unsigned long flags;
1131 int err = 0;
1133 /* Create an ID in advance, since the creation may sleep */
1134 cm_id = ib_create_cm_id(device, cm_handler, NULL);
1135 if (IS_ERR(cm_id))
1136 return cm_id;
1138 spin_lock_irqsave(&cm.lock, flags);
1140 if (service_id == IB_CM_ASSIGN_SERVICE_ID)
1141 goto new_id;
1143 /* Find an existing ID */
1144 cm_id_priv = cm_find_listen(device, service_id);
1145 if (cm_id_priv) {
1146 if (cm_id->cm_handler != cm_handler || cm_id->context) {
1147 /* Sharing an ib_cm_id with different handlers is not
1148 * supported */
1149 spin_unlock_irqrestore(&cm.lock, flags);
1150 return ERR_PTR(-EINVAL);
1152 atomic_inc(&cm_id_priv->refcount);
1153 ++cm_id_priv->listen_sharecount;
1154 spin_unlock_irqrestore(&cm.lock, flags);
1156 ib_destroy_cm_id(cm_id);
1157 cm_id = &cm_id_priv->id;
1158 return cm_id;
1161 new_id:
1162 /* Use newly created ID */
1163 err = __ib_cm_listen(cm_id, service_id, 0);
1165 spin_unlock_irqrestore(&cm.lock, flags);
1167 if (err) {
1168 ib_destroy_cm_id(cm_id);
1169 return ERR_PTR(err);
1171 return cm_id;
1173 EXPORT_SYMBOL(ib_cm_insert_listen);
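/*
 * Build the 64-bit MAD transaction ID: the upper 32 bits carry the MAD
 * agent's hi_tid and the lower 32 bits carry the local comm ID with the CM
 * message sequence OR'ed into the top bits (bits 30-31).
 */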
1175 static __be64 cm_form_tid(struct cm_id_private *cm_id_priv,
1176 enum cm_msg_sequence msg_seq)
1178 u64 hi_tid, low_tid;
1180 hi_tid = ((u64) cm_id_priv->av.port->mad_agent->hi_tid) << 32;
1181 low_tid = (u64) ((__force u32)cm_id_priv->id.local_id |
1182 (msg_seq << 30));
1183 return cpu_to_be64(hi_tid | low_tid);
1186 static void cm_format_mad_hdr(struct ib_mad_hdr *hdr,
1187 __be16 attr_id, __be64 tid)
1189 hdr->base_version = IB_MGMT_BASE_VERSION;
1190 hdr->mgmt_class = IB_MGMT_CLASS_CM;
1191 hdr->class_version = IB_CM_CLASS_VERSION;
1192 hdr->method = IB_MGMT_METHOD_SEND;
1193 hdr->attr_id = attr_id;
1194 hdr->tid = tid;
1197 static void cm_format_req(struct cm_req_msg *req_msg,
1198 struct cm_id_private *cm_id_priv,
1199 struct ib_cm_req_param *param)
1201 struct sa_path_rec *pri_path = param->primary_path;
1202 struct sa_path_rec *alt_path = param->alternate_path;
1203 bool pri_ext = false;
1205 if (pri_path->rec_type == SA_PATH_REC_TYPE_OPA)
1206 pri_ext = opa_is_extended_lid(pri_path->opa.dlid,
1207 pri_path->opa.slid);
1209 cm_format_mad_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
1210 cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_REQ));
1212 req_msg->local_comm_id = cm_id_priv->id.local_id;
1213 req_msg->service_id = param->service_id;
1214 req_msg->local_ca_guid = cm_id_priv->id.device->node_guid;
1215 cm_req_set_local_qpn(req_msg, cpu_to_be32(param->qp_num));
1216 cm_req_set_init_depth(req_msg, param->initiator_depth);
1217 cm_req_set_remote_resp_timeout(req_msg,
1218 param->remote_cm_response_timeout);
1219 cm_req_set_qp_type(req_msg, param->qp_type);
1220 cm_req_set_flow_ctrl(req_msg, param->flow_control);
1221 cm_req_set_starting_psn(req_msg, cpu_to_be32(param->starting_psn));
1222 cm_req_set_local_resp_timeout(req_msg,
1223 param->local_cm_response_timeout);
1224 req_msg->pkey = param->primary_path->pkey;
1225 cm_req_set_path_mtu(req_msg, param->primary_path->mtu);
1226 cm_req_set_max_cm_retries(req_msg, param->max_cm_retries);
1228 if (param->qp_type != IB_QPT_XRC_INI) {
1229 cm_req_set_resp_res(req_msg, param->responder_resources);
1230 cm_req_set_retry_count(req_msg, param->retry_count);
1231 cm_req_set_rnr_retry_count(req_msg, param->rnr_retry_count);
1232 cm_req_set_srq(req_msg, param->srq);
1235 req_msg->primary_local_gid = pri_path->sgid;
1236 req_msg->primary_remote_gid = pri_path->dgid;
1237 if (pri_ext) {
1238 req_msg->primary_local_gid.global.interface_id
1239 = OPA_MAKE_ID(be32_to_cpu(pri_path->opa.slid));
1240 req_msg->primary_remote_gid.global.interface_id
1241 = OPA_MAKE_ID(be32_to_cpu(pri_path->opa.dlid));
1243 if (pri_path->hop_limit <= 1) {
1244 req_msg->primary_local_lid = pri_ext ? 0 :
1245 htons(ntohl(sa_path_get_slid(pri_path)));
1246 req_msg->primary_remote_lid = pri_ext ? 0 :
1247 htons(ntohl(sa_path_get_dlid(pri_path)));
1248 } else {
1249 /* Work-around until there's a way to obtain remote LID info */
1250 req_msg->primary_local_lid = IB_LID_PERMISSIVE;
1251 req_msg->primary_remote_lid = IB_LID_PERMISSIVE;
1253 cm_req_set_primary_flow_label(req_msg, pri_path->flow_label);
1254 cm_req_set_primary_packet_rate(req_msg, pri_path->rate);
1255 req_msg->primary_traffic_class = pri_path->traffic_class;
1256 req_msg->primary_hop_limit = pri_path->hop_limit;
1257 cm_req_set_primary_sl(req_msg, pri_path->sl);
1258 cm_req_set_primary_subnet_local(req_msg, (pri_path->hop_limit <= 1));
1259 cm_req_set_primary_local_ack_timeout(req_msg,
1260 cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
1261 pri_path->packet_life_time));
1263 if (alt_path) {
1264 bool alt_ext = false;
1266 if (alt_path->rec_type == SA_PATH_REC_TYPE_OPA)
1267 alt_ext = opa_is_extended_lid(alt_path->opa.dlid,
1268 alt_path->opa.slid);
1270 req_msg->alt_local_gid = alt_path->sgid;
1271 req_msg->alt_remote_gid = alt_path->dgid;
1272 if (alt_ext) {
1273 req_msg->alt_local_gid.global.interface_id
1274 = OPA_MAKE_ID(be32_to_cpu(alt_path->opa.slid));
1275 req_msg->alt_remote_gid.global.interface_id
1276 = OPA_MAKE_ID(be32_to_cpu(alt_path->opa.dlid));
1278 if (alt_path->hop_limit <= 1) {
1279 req_msg->alt_local_lid = alt_ext ? 0 :
1280 htons(ntohl(sa_path_get_slid(alt_path)));
1281 req_msg->alt_remote_lid = alt_ext ? 0 :
1282 htons(ntohl(sa_path_get_dlid(alt_path)));
1283 } else {
1284 req_msg->alt_local_lid = IB_LID_PERMISSIVE;
1285 req_msg->alt_remote_lid = IB_LID_PERMISSIVE;
1287 cm_req_set_alt_flow_label(req_msg,
1288 alt_path->flow_label);
1289 cm_req_set_alt_packet_rate(req_msg, alt_path->rate);
1290 req_msg->alt_traffic_class = alt_path->traffic_class;
1291 req_msg->alt_hop_limit = alt_path->hop_limit;
1292 cm_req_set_alt_sl(req_msg, alt_path->sl);
1293 cm_req_set_alt_subnet_local(req_msg, (alt_path->hop_limit <= 1));
1294 cm_req_set_alt_local_ack_timeout(req_msg,
1295 cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
1296 alt_path->packet_life_time));
1299 if (param->private_data && param->private_data_len)
1300 memcpy(req_msg->private_data, param->private_data,
1301 param->private_data_len);
1304 static int cm_validate_req_param(struct ib_cm_req_param *param)
1306 /* peer-to-peer not supported */
1307 if (param->peer_to_peer)
1308 return -EINVAL;
1310 if (!param->primary_path)
1311 return -EINVAL;
1313 if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC &&
1314 param->qp_type != IB_QPT_XRC_INI)
1315 return -EINVAL;
1317 if (param->private_data &&
1318 param->private_data_len > IB_CM_REQ_PRIVATE_DATA_SIZE)
1319 return -EINVAL;
1321 if (param->alternate_path &&
1322 (param->alternate_path->pkey != param->primary_path->pkey ||
1323 param->alternate_path->mtu != param->primary_path->mtu))
1324 return -EINVAL;
1326 return 0;
1329 int ib_send_cm_req(struct ib_cm_id *cm_id,
1330 struct ib_cm_req_param *param)
1332 struct cm_id_private *cm_id_priv;
1333 struct cm_req_msg *req_msg;
1334 unsigned long flags;
1335 int ret;
1337 ret = cm_validate_req_param(param);
1338 if (ret)
1339 return ret;
1341 /* Verify that we're not in timewait. */
1342 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1343 spin_lock_irqsave(&cm_id_priv->lock, flags);
1344 if (cm_id->state != IB_CM_IDLE) {
1345 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1346 ret = -EINVAL;
1347 goto out;
1349 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1351 cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
1352 id.local_id);
1353 if (IS_ERR(cm_id_priv->timewait_info)) {
1354 ret = PTR_ERR(cm_id_priv->timewait_info);
1355 goto out;
1358 ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av,
1359 cm_id_priv);
1360 if (ret)
1361 goto error1;
1362 if (param->alternate_path) {
1363 ret = cm_init_av_by_path(param->alternate_path,
1364 &cm_id_priv->alt_av, cm_id_priv);
1365 if (ret)
1366 goto error1;
1368 cm_id->service_id = param->service_id;
1369 cm_id->service_mask = ~cpu_to_be64(0);
1370 cm_id_priv->timeout_ms = cm_convert_to_ms(
1371 param->primary_path->packet_life_time) * 2 +
1372 cm_convert_to_ms(
1373 param->remote_cm_response_timeout);
1374 cm_id_priv->max_cm_retries = param->max_cm_retries;
1375 cm_id_priv->initiator_depth = param->initiator_depth;
1376 cm_id_priv->responder_resources = param->responder_resources;
1377 cm_id_priv->retry_count = param->retry_count;
1378 cm_id_priv->path_mtu = param->primary_path->mtu;
1379 cm_id_priv->pkey = param->primary_path->pkey;
1380 cm_id_priv->qp_type = param->qp_type;
1382 ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg);
1383 if (ret)
1384 goto error1;
1386 req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad;
1387 cm_format_req(req_msg, cm_id_priv, param);
1388 cm_id_priv->tid = req_msg->hdr.tid;
1389 cm_id_priv->msg->timeout_ms = cm_id_priv->timeout_ms;
1390 cm_id_priv->msg->context[1] = (void *) (unsigned long) IB_CM_REQ_SENT;
1392 cm_id_priv->local_qpn = cm_req_get_local_qpn(req_msg);
1393 cm_id_priv->rq_psn = cm_req_get_starting_psn(req_msg);
1395 spin_lock_irqsave(&cm_id_priv->lock, flags);
1396 ret = ib_post_send_mad(cm_id_priv->msg, NULL);
1397 if (ret) {
1398 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1399 goto error2;
1401 BUG_ON(cm_id->state != IB_CM_IDLE);
1402 cm_id->state = IB_CM_REQ_SENT;
1403 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1404 return 0;
1406 error2: cm_free_msg(cm_id_priv->msg);
1407 error1: kfree(cm_id_priv->timewait_info);
1408 out: return ret;
1410 EXPORT_SYMBOL(ib_send_cm_req);
1412 static int cm_issue_rej(struct cm_port *port,
1413 struct ib_mad_recv_wc *mad_recv_wc,
1414 enum ib_cm_rej_reason reason,
1415 enum cm_msg_response msg_rejected,
1416 void *ari, u8 ari_length)
1418 struct ib_mad_send_buf *msg = NULL;
1419 struct cm_rej_msg *rej_msg, *rcv_msg;
1420 int ret;
1422 ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
1423 if (ret)
1424 return ret;
1426 /* We just need common CM header information. Cast to any message. */
1427 rcv_msg = (struct cm_rej_msg *) mad_recv_wc->recv_buf.mad;
1428 rej_msg = (struct cm_rej_msg *) msg->mad;
1430 cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, rcv_msg->hdr.tid);
1431 rej_msg->remote_comm_id = rcv_msg->local_comm_id;
1432 rej_msg->local_comm_id = rcv_msg->remote_comm_id;
1433 cm_rej_set_msg_rejected(rej_msg, msg_rejected);
1434 rej_msg->reason = cpu_to_be16(reason);
1436 if (ari && ari_length) {
1437 cm_rej_set_reject_info_len(rej_msg, ari_length);
1438 memcpy(rej_msg->ari, ari, ari_length);
1441 ret = ib_post_send_mad(msg, NULL);
1442 if (ret)
1443 cm_free_msg(msg);
1445 return ret;
1448 static inline int cm_is_active_peer(__be64 local_ca_guid, __be64 remote_ca_guid,
1449 __be32 local_qpn, __be32 remote_qpn)
1451 return (be64_to_cpu(local_ca_guid) > be64_to_cpu(remote_ca_guid) ||
1452 ((local_ca_guid == remote_ca_guid) &&
1453 (be32_to_cpu(local_qpn) > be32_to_cpu(remote_qpn))));
1456 static bool cm_req_has_alt_path(struct cm_req_msg *req_msg)
1458 return ((req_msg->alt_local_lid) ||
1459 (ib_is_opa_gid(&req_msg->alt_local_gid)));
1462 static void cm_path_set_rec_type(struct ib_device *ib_device, u8 port_num,
1463 struct sa_path_rec *path, union ib_gid *gid)
1465 if (ib_is_opa_gid(gid) && rdma_cap_opa_ah(ib_device, port_num))
1466 path->rec_type = SA_PATH_REC_TYPE_OPA;
1467 else
1468 path->rec_type = SA_PATH_REC_TYPE_IB;
1471 static void cm_format_path_lid_from_req(struct cm_req_msg *req_msg,
1472 struct sa_path_rec *primary_path,
1473 struct sa_path_rec *alt_path)
1475 u32 lid;
1477 if (primary_path->rec_type != SA_PATH_REC_TYPE_OPA) {
1478 sa_path_set_dlid(primary_path,
1479 ntohs(req_msg->primary_local_lid));
1480 sa_path_set_slid(primary_path,
1481 ntohs(req_msg->primary_remote_lid));
1482 } else {
1483 lid = opa_get_lid_from_gid(&req_msg->primary_local_gid);
1484 sa_path_set_dlid(primary_path, lid);
1486 lid = opa_get_lid_from_gid(&req_msg->primary_remote_gid);
1487 sa_path_set_slid(primary_path, lid);
1490 if (!cm_req_has_alt_path(req_msg))
1491 return;
1493 if (alt_path->rec_type != SA_PATH_REC_TYPE_OPA) {
1494 sa_path_set_dlid(alt_path, ntohs(req_msg->alt_local_lid));
1495 sa_path_set_slid(alt_path, ntohs(req_msg->alt_remote_lid));
1496 } else {
1497 lid = opa_get_lid_from_gid(&req_msg->alt_local_gid);
1498 sa_path_set_dlid(alt_path, lid);
1500 lid = opa_get_lid_from_gid(&req_msg->alt_remote_gid);
1501 sa_path_set_slid(alt_path, lid);
1505 static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
1506 struct sa_path_rec *primary_path,
1507 struct sa_path_rec *alt_path)
1509 primary_path->dgid = req_msg->primary_local_gid;
1510 primary_path->sgid = req_msg->primary_remote_gid;
1511 primary_path->flow_label = cm_req_get_primary_flow_label(req_msg);
1512 primary_path->hop_limit = req_msg->primary_hop_limit;
1513 primary_path->traffic_class = req_msg->primary_traffic_class;
1514 primary_path->reversible = 1;
1515 primary_path->pkey = req_msg->pkey;
1516 primary_path->sl = cm_req_get_primary_sl(req_msg);
1517 primary_path->mtu_selector = IB_SA_EQ;
1518 primary_path->mtu = cm_req_get_path_mtu(req_msg);
1519 primary_path->rate_selector = IB_SA_EQ;
1520 primary_path->rate = cm_req_get_primary_packet_rate(req_msg);
1521 primary_path->packet_life_time_selector = IB_SA_EQ;
1522 primary_path->packet_life_time =
1523 cm_req_get_primary_local_ack_timeout(req_msg);
1524 primary_path->packet_life_time -= (primary_path->packet_life_time > 0);
1525 primary_path->service_id = req_msg->service_id;
1527 if (cm_req_has_alt_path(req_msg)) {
1528 alt_path->dgid = req_msg->alt_local_gid;
1529 alt_path->sgid = req_msg->alt_remote_gid;
1530 alt_path->flow_label = cm_req_get_alt_flow_label(req_msg);
1531 alt_path->hop_limit = req_msg->alt_hop_limit;
1532 alt_path->traffic_class = req_msg->alt_traffic_class;
1533 alt_path->reversible = 1;
1534 alt_path->pkey = req_msg->pkey;
1535 alt_path->sl = cm_req_get_alt_sl(req_msg);
1536 alt_path->mtu_selector = IB_SA_EQ;
1537 alt_path->mtu = cm_req_get_path_mtu(req_msg);
1538 alt_path->rate_selector = IB_SA_EQ;
1539 alt_path->rate = cm_req_get_alt_packet_rate(req_msg);
1540 alt_path->packet_life_time_selector = IB_SA_EQ;
1541 alt_path->packet_life_time =
1542 cm_req_get_alt_local_ack_timeout(req_msg);
1543 alt_path->packet_life_time -= (alt_path->packet_life_time > 0);
1544 alt_path->service_id = req_msg->service_id;
1546 cm_format_path_lid_from_req(req_msg, primary_path, alt_path);
1549 static u16 cm_get_bth_pkey(struct cm_work *work)
1551 struct ib_device *ib_dev = work->port->cm_dev->ib_device;
1552 u8 port_num = work->port->port_num;
1553 u16 pkey_index = work->mad_recv_wc->wc->pkey_index;
1554 u16 pkey;
1555 int ret;
1557 ret = ib_get_cached_pkey(ib_dev, port_num, pkey_index, &pkey);
1558 if (ret) {
1559 dev_warn_ratelimited(&ib_dev->dev, "ib_cm: Couldn't retrieve pkey for incoming request (port %d, pkey index %d). %d\n",
1560 port_num, pkey_index, ret);
1561 return 0;
1564 return pkey;
1568 * Convert OPA SGID to IB SGID
1569 * ULPs (such as IPoIB) do not understand OPA GIDs and will
1570 * reject them as the local_gid will not match the sgid. Therefore,
1571 * change the pathrec's SGID to an IB SGID.
1573 * @work: Work completion
1574 * @path: Path record
1576 static void cm_opa_to_ib_sgid(struct cm_work *work,
1577 struct sa_path_rec *path)
1579 struct ib_device *dev = work->port->cm_dev->ib_device;
1580 u8 port_num = work->port->port_num;
1582 if (rdma_cap_opa_ah(dev, port_num) &&
1583 (ib_is_opa_gid(&path->sgid))) {
1584 union ib_gid sgid;
1586 if (ib_get_cached_gid(dev, port_num, 0, &sgid, NULL)) {
1587 dev_warn(&dev->dev,
1588 "Error updating sgid in CM request\n");
1589 return;
1592 path->sgid = sgid;
1596 static void cm_format_req_event(struct cm_work *work,
1597 struct cm_id_private *cm_id_priv,
1598 struct ib_cm_id *listen_id)
1600 struct cm_req_msg *req_msg;
1601 struct ib_cm_req_event_param *param;
1603 req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
1604 param = &work->cm_event.param.req_rcvd;
1605 param->listen_id = listen_id;
1606 param->bth_pkey = cm_get_bth_pkey(work);
1607 param->port = cm_id_priv->av.port->port_num;
1608 param->primary_path = &work->path[0];
1609 cm_opa_to_ib_sgid(work, param->primary_path);
1610 if (cm_req_has_alt_path(req_msg)) {
1611 param->alternate_path = &work->path[1];
1612 cm_opa_to_ib_sgid(work, param->alternate_path);
1613 } else {
1614 param->alternate_path = NULL;
1616 param->remote_ca_guid = req_msg->local_ca_guid;
1617 param->remote_qkey = be32_to_cpu(req_msg->local_qkey);
1618 param->remote_qpn = be32_to_cpu(cm_req_get_local_qpn(req_msg));
1619 param->qp_type = cm_req_get_qp_type(req_msg);
1620 param->starting_psn = be32_to_cpu(cm_req_get_starting_psn(req_msg));
1621 param->responder_resources = cm_req_get_init_depth(req_msg);
1622 param->initiator_depth = cm_req_get_resp_res(req_msg);
1623 param->local_cm_response_timeout =
1624 cm_req_get_remote_resp_timeout(req_msg);
1625 param->flow_control = cm_req_get_flow_ctrl(req_msg);
1626 param->remote_cm_response_timeout =
1627 cm_req_get_local_resp_timeout(req_msg);
1628 param->retry_count = cm_req_get_retry_count(req_msg);
1629 param->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
1630 param->srq = cm_req_get_srq(req_msg);
1631 work->cm_event.private_data = &req_msg->private_data;
1634 static void cm_process_work(struct cm_id_private *cm_id_priv,
1635 struct cm_work *work)
1637 int ret;
1639 /* We will typically only have the current event to report. */
1640 ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
1641 cm_free_work(work);
1643 while (!ret && !atomic_add_negative(-1, &cm_id_priv->work_count)) {
1644 spin_lock_irq(&cm_id_priv->lock);
1645 work = cm_dequeue_work(cm_id_priv);
1646 spin_unlock_irq(&cm_id_priv->lock);
1647 BUG_ON(!work);
1648 ret = cm_id_priv->id.cm_handler(&cm_id_priv->id,
1649 &work->cm_event);
1650 cm_free_work(work);
1652 cm_deref_id(cm_id_priv);
1653 if (ret)
1654 cm_destroy_id(&cm_id_priv->id, ret);
1657 static void cm_format_mra(struct cm_mra_msg *mra_msg,
1658 struct cm_id_private *cm_id_priv,
1659 enum cm_msg_response msg_mraed, u8 service_timeout,
1660 const void *private_data, u8 private_data_len)
1662 cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid);
1663 cm_mra_set_msg_mraed(mra_msg, msg_mraed);
1664 mra_msg->local_comm_id = cm_id_priv->id.local_id;
1665 mra_msg->remote_comm_id = cm_id_priv->id.remote_id;
1666 cm_mra_set_service_timeout(mra_msg, service_timeout);
1668 if (private_data && private_data_len)
1669 memcpy(mra_msg->private_data, private_data, private_data_len);
1672 static void cm_format_rej(struct cm_rej_msg *rej_msg,
1673 struct cm_id_private *cm_id_priv,
1674 enum ib_cm_rej_reason reason,
1675 void *ari,
1676 u8 ari_length,
1677 const void *private_data,
1678 u8 private_data_len)
1680 cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, cm_id_priv->tid);
1681 rej_msg->remote_comm_id = cm_id_priv->id.remote_id;
1683 switch(cm_id_priv->id.state) {
1684 case IB_CM_REQ_RCVD:
1685 rej_msg->local_comm_id = 0;
1686 cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
1687 break;
1688 case IB_CM_MRA_REQ_SENT:
1689 rej_msg->local_comm_id = cm_id_priv->id.local_id;
1690 cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
1691 break;
1692 case IB_CM_REP_RCVD:
1693 case IB_CM_MRA_REP_SENT:
1694 rej_msg->local_comm_id = cm_id_priv->id.local_id;
1695 cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REP);
1696 break;
1697 default:
1698 rej_msg->local_comm_id = cm_id_priv->id.local_id;
1699 cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_OTHER);
1700 break;
1703 rej_msg->reason = cpu_to_be16(reason);
1704 if (ari && ari_length) {
1705 cm_rej_set_reject_info_len(rej_msg, ari_length);
1706 memcpy(rej_msg->ari, ari, ari_length);
1709 if (private_data && private_data_len)
1710 memcpy(rej_msg->private_data, private_data, private_data_len);
1713 static void cm_dup_req_handler(struct cm_work *work,
1714 struct cm_id_private *cm_id_priv)
1716 struct ib_mad_send_buf *msg = NULL;
1717 int ret;
1719 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
1720 counter[CM_REQ_COUNTER]);
1722 /* Quick state check to discard duplicate REQs. */
1723 if (cm_id_priv->id.state == IB_CM_REQ_RCVD)
1724 return;
1726 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
1727 if (ret)
1728 return;
1730 spin_lock_irq(&cm_id_priv->lock);
1731 switch (cm_id_priv->id.state) {
1732 case IB_CM_MRA_REQ_SENT:
1733 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
1734 CM_MSG_RESPONSE_REQ, cm_id_priv->service_timeout,
1735 cm_id_priv->private_data,
1736 cm_id_priv->private_data_len);
1737 break;
1738 case IB_CM_TIMEWAIT:
1739 cm_format_rej((struct cm_rej_msg *) msg->mad, cm_id_priv,
1740 IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0);
1741 break;
1742 default:
1743 goto unlock;
1745 spin_unlock_irq(&cm_id_priv->lock);
1747 ret = ib_post_send_mad(msg, NULL);
1748 if (ret)
1749 goto free;
1750 return;
1752 unlock: spin_unlock_irq(&cm_id_priv->lock);
1753 free: cm_free_msg(msg);
1756 static struct cm_id_private * cm_match_req(struct cm_work *work,
1757 struct cm_id_private *cm_id_priv)
1759 struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv;
1760 struct cm_timewait_info *timewait_info;
1761 struct cm_req_msg *req_msg;
1762 struct ib_cm_id *cm_id;
1764 req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
1766 /* Check for possible duplicate REQ. */
1767 spin_lock_irq(&cm.lock);
1768 timewait_info = cm_insert_remote_id(cm_id_priv->timewait_info);
1769 if (timewait_info) {
1770 cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
1771 timewait_info->work.remote_id);
1772 spin_unlock_irq(&cm.lock);
1773 if (cur_cm_id_priv) {
1774 cm_dup_req_handler(work, cur_cm_id_priv);
1775 cm_deref_id(cur_cm_id_priv);
1777 return NULL;
1780 /* Check for stale connections. */
1781 timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
1782 if (timewait_info) {
1783 cm_cleanup_timewait(cm_id_priv->timewait_info);
1784 cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
1785 timewait_info->work.remote_id);
1787 spin_unlock_irq(&cm.lock);
1788 cm_issue_rej(work->port, work->mad_recv_wc,
1789 IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ,
1790 NULL, 0);
1791 if (cur_cm_id_priv) {
1792 cm_id = &cur_cm_id_priv->id;
1793 ib_send_cm_dreq(cm_id, NULL, 0);
1794 cm_deref_id(cur_cm_id_priv);
1796 return NULL;
1799 /* Find matching listen request. */
1800 listen_cm_id_priv = cm_find_listen(cm_id_priv->id.device,
1801 req_msg->service_id);
1802 if (!listen_cm_id_priv) {
1803 cm_cleanup_timewait(cm_id_priv->timewait_info);
1804 spin_unlock_irq(&cm.lock);
1805 cm_issue_rej(work->port, work->mad_recv_wc,
1806 IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ,
1807 NULL, 0);
1808 goto out;
1810 atomic_inc(&listen_cm_id_priv->refcount);
1811 atomic_inc(&cm_id_priv->refcount);
1812 cm_id_priv->id.state = IB_CM_REQ_RCVD;
1813 atomic_inc(&cm_id_priv->work_count);
1814 spin_unlock_irq(&cm.lock);
1815 out:
1816 return listen_cm_id_priv;
1820 * Work-around for inter-subnet connections. If the LIDs are permissive,
1821 * we need to override the LID/SL data in the REQ with the LID information
1822 * in the work completion.
1824 static void cm_process_routed_req(struct cm_req_msg *req_msg, struct ib_wc *wc)
1826 if (!cm_req_get_primary_subnet_local(req_msg)) {
1827 if (req_msg->primary_local_lid == IB_LID_PERMISSIVE) {
1828 req_msg->primary_local_lid = ib_lid_be16(wc->slid);
1829 cm_req_set_primary_sl(req_msg, wc->sl);
1832 if (req_msg->primary_remote_lid == IB_LID_PERMISSIVE)
1833 req_msg->primary_remote_lid = cpu_to_be16(wc->dlid_path_bits);
1836 if (!cm_req_get_alt_subnet_local(req_msg)) {
1837 if (req_msg->alt_local_lid == IB_LID_PERMISSIVE) {
1838 req_msg->alt_local_lid = ib_lid_be16(wc->slid);
1839 cm_req_set_alt_sl(req_msg, wc->sl);
1842 if (req_msg->alt_remote_lid == IB_LID_PERMISSIVE)
1843 req_msg->alt_remote_lid = cpu_to_be16(wc->dlid_path_bits);
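/*
 * Handle a received REQ: create a new cm_id for the connection, build the
 * address vectors from the primary (and optional alternate) path, match the
 * REQ against a listener, and deliver IB_CM_REQ_RECEIVED to the listener's
 * callback.  Most failures along the way cause a REJ to be sent back.
 */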
1847 static int cm_req_handler(struct cm_work *work)
1849 struct ib_cm_id *cm_id;
1850 struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
1851 struct cm_req_msg *req_msg;
1852 union ib_gid gid;
1853 struct ib_gid_attr gid_attr;
1854 const struct ib_global_route *grh;
1855 int ret;
1857 req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
1859 cm_id = ib_create_cm_id(work->port->cm_dev->ib_device, NULL, NULL);
1860 if (IS_ERR(cm_id))
1861 return PTR_ERR(cm_id);
1863 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1864 cm_id_priv->id.remote_id = req_msg->local_comm_id;
1865 ret = cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
1866 work->mad_recv_wc->recv_buf.grh,
1867 &cm_id_priv->av);
1868 if (ret)
1869 goto destroy;
1870 cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
1871 id.local_id);
1872 if (IS_ERR(cm_id_priv->timewait_info)) {
1873 ret = PTR_ERR(cm_id_priv->timewait_info);
1874 goto destroy;
1876 cm_id_priv->timewait_info->work.remote_id = req_msg->local_comm_id;
1877 cm_id_priv->timewait_info->remote_ca_guid = req_msg->local_ca_guid;
1878 cm_id_priv->timewait_info->remote_qpn = cm_req_get_local_qpn(req_msg);
1880 listen_cm_id_priv = cm_match_req(work, cm_id_priv);
1881 if (!listen_cm_id_priv) {
1882 pr_debug("%s: local_id %d, no listen_cm_id_priv\n", __func__,
1883 be32_to_cpu(cm_id->local_id));
1884 ret = -EINVAL;
1885 goto free_timeinfo;
1888 cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
1889 cm_id_priv->id.context = listen_cm_id_priv->id.context;
1890 cm_id_priv->id.service_id = req_msg->service_id;
1891 cm_id_priv->id.service_mask = ~cpu_to_be64(0);
1893 cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
1895 memset(&work->path[0], 0, sizeof(work->path[0]));
1896 if (cm_req_has_alt_path(req_msg))
1897 memset(&work->path[1], 0, sizeof(work->path[1]));
1898 grh = rdma_ah_read_grh(&cm_id_priv->av.ah_attr);
1899 ret = ib_get_cached_gid(work->port->cm_dev->ib_device,
1900 work->port->port_num,
1901 grh->sgid_index,
1902 &gid, &gid_attr);
1903 if (ret) {
1904 ib_send_cm_rej(cm_id, IB_CM_REJ_UNSUPPORTED, NULL, 0, NULL, 0);
1905 goto rejected;
1908 if (gid_attr.ndev) {
1909 work->path[0].rec_type =
1910 sa_conv_gid_to_pathrec_type(gid_attr.gid_type);
1911 sa_path_set_ifindex(&work->path[0],
1912 gid_attr.ndev->ifindex);
1913 sa_path_set_ndev(&work->path[0],
1914 dev_net(gid_attr.ndev));
1915 dev_put(gid_attr.ndev);
1916 } else {
1917 cm_path_set_rec_type(work->port->cm_dev->ib_device,
1918 work->port->port_num,
1919 &work->path[0],
1920 &req_msg->primary_local_gid);
1922 if (cm_req_has_alt_path(req_msg))
1923 work->path[1].rec_type = work->path[0].rec_type;
1924 cm_format_paths_from_req(req_msg, &work->path[0],
1925 &work->path[1]);
1926 if (cm_id_priv->av.ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE)
1927 sa_path_set_dmac(&work->path[0],
1928 cm_id_priv->av.ah_attr.roce.dmac);
1929 work->path[0].hop_limit = grh->hop_limit;
1930 ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av,
1931 cm_id_priv);
1932 if (ret) {
1933 int err;
1935 err = ib_get_cached_gid(work->port->cm_dev->ib_device,
1936 work->port->port_num, 0,
1937 &work->path[0].sgid,
1938 NULL);
1939 if (err)
1940 ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_GID,
1941 NULL, 0, NULL, 0);
1942 else
1943 ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_GID,
1944 &work->path[0].sgid,
1945 sizeof(work->path[0].sgid),
1946 NULL, 0);
1947 goto rejected;
1949 if (cm_req_has_alt_path(req_msg)) {
1950 ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av,
1951 cm_id_priv);
1952 if (ret) {
1953 ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID,
1954 &work->path[0].sgid,
1955 sizeof(work->path[0].sgid), NULL, 0);
1956 goto rejected;
1959 cm_id_priv->tid = req_msg->hdr.tid;
1960 cm_id_priv->timeout_ms = cm_convert_to_ms(
1961 cm_req_get_local_resp_timeout(req_msg));
1962 cm_id_priv->max_cm_retries = cm_req_get_max_cm_retries(req_msg);
1963 cm_id_priv->remote_qpn = cm_req_get_local_qpn(req_msg);
1964 cm_id_priv->initiator_depth = cm_req_get_resp_res(req_msg);
1965 cm_id_priv->responder_resources = cm_req_get_init_depth(req_msg);
1966 cm_id_priv->path_mtu = cm_req_get_path_mtu(req_msg);
1967 cm_id_priv->pkey = req_msg->pkey;
1968 cm_id_priv->sq_psn = cm_req_get_starting_psn(req_msg);
1969 cm_id_priv->retry_count = cm_req_get_retry_count(req_msg);
1970 cm_id_priv->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
1971 cm_id_priv->qp_type = cm_req_get_qp_type(req_msg);
1973 cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
1974 cm_process_work(cm_id_priv, work);
1975 cm_deref_id(listen_cm_id_priv);
1976 return 0;
1978 rejected:
1979 atomic_dec(&cm_id_priv->refcount);
1980 cm_deref_id(listen_cm_id_priv);
1981 free_timeinfo:
1982 kfree(cm_id_priv->timewait_info);
1983 destroy:
1984 ib_destroy_cm_id(cm_id);
1985 return ret;
1988 static void cm_format_rep(struct cm_rep_msg *rep_msg,
1989 struct cm_id_private *cm_id_priv,
1990 struct ib_cm_rep_param *param)
1992 cm_format_mad_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid);
1993 rep_msg->local_comm_id = cm_id_priv->id.local_id;
1994 rep_msg->remote_comm_id = cm_id_priv->id.remote_id;
1995 cm_rep_set_starting_psn(rep_msg, cpu_to_be32(param->starting_psn));
1996 rep_msg->resp_resources = param->responder_resources;
1997 cm_rep_set_target_ack_delay(rep_msg,
1998 cm_id_priv->av.port->cm_dev->ack_delay);
1999 cm_rep_set_failover(rep_msg, param->failover_accepted);
2000 cm_rep_set_rnr_retry_count(rep_msg, param->rnr_retry_count);
2001 rep_msg->local_ca_guid = cm_id_priv->id.device->node_guid;
2003 if (cm_id_priv->qp_type != IB_QPT_XRC_TGT) {
2004 rep_msg->initiator_depth = param->initiator_depth;
2005 cm_rep_set_flow_ctrl(rep_msg, param->flow_control);
2006 cm_rep_set_srq(rep_msg, param->srq);
2007 cm_rep_set_local_qpn(rep_msg, cpu_to_be32(param->qp_num));
2008 } else {
2009 cm_rep_set_srq(rep_msg, 1);
2010 cm_rep_set_local_eecn(rep_msg, cpu_to_be32(param->qp_num));
2013 if (param->private_data && param->private_data_len)
2014 memcpy(rep_msg->private_data, param->private_data,
2015 param->private_data_len);
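/*
 * Send a REP in response to a received REQ.  Only valid while the cm_id is
 * in IB_CM_REQ_RCVD or IB_CM_MRA_REQ_SENT; on success the cm_id moves to
 * IB_CM_REP_SENT.  Typically called by the passive side from its cm_handler
 * after accepting an IB_CM_REQ_RECEIVED event.
 */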
2018 int ib_send_cm_rep(struct ib_cm_id *cm_id,
2019 struct ib_cm_rep_param *param)
2021 struct cm_id_private *cm_id_priv;
2022 struct ib_mad_send_buf *msg;
2023 struct cm_rep_msg *rep_msg;
2024 unsigned long flags;
2025 int ret;
2027 if (param->private_data &&
2028 param->private_data_len > IB_CM_REP_PRIVATE_DATA_SIZE)
2029 return -EINVAL;
2031 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2032 spin_lock_irqsave(&cm_id_priv->lock, flags);
2033 if (cm_id->state != IB_CM_REQ_RCVD &&
2034 cm_id->state != IB_CM_MRA_REQ_SENT) {
2035 pr_debug("%s: local_comm_id %d, cm_id->state: %d\n", __func__,
2036 be32_to_cpu(cm_id_priv->id.local_id), cm_id->state);
2037 ret = -EINVAL;
2038 goto out;
2041 ret = cm_alloc_msg(cm_id_priv, &msg);
2042 if (ret)
2043 goto out;
2045 rep_msg = (struct cm_rep_msg *) msg->mad;
2046 cm_format_rep(rep_msg, cm_id_priv, param);
2047 msg->timeout_ms = cm_id_priv->timeout_ms;
2048 msg->context[1] = (void *) (unsigned long) IB_CM_REP_SENT;
2050 ret = ib_post_send_mad(msg, NULL);
2051 if (ret) {
2052 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2053 cm_free_msg(msg);
2054 return ret;
2057 cm_id->state = IB_CM_REP_SENT;
2058 cm_id_priv->msg = msg;
2059 cm_id_priv->initiator_depth = param->initiator_depth;
2060 cm_id_priv->responder_resources = param->responder_resources;
2061 cm_id_priv->rq_psn = cm_rep_get_starting_psn(rep_msg);
2062 cm_id_priv->local_qpn = cpu_to_be32(param->qp_num & 0xFFFFFF);
2064 out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2065 return ret;
2067 EXPORT_SYMBOL(ib_send_cm_rep);
2069 static void cm_format_rtu(struct cm_rtu_msg *rtu_msg,
2070 struct cm_id_private *cm_id_priv,
2071 const void *private_data,
2072 u8 private_data_len)
2074 cm_format_mad_hdr(&rtu_msg->hdr, CM_RTU_ATTR_ID, cm_id_priv->tid);
2075 rtu_msg->local_comm_id = cm_id_priv->id.local_id;
2076 rtu_msg->remote_comm_id = cm_id_priv->id.remote_id;
2078 if (private_data && private_data_len)
2079 memcpy(rtu_msg->private_data, private_data, private_data_len);
2082 int ib_send_cm_rtu(struct ib_cm_id *cm_id,
2083 const void *private_data,
2084 u8 private_data_len)
2086 struct cm_id_private *cm_id_priv;
2087 struct ib_mad_send_buf *msg;
2088 unsigned long flags;
2089 void *data;
2090 int ret;
2092 if (private_data && private_data_len > IB_CM_RTU_PRIVATE_DATA_SIZE)
2093 return -EINVAL;
2095 data = cm_copy_private_data(private_data, private_data_len);
2096 if (IS_ERR(data))
2097 return PTR_ERR(data);
2099 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2100 spin_lock_irqsave(&cm_id_priv->lock, flags);
2101 if (cm_id->state != IB_CM_REP_RCVD &&
2102 cm_id->state != IB_CM_MRA_REP_SENT) {
2103 pr_debug("%s: local_id %d, cm_id->state %d\n", __func__,
2104 be32_to_cpu(cm_id->local_id), cm_id->state);
2105 ret = -EINVAL;
2106 goto error;
2109 ret = cm_alloc_msg(cm_id_priv, &msg);
2110 if (ret)
2111 goto error;
2113 cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
2114 private_data, private_data_len);
2116 ret = ib_post_send_mad(msg, NULL);
2117 if (ret) {
2118 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2119 cm_free_msg(msg);
2120 kfree(data);
2121 return ret;
2124 cm_id->state = IB_CM_ESTABLISHED;
2125 cm_set_private_data(cm_id_priv, data, private_data_len);
2126 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2127 return 0;
2129 error: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2130 kfree(data);
2131 return ret;
2133 EXPORT_SYMBOL(ib_send_cm_rtu);
2135 static void cm_format_rep_event(struct cm_work *work, enum ib_qp_type qp_type)
2137 struct cm_rep_msg *rep_msg;
2138 struct ib_cm_rep_event_param *param;
2140 rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
2141 param = &work->cm_event.param.rep_rcvd;
2142 param->remote_ca_guid = rep_msg->local_ca_guid;
2143 param->remote_qkey = be32_to_cpu(rep_msg->local_qkey);
2144 param->remote_qpn = be32_to_cpu(cm_rep_get_qpn(rep_msg, qp_type));
2145 param->starting_psn = be32_to_cpu(cm_rep_get_starting_psn(rep_msg));
2146 param->responder_resources = rep_msg->initiator_depth;
2147 param->initiator_depth = rep_msg->resp_resources;
2148 param->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
2149 param->failover_accepted = cm_rep_get_failover(rep_msg);
2150 param->flow_control = cm_rep_get_flow_ctrl(rep_msg);
2151 param->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
2152 param->srq = cm_rep_get_srq(rep_msg);
2153 work->cm_event.private_data = &rep_msg->private_data;
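/*
 * Respond to a duplicate REP: re-send the RTU if the connection is already
 * established, or re-send the MRA if one is outstanding for the REP; any
 * other state just counts and drops the duplicate.
 */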
2156 static void cm_dup_rep_handler(struct cm_work *work)
2158 struct cm_id_private *cm_id_priv;
2159 struct cm_rep_msg *rep_msg;
2160 struct ib_mad_send_buf *msg = NULL;
2161 int ret;
2163 rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad;
2164 cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id,
2165 rep_msg->local_comm_id);
2166 if (!cm_id_priv)
2167 return;
2169 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2170 counter[CM_REP_COUNTER]);
2171 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
2172 if (ret)
2173 goto deref;
2175 spin_lock_irq(&cm_id_priv->lock);
2176 if (cm_id_priv->id.state == IB_CM_ESTABLISHED)
2177 cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
2178 cm_id_priv->private_data,
2179 cm_id_priv->private_data_len);
2180 else if (cm_id_priv->id.state == IB_CM_MRA_REP_SENT)
2181 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
2182 CM_MSG_RESPONSE_REP, cm_id_priv->service_timeout,
2183 cm_id_priv->private_data,
2184 cm_id_priv->private_data_len);
2185 else
2186 goto unlock;
2187 spin_unlock_irq(&cm_id_priv->lock);
2189 ret = ib_post_send_mad(msg, NULL);
2190 if (ret)
2191 goto free;
2192 goto deref;
2194 unlock: spin_unlock_irq(&cm_id_priv->lock);
2195 free: cm_free_msg(msg);
2196 deref: cm_deref_id(cm_id_priv);
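/*
 * Handle a received REP on the active side: validate the cm_id state, check
 * for duplicate REPs and stale connections, record the remote QPN and the
 * negotiated parameters, then queue an IB_CM_REP_RECEIVED event for the
 * consumer.
 */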
2199 static int cm_rep_handler(struct cm_work *work)
2201 struct cm_id_private *cm_id_priv;
2202 struct cm_rep_msg *rep_msg;
2203 int ret;
2204 struct cm_id_private *cur_cm_id_priv;
2205 struct ib_cm_id *cm_id;
2206 struct cm_timewait_info *timewait_info;
2208 rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
2209 cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id, 0);
2210 if (!cm_id_priv) {
2211 cm_dup_rep_handler(work);
2212 pr_debug("%s: remote_comm_id %d, no cm_id_priv\n", __func__,
2213 be32_to_cpu(rep_msg->remote_comm_id));
2214 return -EINVAL;
2217 cm_format_rep_event(work, cm_id_priv->qp_type);
2219 spin_lock_irq(&cm_id_priv->lock);
2220 switch (cm_id_priv->id.state) {
2221 case IB_CM_REQ_SENT:
2222 case IB_CM_MRA_REQ_RCVD:
2223 break;
2224 default:
2225 spin_unlock_irq(&cm_id_priv->lock);
2226 ret = -EINVAL;
2227 pr_debug("%s: cm_id_priv->id.state: %d, local_comm_id %d, remote_comm_id %d\n",
2228 __func__, cm_id_priv->id.state,
2229 be32_to_cpu(rep_msg->local_comm_id),
2230 be32_to_cpu(rep_msg->remote_comm_id));
2231 goto error;
2234 cm_id_priv->timewait_info->work.remote_id = rep_msg->local_comm_id;
2235 cm_id_priv->timewait_info->remote_ca_guid = rep_msg->local_ca_guid;
2236 cm_id_priv->timewait_info->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type);
2238 spin_lock(&cm.lock);
2239 /* Check for duplicate REP. */
2240 if (cm_insert_remote_id(cm_id_priv->timewait_info)) {
2241 spin_unlock(&cm.lock);
2242 spin_unlock_irq(&cm_id_priv->lock);
2243 ret = -EINVAL;
2244 pr_debug("%s: Failed to insert remote id %d\n", __func__,
2245 be32_to_cpu(rep_msg->remote_comm_id));
2246 goto error;
2248 /* Check for a stale connection. */
2249 timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
2250 if (timewait_info) {
2251 rb_erase(&cm_id_priv->timewait_info->remote_id_node,
2252 &cm.remote_id_table);
2253 cm_id_priv->timewait_info->inserted_remote_id = 0;
2254 cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
2255 timewait_info->work.remote_id);
2257 spin_unlock(&cm.lock);
2258 spin_unlock_irq(&cm_id_priv->lock);
2259 cm_issue_rej(work->port, work->mad_recv_wc,
2260 IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,
2261 NULL, 0);
2262 ret = -EINVAL;
2263 pr_debug("%s: Stale connection. local_comm_id %d, remote_comm_id %d\n",
2264 __func__, be32_to_cpu(rep_msg->local_comm_id),
2265 be32_to_cpu(rep_msg->remote_comm_id));
2267 if (cur_cm_id_priv) {
2268 cm_id = &cur_cm_id_priv->id;
2269 ib_send_cm_dreq(cm_id, NULL, 0);
2270 cm_deref_id(cur_cm_id_priv);
2273 goto error;
2275 spin_unlock(&cm.lock);
2277 cm_id_priv->id.state = IB_CM_REP_RCVD;
2278 cm_id_priv->id.remote_id = rep_msg->local_comm_id;
2279 cm_id_priv->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type);
2280 cm_id_priv->initiator_depth = rep_msg->resp_resources;
2281 cm_id_priv->responder_resources = rep_msg->initiator_depth;
2282 cm_id_priv->sq_psn = cm_rep_get_starting_psn(rep_msg);
2283 cm_id_priv->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
2284 cm_id_priv->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
2285 cm_id_priv->av.timeout =
2286 cm_ack_timeout(cm_id_priv->target_ack_delay,
2287 cm_id_priv->av.timeout - 1);
2288 cm_id_priv->alt_av.timeout =
2289 cm_ack_timeout(cm_id_priv->target_ack_delay,
2290 cm_id_priv->alt_av.timeout - 1);
2292 /* todo: handle peer_to_peer */
2294 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2295 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2296 if (!ret)
2297 list_add_tail(&work->list, &cm_id_priv->work_list);
2298 spin_unlock_irq(&cm_id_priv->lock);
2300 if (ret)
2301 cm_process_work(cm_id_priv, work);
2302 else
2303 cm_deref_id(cm_id_priv);
2304 return 0;
2306 error:
2307 cm_deref_id(cm_id_priv);
2308 return ret;
2311 static int cm_establish_handler(struct cm_work *work)
2313 struct cm_id_private *cm_id_priv;
2314 int ret;
2316 /* See comment in cm_establish about lookup. */
2317 cm_id_priv = cm_acquire_id(work->local_id, work->remote_id);
2318 if (!cm_id_priv)
2319 return -EINVAL;
2321 spin_lock_irq(&cm_id_priv->lock);
2322 if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
2323 spin_unlock_irq(&cm_id_priv->lock);
2324 goto out;
2327 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2328 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2329 if (!ret)
2330 list_add_tail(&work->list, &cm_id_priv->work_list);
2331 spin_unlock_irq(&cm_id_priv->lock);
2333 if (ret)
2334 cm_process_work(cm_id_priv, work);
2335 else
2336 cm_deref_id(cm_id_priv);
2337 return 0;
2338 out:
2339 cm_deref_id(cm_id_priv);
2340 return -EINVAL;
2343 static int cm_rtu_handler(struct cm_work *work)
2345 struct cm_id_private *cm_id_priv;
2346 struct cm_rtu_msg *rtu_msg;
2347 int ret;
2349 rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad;
2350 cm_id_priv = cm_acquire_id(rtu_msg->remote_comm_id,
2351 rtu_msg->local_comm_id);
2352 if (!cm_id_priv)
2353 return -EINVAL;
2355 work->cm_event.private_data = &rtu_msg->private_data;
2357 spin_lock_irq(&cm_id_priv->lock);
2358 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
2359 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
2360 spin_unlock_irq(&cm_id_priv->lock);
2361 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2362 counter[CM_RTU_COUNTER]);
2363 goto out;
2365 cm_id_priv->id.state = IB_CM_ESTABLISHED;
2367 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2368 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2369 if (!ret)
2370 list_add_tail(&work->list, &cm_id_priv->work_list);
2371 spin_unlock_irq(&cm_id_priv->lock);
2373 if (ret)
2374 cm_process_work(cm_id_priv, work);
2375 else
2376 cm_deref_id(cm_id_priv);
2377 return 0;
2378 out:
2379 cm_deref_id(cm_id_priv);
2380 return -EINVAL;
2383 static void cm_format_dreq(struct cm_dreq_msg *dreq_msg,
2384 struct cm_id_private *cm_id_priv,
2385 const void *private_data,
2386 u8 private_data_len)
2388 cm_format_mad_hdr(&dreq_msg->hdr, CM_DREQ_ATTR_ID,
2389 cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_DREQ));
2390 dreq_msg->local_comm_id = cm_id_priv->id.local_id;
2391 dreq_msg->remote_comm_id = cm_id_priv->id.remote_id;
2392 cm_dreq_set_remote_qpn(dreq_msg, cm_id_priv->remote_qpn);
2394 if (private_data && private_data_len)
2395 memcpy(dreq_msg->private_data, private_data, private_data_len);
2398 int ib_send_cm_dreq(struct ib_cm_id *cm_id,
2399 const void *private_data,
2400 u8 private_data_len)
2402 struct cm_id_private *cm_id_priv;
2403 struct ib_mad_send_buf *msg;
2404 unsigned long flags;
2405 int ret;
2407 if (private_data && private_data_len > IB_CM_DREQ_PRIVATE_DATA_SIZE)
2408 return -EINVAL;
2410 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2411 spin_lock_irqsave(&cm_id_priv->lock, flags);
2412 if (cm_id->state != IB_CM_ESTABLISHED) {
2413 pr_debug("%s: local_id %d, cm_id->state: %d\n", __func__,
2414 be32_to_cpu(cm_id->local_id), cm_id->state);
2415 ret = -EINVAL;
2416 goto out;
2419 if (cm_id->lap_state == IB_CM_LAP_SENT ||
2420 cm_id->lap_state == IB_CM_MRA_LAP_RCVD)
2421 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2423 ret = cm_alloc_msg(cm_id_priv, &msg);
2424 if (ret) {
2425 cm_enter_timewait(cm_id_priv);
2426 goto out;
2429 cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv,
2430 private_data, private_data_len);
2431 msg->timeout_ms = cm_id_priv->timeout_ms;
2432 msg->context[1] = (void *) (unsigned long) IB_CM_DREQ_SENT;
2434 ret = ib_post_send_mad(msg, NULL);
2435 if (ret) {
2436 cm_enter_timewait(cm_id_priv);
2437 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2438 cm_free_msg(msg);
2439 return ret;
2442 cm_id->state = IB_CM_DREQ_SENT;
2443 cm_id_priv->msg = msg;
2444 out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2445 return ret;
2447 EXPORT_SYMBOL(ib_send_cm_dreq);
2449 static void cm_format_drep(struct cm_drep_msg *drep_msg,
2450 struct cm_id_private *cm_id_priv,
2451 const void *private_data,
2452 u8 private_data_len)
2454 cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, cm_id_priv->tid);
2455 drep_msg->local_comm_id = cm_id_priv->id.local_id;
2456 drep_msg->remote_comm_id = cm_id_priv->id.remote_id;
2458 if (private_data && private_data_len)
2459 memcpy(drep_msg->private_data, private_data, private_data_len);
2462 int ib_send_cm_drep(struct ib_cm_id *cm_id,
2463 const void *private_data,
2464 u8 private_data_len)
2466 struct cm_id_private *cm_id_priv;
2467 struct ib_mad_send_buf *msg;
2468 unsigned long flags;
2469 void *data;
2470 int ret;
2472 if (private_data && private_data_len > IB_CM_DREP_PRIVATE_DATA_SIZE)
2473 return -EINVAL;
2475 data = cm_copy_private_data(private_data, private_data_len);
2476 if (IS_ERR(data))
2477 return PTR_ERR(data);
2479 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2480 spin_lock_irqsave(&cm_id_priv->lock, flags);
2481 if (cm_id->state != IB_CM_DREQ_RCVD) {
2482 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2483 kfree(data);
2484                 pr_debug("%s: local_id %d, cm_id->state (%d) != IB_CM_DREQ_RCVD\n",
2485 __func__, be32_to_cpu(cm_id->local_id), cm_id->state);
2486 return -EINVAL;
2489 cm_set_private_data(cm_id_priv, data, private_data_len);
2490 cm_enter_timewait(cm_id_priv);
2492 ret = cm_alloc_msg(cm_id_priv, &msg);
2493 if (ret)
2494 goto out;
2496 cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
2497 private_data, private_data_len);
2499 ret = ib_post_send_mad(msg, NULL);
2500 if (ret) {
2501 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2502 cm_free_msg(msg);
2503 return ret;
2506 out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2507 return ret;
2509 EXPORT_SYMBOL(ib_send_cm_drep);
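/*
 * Reply to a DREQ for which no matching cm_id exists by building a DREP
 * directly from the received MAD, so the remote side can finish tearing
 * down its half of the connection.
 */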
2511 static int cm_issue_drep(struct cm_port *port,
2512 struct ib_mad_recv_wc *mad_recv_wc)
2514 struct ib_mad_send_buf *msg = NULL;
2515 struct cm_dreq_msg *dreq_msg;
2516 struct cm_drep_msg *drep_msg;
2517 int ret;
2519 ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
2520 if (ret)
2521 return ret;
2523 dreq_msg = (struct cm_dreq_msg *) mad_recv_wc->recv_buf.mad;
2524 drep_msg = (struct cm_drep_msg *) msg->mad;
2526 cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, dreq_msg->hdr.tid);
2527 drep_msg->remote_comm_id = dreq_msg->local_comm_id;
2528 drep_msg->local_comm_id = dreq_msg->remote_comm_id;
2530 ret = ib_post_send_mad(msg, NULL);
2531 if (ret)
2532 cm_free_msg(msg);
2534 return ret;
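/*
 * Handle a received DREQ.  Unknown connections are answered immediately via
 * cm_issue_drep(); duplicates seen in TIMEWAIT get another DREP, duplicates
 * in IB_CM_DREQ_RCVD are only counted, and otherwise the cm_id moves to
 * IB_CM_DREQ_RCVD and the event is queued for the consumer.
 */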
2537 static int cm_dreq_handler(struct cm_work *work)
2539 struct cm_id_private *cm_id_priv;
2540 struct cm_dreq_msg *dreq_msg;
2541 struct ib_mad_send_buf *msg = NULL;
2542 int ret;
2544 dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad;
2545 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
2546 dreq_msg->local_comm_id);
2547 if (!cm_id_priv) {
2548 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2549 counter[CM_DREQ_COUNTER]);
2550 cm_issue_drep(work->port, work->mad_recv_wc);
2551 pr_debug("%s: no cm_id_priv, local_comm_id %d, remote_comm_id %d\n",
2552 __func__, be32_to_cpu(dreq_msg->local_comm_id),
2553 be32_to_cpu(dreq_msg->remote_comm_id));
2554 return -EINVAL;
2557 work->cm_event.private_data = &dreq_msg->private_data;
2559 spin_lock_irq(&cm_id_priv->lock);
2560 if (cm_id_priv->local_qpn != cm_dreq_get_remote_qpn(dreq_msg))
2561 goto unlock;
2563 switch (cm_id_priv->id.state) {
2564 case IB_CM_REP_SENT:
2565 case IB_CM_DREQ_SENT:
2566 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2567 break;
2568 case IB_CM_ESTABLISHED:
2569 if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT ||
2570 cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
2571 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2572 break;
2573 case IB_CM_MRA_REP_RCVD:
2574 break;
2575 case IB_CM_TIMEWAIT:
2576 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2577 counter[CM_DREQ_COUNTER]);
2578 msg = cm_alloc_response_msg_no_ah(work->port, work->mad_recv_wc);
2579 if (IS_ERR(msg))
2580 goto unlock;
2582 cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
2583 cm_id_priv->private_data,
2584 cm_id_priv->private_data_len);
2585 spin_unlock_irq(&cm_id_priv->lock);
2587 if (cm_create_response_msg_ah(work->port, work->mad_recv_wc, msg) ||
2588 ib_post_send_mad(msg, NULL))
2589 cm_free_msg(msg);
2590 goto deref;
2591 case IB_CM_DREQ_RCVD:
2592 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2593 counter[CM_DREQ_COUNTER]);
2594 goto unlock;
2595 default:
2596 pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
2597 __func__, be32_to_cpu(cm_id_priv->id.local_id),
2598 cm_id_priv->id.state);
2599 goto unlock;
2601 cm_id_priv->id.state = IB_CM_DREQ_RCVD;
2602 cm_id_priv->tid = dreq_msg->hdr.tid;
2603 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2604 if (!ret)
2605 list_add_tail(&work->list, &cm_id_priv->work_list);
2606 spin_unlock_irq(&cm_id_priv->lock);
2608 if (ret)
2609 cm_process_work(cm_id_priv, work);
2610 else
2611 cm_deref_id(cm_id_priv);
2612 return 0;
2614 unlock: spin_unlock_irq(&cm_id_priv->lock);
2615 deref: cm_deref_id(cm_id_priv);
2616 return -EINVAL;
2619 static int cm_drep_handler(struct cm_work *work)
2621 struct cm_id_private *cm_id_priv;
2622 struct cm_drep_msg *drep_msg;
2623 int ret;
2625 drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad;
2626 cm_id_priv = cm_acquire_id(drep_msg->remote_comm_id,
2627 drep_msg->local_comm_id);
2628 if (!cm_id_priv)
2629 return -EINVAL;
2631 work->cm_event.private_data = &drep_msg->private_data;
2633 spin_lock_irq(&cm_id_priv->lock);
2634 if (cm_id_priv->id.state != IB_CM_DREQ_SENT &&
2635 cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
2636 spin_unlock_irq(&cm_id_priv->lock);
2637 goto out;
2639 cm_enter_timewait(cm_id_priv);
2641 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2642 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2643 if (!ret)
2644 list_add_tail(&work->list, &cm_id_priv->work_list);
2645 spin_unlock_irq(&cm_id_priv->lock);
2647 if (ret)
2648 cm_process_work(cm_id_priv, work);
2649 else
2650 cm_deref_id(cm_id_priv);
2651 return 0;
2652 out:
2653 cm_deref_id(cm_id_priv);
2654 return -EINVAL;
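/*
 * Send a REJ to abort connection establishment.  If the local cm_id has not
 * sent a REP it is reset to idle; if it has (IB_CM_REP_SENT or
 * IB_CM_MRA_REP_RCVD) it enters TIMEWAIT instead.
 */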
2657 int ib_send_cm_rej(struct ib_cm_id *cm_id,
2658 enum ib_cm_rej_reason reason,
2659 void *ari,
2660 u8 ari_length,
2661 const void *private_data,
2662 u8 private_data_len)
2664 struct cm_id_private *cm_id_priv;
2665 struct ib_mad_send_buf *msg;
2666 unsigned long flags;
2667 int ret;
2669 if ((private_data && private_data_len > IB_CM_REJ_PRIVATE_DATA_SIZE) ||
2670 (ari && ari_length > IB_CM_REJ_ARI_LENGTH))
2671 return -EINVAL;
2673 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2675 spin_lock_irqsave(&cm_id_priv->lock, flags);
2676 switch (cm_id->state) {
2677 case IB_CM_REQ_SENT:
2678 case IB_CM_MRA_REQ_RCVD:
2679 case IB_CM_REQ_RCVD:
2680 case IB_CM_MRA_REQ_SENT:
2681 case IB_CM_REP_RCVD:
2682 case IB_CM_MRA_REP_SENT:
2683 ret = cm_alloc_msg(cm_id_priv, &msg);
2684 if (!ret)
2685 cm_format_rej((struct cm_rej_msg *) msg->mad,
2686 cm_id_priv, reason, ari, ari_length,
2687 private_data, private_data_len);
2689 cm_reset_to_idle(cm_id_priv);
2690 break;
2691 case IB_CM_REP_SENT:
2692 case IB_CM_MRA_REP_RCVD:
2693 ret = cm_alloc_msg(cm_id_priv, &msg);
2694 if (!ret)
2695 cm_format_rej((struct cm_rej_msg *) msg->mad,
2696 cm_id_priv, reason, ari, ari_length,
2697 private_data, private_data_len);
2699 cm_enter_timewait(cm_id_priv);
2700 break;
2701 default:
2702 pr_debug("%s: local_id %d, cm_id->state: %d\n", __func__,
2703 be32_to_cpu(cm_id_priv->id.local_id), cm_id->state);
2704 ret = -EINVAL;
2705 goto out;
2708 if (ret)
2709 goto out;
2711 ret = ib_post_send_mad(msg, NULL);
2712 if (ret)
2713 cm_free_msg(msg);
2715 out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2716 return ret;
2718 EXPORT_SYMBOL(ib_send_cm_rej);
2720 static void cm_format_rej_event(struct cm_work *work)
2722 struct cm_rej_msg *rej_msg;
2723 struct ib_cm_rej_event_param *param;
2725 rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
2726 param = &work->cm_event.param.rej_rcvd;
2727 param->ari = rej_msg->ari;
2728 param->ari_length = cm_rej_get_reject_info_len(rej_msg);
2729 param->reason = __be16_to_cpu(rej_msg->reason);
2730 work->cm_event.private_data = &rej_msg->private_data;
2733 static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
2735 struct cm_timewait_info *timewait_info;
2736 struct cm_id_private *cm_id_priv;
2737 __be32 remote_id;
2739 remote_id = rej_msg->local_comm_id;
2741 if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_TIMEOUT) {
2742 spin_lock_irq(&cm.lock);
2743 timewait_info = cm_find_remote_id( *((__be64 *) rej_msg->ari),
2744 remote_id);
2745 if (!timewait_info) {
2746 spin_unlock_irq(&cm.lock);
2747 return NULL;
2749 cm_id_priv = idr_find(&cm.local_id_table, (__force int)
2750 (timewait_info->work.local_id ^
2751 cm.random_id_operand));
2752 if (cm_id_priv) {
2753 if (cm_id_priv->id.remote_id == remote_id)
2754 atomic_inc(&cm_id_priv->refcount);
2755 else
2756 cm_id_priv = NULL;
2758 spin_unlock_irq(&cm.lock);
2759 } else if (cm_rej_get_msg_rejected(rej_msg) == CM_MSG_RESPONSE_REQ)
2760 cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, 0);
2761 else
2762 cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, remote_id);
2764 return cm_id_priv;
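/*
 * Handle a received REJ: cancel any outstanding MAD for the rejected
 * message, move the cm_id either into TIMEWAIT (stale connections and
 * connections past the REP stage) or back to idle, and report
 * IB_CM_REJ_RECEIVED to the consumer.
 */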
2767 static int cm_rej_handler(struct cm_work *work)
2769 struct cm_id_private *cm_id_priv;
2770 struct cm_rej_msg *rej_msg;
2771 int ret;
2773 rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
2774 cm_id_priv = cm_acquire_rejected_id(rej_msg);
2775 if (!cm_id_priv)
2776 return -EINVAL;
2778 cm_format_rej_event(work);
2780 spin_lock_irq(&cm_id_priv->lock);
2781 switch (cm_id_priv->id.state) {
2782 case IB_CM_REQ_SENT:
2783 case IB_CM_MRA_REQ_RCVD:
2784 case IB_CM_REP_SENT:
2785 case IB_CM_MRA_REP_RCVD:
2786 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2787 /* fall through */
2788 case IB_CM_REQ_RCVD:
2789 case IB_CM_MRA_REQ_SENT:
2790 if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_STALE_CONN)
2791 cm_enter_timewait(cm_id_priv);
2792 else
2793 cm_reset_to_idle(cm_id_priv);
2794 break;
2795 case IB_CM_DREQ_SENT:
2796 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2797 /* fall through */
2798 case IB_CM_REP_RCVD:
2799 case IB_CM_MRA_REP_SENT:
2800 cm_enter_timewait(cm_id_priv);
2801 break;
2802 case IB_CM_ESTABLISHED:
2803 if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT ||
2804 cm_id_priv->id.lap_state == IB_CM_LAP_SENT) {
2805 if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT)
2806 ib_cancel_mad(cm_id_priv->av.port->mad_agent,
2807 cm_id_priv->msg);
2808 cm_enter_timewait(cm_id_priv);
2809 break;
2811 /* fall through */
2812 default:
2813 spin_unlock_irq(&cm_id_priv->lock);
2814 pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
2815 __func__, be32_to_cpu(cm_id_priv->id.local_id),
2816 cm_id_priv->id.state);
2817 ret = -EINVAL;
2818 goto out;
2821 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2822 if (!ret)
2823 list_add_tail(&work->list, &cm_id_priv->work_list);
2824 spin_unlock_irq(&cm_id_priv->lock);
2826 if (ret)
2827 cm_process_work(cm_id_priv, work);
2828 else
2829 cm_deref_id(cm_id_priv);
2830 return 0;
2831 out:
2832 cm_deref_id(cm_id_priv);
2833 return -EINVAL;
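/*
 * Send an MRA asking the peer to wait longer for a response to its REQ, REP
 * or LAP.  If IB_CM_MRA_FLAG_DELAY is set in service_timeout, no MRA is
 * transmitted and only the local state and timeout are updated.
 */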
2836 int ib_send_cm_mra(struct ib_cm_id *cm_id,
2837 u8 service_timeout,
2838 const void *private_data,
2839 u8 private_data_len)
2841 struct cm_id_private *cm_id_priv;
2842 struct ib_mad_send_buf *msg;
2843 enum ib_cm_state cm_state;
2844 enum ib_cm_lap_state lap_state;
2845 enum cm_msg_response msg_response;
2846 void *data;
2847 unsigned long flags;
2848 int ret;
2850 if (private_data && private_data_len > IB_CM_MRA_PRIVATE_DATA_SIZE)
2851 return -EINVAL;
2853 data = cm_copy_private_data(private_data, private_data_len);
2854 if (IS_ERR(data))
2855 return PTR_ERR(data);
2857 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2859 spin_lock_irqsave(&cm_id_priv->lock, flags);
2860 switch(cm_id_priv->id.state) {
2861 case IB_CM_REQ_RCVD:
2862 cm_state = IB_CM_MRA_REQ_SENT;
2863 lap_state = cm_id->lap_state;
2864 msg_response = CM_MSG_RESPONSE_REQ;
2865 break;
2866 case IB_CM_REP_RCVD:
2867 cm_state = IB_CM_MRA_REP_SENT;
2868 lap_state = cm_id->lap_state;
2869 msg_response = CM_MSG_RESPONSE_REP;
2870 break;
2871 case IB_CM_ESTABLISHED:
2872 if (cm_id->lap_state == IB_CM_LAP_RCVD) {
2873 cm_state = cm_id->state;
2874 lap_state = IB_CM_MRA_LAP_SENT;
2875 msg_response = CM_MSG_RESPONSE_OTHER;
2876 break;
2878 /* fall through */
2879 default:
2880 pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
2881 __func__, be32_to_cpu(cm_id_priv->id.local_id),
2882 cm_id_priv->id.state);
2883 ret = -EINVAL;
2884 goto error1;
2887 if (!(service_timeout & IB_CM_MRA_FLAG_DELAY)) {
2888 ret = cm_alloc_msg(cm_id_priv, &msg);
2889 if (ret)
2890 goto error1;
2892 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
2893 msg_response, service_timeout,
2894 private_data, private_data_len);
2895 ret = ib_post_send_mad(msg, NULL);
2896 if (ret)
2897 goto error2;
2900 cm_id->state = cm_state;
2901 cm_id->lap_state = lap_state;
2902 cm_id_priv->service_timeout = service_timeout;
2903 cm_set_private_data(cm_id_priv, data, private_data_len);
2904 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2905 return 0;
2907 error1: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2908 kfree(data);
2909 return ret;
2911 error2: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2912 kfree(data);
2913 cm_free_msg(msg);
2914 return ret;
2916 EXPORT_SYMBOL(ib_send_cm_mra);
2918 static struct cm_id_private * cm_acquire_mraed_id(struct cm_mra_msg *mra_msg)
2920 switch (cm_mra_get_msg_mraed(mra_msg)) {
2921 case CM_MSG_RESPONSE_REQ:
2922 return cm_acquire_id(mra_msg->remote_comm_id, 0);
2923 case CM_MSG_RESPONSE_REP:
2924 case CM_MSG_RESPONSE_OTHER:
2925 return cm_acquire_id(mra_msg->remote_comm_id,
2926 mra_msg->local_comm_id);
2927 default:
2928 return NULL;
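/*
 * Handle a received MRA: extend the timeout of the outstanding REQ, REP or
 * LAP MAD by the peer's advertised service timeout and record the
 * corresponding MRA-received state before queueing the event.
 */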
2932 static int cm_mra_handler(struct cm_work *work)
2934 struct cm_id_private *cm_id_priv;
2935 struct cm_mra_msg *mra_msg;
2936 int timeout, ret;
2938 mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad;
2939 cm_id_priv = cm_acquire_mraed_id(mra_msg);
2940 if (!cm_id_priv)
2941 return -EINVAL;
2943 work->cm_event.private_data = &mra_msg->private_data;
2944 work->cm_event.param.mra_rcvd.service_timeout =
2945 cm_mra_get_service_timeout(mra_msg);
2946 timeout = cm_convert_to_ms(cm_mra_get_service_timeout(mra_msg)) +
2947 cm_convert_to_ms(cm_id_priv->av.timeout);
2949 spin_lock_irq(&cm_id_priv->lock);
2950 switch (cm_id_priv->id.state) {
2951 case IB_CM_REQ_SENT:
2952 if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REQ ||
2953 ib_modify_mad(cm_id_priv->av.port->mad_agent,
2954 cm_id_priv->msg, timeout))
2955 goto out;
2956 cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD;
2957 break;
2958 case IB_CM_REP_SENT:
2959 if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REP ||
2960 ib_modify_mad(cm_id_priv->av.port->mad_agent,
2961 cm_id_priv->msg, timeout))
2962 goto out;
2963 cm_id_priv->id.state = IB_CM_MRA_REP_RCVD;
2964 break;
2965 case IB_CM_ESTABLISHED:
2966 if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_OTHER ||
2967 cm_id_priv->id.lap_state != IB_CM_LAP_SENT ||
2968 ib_modify_mad(cm_id_priv->av.port->mad_agent,
2969 cm_id_priv->msg, timeout)) {
2970 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
2971 atomic_long_inc(&work->port->
2972 counter_group[CM_RECV_DUPLICATES].
2973 counter[CM_MRA_COUNTER]);
2974 goto out;
2976 cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD;
2977 break;
2978 case IB_CM_MRA_REQ_RCVD:
2979 case IB_CM_MRA_REP_RCVD:
2980 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2981 counter[CM_MRA_COUNTER]);
2982 /* fall through */
2983 default:
2984                 pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
2985 __func__, be32_to_cpu(cm_id_priv->id.local_id),
2986 cm_id_priv->id.state);
2987 goto out;
2990 cm_id_priv->msg->context[1] = (void *) (unsigned long)
2991 cm_id_priv->id.state;
2992 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2993 if (!ret)
2994 list_add_tail(&work->list, &cm_id_priv->work_list);
2995 spin_unlock_irq(&cm_id_priv->lock);
2997 if (ret)
2998 cm_process_work(cm_id_priv, work);
2999 else
3000 cm_deref_id(cm_id_priv);
3001 return 0;
3002 out:
3003 spin_unlock_irq(&cm_id_priv->lock);
3004 cm_deref_id(cm_id_priv);
3005 return -EINVAL;
3008 static void cm_format_lap(struct cm_lap_msg *lap_msg,
3009 struct cm_id_private *cm_id_priv,
3010 struct sa_path_rec *alternate_path,
3011 const void *private_data,
3012 u8 private_data_len)
3014 bool alt_ext = false;
3016 if (alternate_path->rec_type == SA_PATH_REC_TYPE_OPA)
3017 alt_ext = opa_is_extended_lid(alternate_path->opa.dlid,
3018 alternate_path->opa.slid);
3019 cm_format_mad_hdr(&lap_msg->hdr, CM_LAP_ATTR_ID,
3020 cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_LAP));
3021 lap_msg->local_comm_id = cm_id_priv->id.local_id;
3022 lap_msg->remote_comm_id = cm_id_priv->id.remote_id;
3023 cm_lap_set_remote_qpn(lap_msg, cm_id_priv->remote_qpn);
3024 /* todo: need remote CM response timeout */
3025 cm_lap_set_remote_resp_timeout(lap_msg, 0x1F);
3026 lap_msg->alt_local_lid =
3027 htons(ntohl(sa_path_get_slid(alternate_path)));
3028 lap_msg->alt_remote_lid =
3029 htons(ntohl(sa_path_get_dlid(alternate_path)));
3030 lap_msg->alt_local_gid = alternate_path->sgid;
3031 lap_msg->alt_remote_gid = alternate_path->dgid;
3032 if (alt_ext) {
3033 lap_msg->alt_local_gid.global.interface_id
3034 = OPA_MAKE_ID(be32_to_cpu(alternate_path->opa.slid));
3035 lap_msg->alt_remote_gid.global.interface_id
3036 = OPA_MAKE_ID(be32_to_cpu(alternate_path->opa.dlid));
3038 cm_lap_set_flow_label(lap_msg, alternate_path->flow_label);
3039 cm_lap_set_traffic_class(lap_msg, alternate_path->traffic_class);
3040 lap_msg->alt_hop_limit = alternate_path->hop_limit;
3041 cm_lap_set_packet_rate(lap_msg, alternate_path->rate);
3042 cm_lap_set_sl(lap_msg, alternate_path->sl);
3043 cm_lap_set_subnet_local(lap_msg, 1); /* local only... */
3044 cm_lap_set_local_ack_timeout(lap_msg,
3045 cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
3046 alternate_path->packet_life_time));
3048 if (private_data && private_data_len)
3049 memcpy(lap_msg->private_data, private_data, private_data_len);
3052 int ib_send_cm_lap(struct ib_cm_id *cm_id,
3053 struct sa_path_rec *alternate_path,
3054 const void *private_data,
3055 u8 private_data_len)
3057 struct cm_id_private *cm_id_priv;
3058 struct ib_mad_send_buf *msg;
3059 unsigned long flags;
3060 int ret;
3062 if (private_data && private_data_len > IB_CM_LAP_PRIVATE_DATA_SIZE)
3063 return -EINVAL;
3065 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3066 spin_lock_irqsave(&cm_id_priv->lock, flags);
3067 if (cm_id->state != IB_CM_ESTABLISHED ||
3068 (cm_id->lap_state != IB_CM_LAP_UNINIT &&
3069 cm_id->lap_state != IB_CM_LAP_IDLE)) {
3070 ret = -EINVAL;
3071 goto out;
3074 ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av,
3075 cm_id_priv);
3076 if (ret)
3077 goto out;
3078 cm_id_priv->alt_av.timeout =
3079 cm_ack_timeout(cm_id_priv->target_ack_delay,
3080 cm_id_priv->alt_av.timeout - 1);
3082 ret = cm_alloc_msg(cm_id_priv, &msg);
3083 if (ret)
3084 goto out;
3086 cm_format_lap((struct cm_lap_msg *) msg->mad, cm_id_priv,
3087 alternate_path, private_data, private_data_len);
3088 msg->timeout_ms = cm_id_priv->timeout_ms;
3089 msg->context[1] = (void *) (unsigned long) IB_CM_ESTABLISHED;
3091 ret = ib_post_send_mad(msg, NULL);
3092 if (ret) {
3093 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3094 cm_free_msg(msg);
3095 return ret;
3098 cm_id->lap_state = IB_CM_LAP_SENT;
3099 cm_id_priv->msg = msg;
3101 out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3102 return ret;
3104 EXPORT_SYMBOL(ib_send_cm_lap);
3106 static void cm_format_path_lid_from_lap(struct cm_lap_msg *lap_msg,
3107 struct sa_path_rec *path)
3109 u32 lid;
3111 if (path->rec_type != SA_PATH_REC_TYPE_OPA) {
3112 sa_path_set_dlid(path, ntohs(lap_msg->alt_local_lid));
3113 sa_path_set_slid(path, ntohs(lap_msg->alt_remote_lid));
3114 } else {
3115 lid = opa_get_lid_from_gid(&lap_msg->alt_local_gid);
3116 sa_path_set_dlid(path, lid);
3118 lid = opa_get_lid_from_gid(&lap_msg->alt_remote_gid);
3119 sa_path_set_slid(path, lid);
3123 static void cm_format_path_from_lap(struct cm_id_private *cm_id_priv,
3124 struct sa_path_rec *path,
3125 struct cm_lap_msg *lap_msg)
3127 path->dgid = lap_msg->alt_local_gid;
3128 path->sgid = lap_msg->alt_remote_gid;
3129 path->flow_label = cm_lap_get_flow_label(lap_msg);
3130 path->hop_limit = lap_msg->alt_hop_limit;
3131 path->traffic_class = cm_lap_get_traffic_class(lap_msg);
3132 path->reversible = 1;
3133 path->pkey = cm_id_priv->pkey;
3134 path->sl = cm_lap_get_sl(lap_msg);
3135 path->mtu_selector = IB_SA_EQ;
3136 path->mtu = cm_id_priv->path_mtu;
3137 path->rate_selector = IB_SA_EQ;
3138 path->rate = cm_lap_get_packet_rate(lap_msg);
3139 path->packet_life_time_selector = IB_SA_EQ;
3140 path->packet_life_time = cm_lap_get_local_ack_timeout(lap_msg);
3141 path->packet_life_time -= (path->packet_life_time > 0);
3142 cm_format_path_lid_from_lap(lap_msg, path);
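/*
 * Handle a received LAP (load alternate path) request: rebuild the proposed
 * alternate path from the message, re-send an MRA for duplicates, and
 * otherwise move lap_state to IB_CM_LAP_RCVD and queue the event so the
 * consumer can accept or reject the new path with an APR.
 */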
3145 static int cm_lap_handler(struct cm_work *work)
3147 struct cm_id_private *cm_id_priv;
3148 struct cm_lap_msg *lap_msg;
3149 struct ib_cm_lap_event_param *param;
3150 struct ib_mad_send_buf *msg = NULL;
3151 int ret;
3153 /* todo: verify LAP request and send reject APR if invalid. */
3154 lap_msg = (struct cm_lap_msg *)work->mad_recv_wc->recv_buf.mad;
3155 cm_id_priv = cm_acquire_id(lap_msg->remote_comm_id,
3156 lap_msg->local_comm_id);
3157 if (!cm_id_priv)
3158 return -EINVAL;
3160 ret = cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
3161 work->mad_recv_wc->recv_buf.grh,
3162 &cm_id_priv->av);
3163 if (ret)
3164 goto deref;
3166 param = &work->cm_event.param.lap_rcvd;
3167         memset(&work->path[0], 0, sizeof(work->path[0]));
3168 cm_path_set_rec_type(work->port->cm_dev->ib_device,
3169 work->port->port_num,
3170 &work->path[0],
3171 &lap_msg->alt_local_gid);
3172 param->alternate_path = &work->path[0];
3173 cm_format_path_from_lap(cm_id_priv, param->alternate_path, lap_msg);
3174 work->cm_event.private_data = &lap_msg->private_data;
3176 spin_lock_irq(&cm_id_priv->lock);
3177 if (cm_id_priv->id.state != IB_CM_ESTABLISHED)
3178 goto unlock;
3180 switch (cm_id_priv->id.lap_state) {
3181 case IB_CM_LAP_UNINIT:
3182 case IB_CM_LAP_IDLE:
3183 break;
3184 case IB_CM_MRA_LAP_SENT:
3185 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
3186 counter[CM_LAP_COUNTER]);
3187 msg = cm_alloc_response_msg_no_ah(work->port, work->mad_recv_wc);
3188 if (IS_ERR(msg))
3189 goto unlock;
3191 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
3192 CM_MSG_RESPONSE_OTHER,
3193 cm_id_priv->service_timeout,
3194 cm_id_priv->private_data,
3195 cm_id_priv->private_data_len);
3196 spin_unlock_irq(&cm_id_priv->lock);
3198 if (cm_create_response_msg_ah(work->port, work->mad_recv_wc, msg) ||
3199 ib_post_send_mad(msg, NULL))
3200 cm_free_msg(msg);
3201 goto deref;
3202 case IB_CM_LAP_RCVD:
3203 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
3204 counter[CM_LAP_COUNTER]);
3205 goto unlock;
3206 default:
3207 goto unlock;
3210 cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
3211 cm_id_priv->tid = lap_msg->hdr.tid;
3212 cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av,
3213 cm_id_priv);
3214 ret = atomic_inc_and_test(&cm_id_priv->work_count);
3215 if (!ret)
3216 list_add_tail(&work->list, &cm_id_priv->work_list);
3217 spin_unlock_irq(&cm_id_priv->lock);
3219 if (ret)
3220 cm_process_work(cm_id_priv, work);
3221 else
3222 cm_deref_id(cm_id_priv);
3223 return 0;
3225 unlock: spin_unlock_irq(&cm_id_priv->lock);
3226 deref: cm_deref_id(cm_id_priv);
3227 return -EINVAL;
3230 static void cm_format_apr(struct cm_apr_msg *apr_msg,
3231 struct cm_id_private *cm_id_priv,
3232 enum ib_cm_apr_status status,
3233 void *info,
3234 u8 info_length,
3235 const void *private_data,
3236 u8 private_data_len)
3238 cm_format_mad_hdr(&apr_msg->hdr, CM_APR_ATTR_ID, cm_id_priv->tid);
3239 apr_msg->local_comm_id = cm_id_priv->id.local_id;
3240 apr_msg->remote_comm_id = cm_id_priv->id.remote_id;
3241 apr_msg->ap_status = (u8) status;
3243 if (info && info_length) {
3244 apr_msg->info_length = info_length;
3245 memcpy(apr_msg->info, info, info_length);
3248 if (private_data && private_data_len)
3249 memcpy(apr_msg->private_data, private_data, private_data_len);
3252 int ib_send_cm_apr(struct ib_cm_id *cm_id,
3253 enum ib_cm_apr_status status,
3254 void *info,
3255 u8 info_length,
3256 const void *private_data,
3257 u8 private_data_len)
3259 struct cm_id_private *cm_id_priv;
3260 struct ib_mad_send_buf *msg;
3261 unsigned long flags;
3262 int ret;
3264 if ((private_data && private_data_len > IB_CM_APR_PRIVATE_DATA_SIZE) ||
3265 (info && info_length > IB_CM_APR_INFO_LENGTH))
3266 return -EINVAL;
3268 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3269 spin_lock_irqsave(&cm_id_priv->lock, flags);
3270 if (cm_id->state != IB_CM_ESTABLISHED ||
3271 (cm_id->lap_state != IB_CM_LAP_RCVD &&
3272 cm_id->lap_state != IB_CM_MRA_LAP_SENT)) {
3273 ret = -EINVAL;
3274 goto out;
3277 ret = cm_alloc_msg(cm_id_priv, &msg);
3278 if (ret)
3279 goto out;
3281 cm_format_apr((struct cm_apr_msg *) msg->mad, cm_id_priv, status,
3282 info, info_length, private_data, private_data_len);
3283 ret = ib_post_send_mad(msg, NULL);
3284 if (ret) {
3285 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3286 cm_free_msg(msg);
3287 return ret;
3290 cm_id->lap_state = IB_CM_LAP_IDLE;
3291 out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3292 return ret;
3294 EXPORT_SYMBOL(ib_send_cm_apr);
3296 static int cm_apr_handler(struct cm_work *work)
3298 struct cm_id_private *cm_id_priv;
3299 struct cm_apr_msg *apr_msg;
3300 int ret;
3302 apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad;
3303 cm_id_priv = cm_acquire_id(apr_msg->remote_comm_id,
3304 apr_msg->local_comm_id);
3305 if (!cm_id_priv)
3306 return -EINVAL; /* Unmatched reply. */
3308 work->cm_event.param.apr_rcvd.ap_status = apr_msg->ap_status;
3309 work->cm_event.param.apr_rcvd.apr_info = &apr_msg->info;
3310 work->cm_event.param.apr_rcvd.info_len = apr_msg->info_length;
3311 work->cm_event.private_data = &apr_msg->private_data;
3313 spin_lock_irq(&cm_id_priv->lock);
3314 if (cm_id_priv->id.state != IB_CM_ESTABLISHED ||
3315 (cm_id_priv->id.lap_state != IB_CM_LAP_SENT &&
3316 cm_id_priv->id.lap_state != IB_CM_MRA_LAP_RCVD)) {
3317 spin_unlock_irq(&cm_id_priv->lock);
3318 goto out;
3320 cm_id_priv->id.lap_state = IB_CM_LAP_IDLE;
3321 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
3322 cm_id_priv->msg = NULL;
3324 ret = atomic_inc_and_test(&cm_id_priv->work_count);
3325 if (!ret)
3326 list_add_tail(&work->list, &cm_id_priv->work_list);
3327 spin_unlock_irq(&cm_id_priv->lock);
3329 if (ret)
3330 cm_process_work(cm_id_priv, work);
3331 else
3332 cm_deref_id(cm_id_priv);
3333 return 0;
3334 out:
3335 cm_deref_id(cm_id_priv);
3336 return -EINVAL;
3339 static int cm_timewait_handler(struct cm_work *work)
3341 struct cm_timewait_info *timewait_info;
3342 struct cm_id_private *cm_id_priv;
3343 int ret;
3345 timewait_info = (struct cm_timewait_info *)work;
3346 spin_lock_irq(&cm.lock);
3347 list_del(&timewait_info->list);
3348 spin_unlock_irq(&cm.lock);
3350 cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
3351 timewait_info->work.remote_id);
3352 if (!cm_id_priv)
3353 return -EINVAL;
3355 spin_lock_irq(&cm_id_priv->lock);
3356 if (cm_id_priv->id.state != IB_CM_TIMEWAIT ||
3357 cm_id_priv->remote_qpn != timewait_info->remote_qpn) {
3358 spin_unlock_irq(&cm_id_priv->lock);
3359 goto out;
3361 cm_id_priv->id.state = IB_CM_IDLE;
3362 ret = atomic_inc_and_test(&cm_id_priv->work_count);
3363 if (!ret)
3364 list_add_tail(&work->list, &cm_id_priv->work_list);
3365 spin_unlock_irq(&cm_id_priv->lock);
3367 if (ret)
3368 cm_process_work(cm_id_priv, work);
3369 else
3370 cm_deref_id(cm_id_priv);
3371 return 0;
3372 out:
3373 cm_deref_id(cm_id_priv);
3374 return -EINVAL;
3377 static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg,
3378 struct cm_id_private *cm_id_priv,
3379 struct ib_cm_sidr_req_param *param)
3381 cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID,
3382 cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_SIDR));
3383 sidr_req_msg->request_id = cm_id_priv->id.local_id;
3384 sidr_req_msg->pkey = param->path->pkey;
3385 sidr_req_msg->service_id = param->service_id;
3387 if (param->private_data && param->private_data_len)
3388 memcpy(sidr_req_msg->private_data, param->private_data,
3389 param->private_data_len);
3392 int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
3393 struct ib_cm_sidr_req_param *param)
3395 struct cm_id_private *cm_id_priv;
3396 struct ib_mad_send_buf *msg;
3397 unsigned long flags;
3398 int ret;
3400 if (!param->path || (param->private_data &&
3401 param->private_data_len > IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE))
3402 return -EINVAL;
3404 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3405 ret = cm_init_av_by_path(param->path, &cm_id_priv->av, cm_id_priv);
3406 if (ret)
3407 goto out;
3409 cm_id->service_id = param->service_id;
3410 cm_id->service_mask = ~cpu_to_be64(0);
3411 cm_id_priv->timeout_ms = param->timeout_ms;
3412 cm_id_priv->max_cm_retries = param->max_cm_retries;
3413 ret = cm_alloc_msg(cm_id_priv, &msg);
3414 if (ret)
3415 goto out;
3417 cm_format_sidr_req((struct cm_sidr_req_msg *) msg->mad, cm_id_priv,
3418 param);
3419 msg->timeout_ms = cm_id_priv->timeout_ms;
3420 msg->context[1] = (void *) (unsigned long) IB_CM_SIDR_REQ_SENT;
3422 spin_lock_irqsave(&cm_id_priv->lock, flags);
3423 if (cm_id->state == IB_CM_IDLE)
3424 ret = ib_post_send_mad(msg, NULL);
3425 else
3426 ret = -EINVAL;
3428 if (ret) {
3429 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3430 cm_free_msg(msg);
3431 goto out;
3433 cm_id->state = IB_CM_SIDR_REQ_SENT;
3434 cm_id_priv->msg = msg;
3435 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3436 out:
3437 return ret;
3439 EXPORT_SYMBOL(ib_send_cm_sidr_req);
3441 static void cm_format_sidr_req_event(struct cm_work *work,
3442 struct ib_cm_id *listen_id)
3444 struct cm_sidr_req_msg *sidr_req_msg;
3445 struct ib_cm_sidr_req_event_param *param;
3447 sidr_req_msg = (struct cm_sidr_req_msg *)
3448 work->mad_recv_wc->recv_buf.mad;
3449 param = &work->cm_event.param.sidr_req_rcvd;
3450 param->pkey = __be16_to_cpu(sidr_req_msg->pkey);
3451 param->listen_id = listen_id;
3452 param->service_id = sidr_req_msg->service_id;
3453 param->bth_pkey = cm_get_bth_pkey(work);
3454 param->port = work->port->port_num;
3455 work->cm_event.private_data = &sidr_req_msg->private_data;
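/*
 * Handle a received SIDR REQ: create a cm_id for the query, drop duplicate
 * requests, look up the listener for the requested service ID (rejecting
 * with IB_SIDR_UNSUPPORTED if there is none) and deliver
 * IB_CM_SIDR_REQ_RECEIVED to the listener's callback.
 */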
3458 static int cm_sidr_req_handler(struct cm_work *work)
3460 struct ib_cm_id *cm_id;
3461 struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
3462 struct cm_sidr_req_msg *sidr_req_msg;
3463 struct ib_wc *wc;
3464 int ret;
3466 cm_id = ib_create_cm_id(work->port->cm_dev->ib_device, NULL, NULL);
3467 if (IS_ERR(cm_id))
3468 return PTR_ERR(cm_id);
3469 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3471 /* Record SGID/SLID and request ID for lookup. */
3472 sidr_req_msg = (struct cm_sidr_req_msg *)
3473 work->mad_recv_wc->recv_buf.mad;
3474 wc = work->mad_recv_wc->wc;
3475 cm_id_priv->av.dgid.global.subnet_prefix = cpu_to_be64(wc->slid);
3476 cm_id_priv->av.dgid.global.interface_id = 0;
3477 ret = cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
3478 work->mad_recv_wc->recv_buf.grh,
3479 &cm_id_priv->av);
3480 if (ret)
3481 goto out;
3483 cm_id_priv->id.remote_id = sidr_req_msg->request_id;
3484 cm_id_priv->tid = sidr_req_msg->hdr.tid;
3485 atomic_inc(&cm_id_priv->work_count);
3487 spin_lock_irq(&cm.lock);
3488 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
3489 if (cur_cm_id_priv) {
3490 spin_unlock_irq(&cm.lock);
3491 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
3492 counter[CM_SIDR_REQ_COUNTER]);
3493 goto out; /* Duplicate message. */
3495 cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD;
3496 cur_cm_id_priv = cm_find_listen(cm_id->device,
3497 sidr_req_msg->service_id);
3498 if (!cur_cm_id_priv) {
3499 spin_unlock_irq(&cm.lock);
3500 cm_reject_sidr_req(cm_id_priv, IB_SIDR_UNSUPPORTED);
3501 goto out; /* No match. */
3503 atomic_inc(&cur_cm_id_priv->refcount);
3504 atomic_inc(&cm_id_priv->refcount);
3505 spin_unlock_irq(&cm.lock);
3507 cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler;
3508 cm_id_priv->id.context = cur_cm_id_priv->id.context;
3509 cm_id_priv->id.service_id = sidr_req_msg->service_id;
3510 cm_id_priv->id.service_mask = ~cpu_to_be64(0);
3512 cm_format_sidr_req_event(work, &cur_cm_id_priv->id);
3513 cm_process_work(cm_id_priv, work);
3514 cm_deref_id(cur_cm_id_priv);
3515 return 0;
3516 out:
3517 ib_destroy_cm_id(&cm_id_priv->id);
3518 return -EINVAL;
3521 static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg,
3522 struct cm_id_private *cm_id_priv,
3523 struct ib_cm_sidr_rep_param *param)
3525 cm_format_mad_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID,
3526 cm_id_priv->tid);
3527 sidr_rep_msg->request_id = cm_id_priv->id.remote_id;
3528 sidr_rep_msg->status = param->status;
3529 cm_sidr_rep_set_qpn(sidr_rep_msg, cpu_to_be32(param->qp_num));
3530 sidr_rep_msg->service_id = cm_id_priv->id.service_id;
3531 sidr_rep_msg->qkey = cpu_to_be32(param->qkey);
3533 if (param->info && param->info_length)
3534 memcpy(sidr_rep_msg->info, param->info, param->info_length);
3536 if (param->private_data && param->private_data_len)
3537 memcpy(sidr_rep_msg->private_data, param->private_data,
3538 param->private_data_len);
3541 int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
3542 struct ib_cm_sidr_rep_param *param)
3544 struct cm_id_private *cm_id_priv;
3545 struct ib_mad_send_buf *msg;
3546 unsigned long flags;
3547 int ret;
3549 if ((param->info && param->info_length > IB_CM_SIDR_REP_INFO_LENGTH) ||
3550 (param->private_data &&
3551 param->private_data_len > IB_CM_SIDR_REP_PRIVATE_DATA_SIZE))
3552 return -EINVAL;
3554 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3555 spin_lock_irqsave(&cm_id_priv->lock, flags);
3556 if (cm_id->state != IB_CM_SIDR_REQ_RCVD) {
3557 ret = -EINVAL;
3558 goto error;
3561 ret = cm_alloc_msg(cm_id_priv, &msg);
3562 if (ret)
3563 goto error;
3565 cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv,
3566 param);
3567 ret = ib_post_send_mad(msg, NULL);
3568 if (ret) {
3569 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3570 cm_free_msg(msg);
3571 return ret;
3573 cm_id->state = IB_CM_IDLE;
3574 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3576 spin_lock_irqsave(&cm.lock, flags);
3577 if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node)) {
3578 rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
3579 RB_CLEAR_NODE(&cm_id_priv->sidr_id_node);
3581 spin_unlock_irqrestore(&cm.lock, flags);
3582 return 0;
3584 error: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3585 return ret;
3587 EXPORT_SYMBOL(ib_send_cm_sidr_rep);
3589 static void cm_format_sidr_rep_event(struct cm_work *work)
3591 struct cm_sidr_rep_msg *sidr_rep_msg;
3592 struct ib_cm_sidr_rep_event_param *param;
3594 sidr_rep_msg = (struct cm_sidr_rep_msg *)
3595 work->mad_recv_wc->recv_buf.mad;
3596 param = &work->cm_event.param.sidr_rep_rcvd;
3597 param->status = sidr_rep_msg->status;
3598 param->qkey = be32_to_cpu(sidr_rep_msg->qkey);
3599 param->qpn = be32_to_cpu(cm_sidr_rep_get_qpn(sidr_rep_msg));
3600 param->info = &sidr_rep_msg->info;
3601 param->info_len = sidr_rep_msg->info_length;
3602 work->cm_event.private_data = &sidr_rep_msg->private_data;
3605 static int cm_sidr_rep_handler(struct cm_work *work)
3607 struct cm_sidr_rep_msg *sidr_rep_msg;
3608 struct cm_id_private *cm_id_priv;
3610 sidr_rep_msg = (struct cm_sidr_rep_msg *)
3611 work->mad_recv_wc->recv_buf.mad;
3612 cm_id_priv = cm_acquire_id(sidr_rep_msg->request_id, 0);
3613 if (!cm_id_priv)
3614 return -EINVAL; /* Unmatched reply. */
3616 spin_lock_irq(&cm_id_priv->lock);
3617 if (cm_id_priv->id.state != IB_CM_SIDR_REQ_SENT) {
3618 spin_unlock_irq(&cm_id_priv->lock);
3619 goto out;
3621 cm_id_priv->id.state = IB_CM_IDLE;
3622 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
3623 spin_unlock_irq(&cm_id_priv->lock);
3625 cm_format_sidr_rep_event(work);
3626 cm_process_work(cm_id_priv, work);
3627 return 0;
3628 out:
3629 cm_deref_id(cm_id_priv);
3630 	return -EINVAL;
3631 }
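/*
 * A send completed in error.  If the message is still the active one for
 * its cm_id and the cm_id state has not changed, reset the state machine
 * and report the matching *_ERROR event to the consumer.
 */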
3633 static void cm_process_send_error(struct ib_mad_send_buf *msg,
3634 enum ib_wc_status wc_status)
3636 struct cm_id_private *cm_id_priv;
3637 struct ib_cm_event cm_event;
3638 enum ib_cm_state state;
3639 int ret;
3641 memset(&cm_event, 0, sizeof cm_event);
3642 cm_id_priv = msg->context[0];
3644 /* Discard old sends or ones without a response. */
3645 spin_lock_irq(&cm_id_priv->lock);
3646 state = (enum ib_cm_state) (unsigned long) msg->context[1];
3647 if (msg != cm_id_priv->msg || state != cm_id_priv->id.state)
3648 goto discard;
3650 pr_debug_ratelimited("CM: failed sending MAD in state %d. (%s)\n",
3651 state, ib_wc_status_msg(wc_status));
3652 switch (state) {
3653 case IB_CM_REQ_SENT:
3654 case IB_CM_MRA_REQ_RCVD:
3655 cm_reset_to_idle(cm_id_priv);
3656 cm_event.event = IB_CM_REQ_ERROR;
3657 break;
3658 case IB_CM_REP_SENT:
3659 case IB_CM_MRA_REP_RCVD:
3660 cm_reset_to_idle(cm_id_priv);
3661 cm_event.event = IB_CM_REP_ERROR;
3662 break;
3663 case IB_CM_DREQ_SENT:
3664 cm_enter_timewait(cm_id_priv);
3665 cm_event.event = IB_CM_DREQ_ERROR;
3666 break;
3667 case IB_CM_SIDR_REQ_SENT:
3668 cm_id_priv->id.state = IB_CM_IDLE;
3669 cm_event.event = IB_CM_SIDR_REQ_ERROR;
3670 break;
3671 default:
3672 		goto discard;
3673 	}
3674 spin_unlock_irq(&cm_id_priv->lock);
3675 cm_event.param.send_status = wc_status;
3677 /* No other events can occur on the cm_id at this point. */
3678 ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &cm_event);
3679 cm_free_msg(msg);
3680 if (ret)
3681 ib_destroy_cm_id(&cm_id_priv->id);
3682 return;
3683 discard:
3684 spin_unlock_irq(&cm_id_priv->lock);
3685 	cm_free_msg(msg);
3686 }
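/*
 * Send completion handler for the CM MAD agent: update the transmit and
 * retry counters, free successfully sent (or flushed) messages, and pass
 * real send failures on to cm_process_send_error().
 */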
3688 static void cm_send_handler(struct ib_mad_agent *mad_agent,
3689 struct ib_mad_send_wc *mad_send_wc)
3691 struct ib_mad_send_buf *msg = mad_send_wc->send_buf;
3692 struct cm_port *port;
3693 u16 attr_index;
3695 port = mad_agent->context;
3696 attr_index = be16_to_cpu(((struct ib_mad_hdr *)
3697 msg->mad)->attr_id) - CM_ATTR_ID_OFFSET;
3699 	/*
3700 	 * If the send was in response to a received message (context[0] is not
3701 	 * set to a cm_id), and is not a REJ, then it is a send that was
3702 	 * manually retried.
3703 	 */
3704 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
3705 msg->retries = 1;
3707 atomic_long_add(1 + msg->retries,
3708 &port->counter_group[CM_XMIT].counter[attr_index]);
3709 if (msg->retries)
3710 atomic_long_add(msg->retries,
3711 &port->counter_group[CM_XMIT_RETRIES].
3712 counter[attr_index]);
3714 switch (mad_send_wc->status) {
3715 case IB_WC_SUCCESS:
3716 case IB_WC_WR_FLUSH_ERR:
3717 cm_free_msg(msg);
3718 break;
3719 default:
3720 if (msg->context[0] && msg->context[1])
3721 cm_process_send_error(msg, mad_send_wc->status);
3722 else
3723 cm_free_msg(msg);
3724 		break;
3725 	}
3726 }
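/*
 * Workqueue entry point: dispatch a queued cm_work item to the handler
 * for its CM event; the work item is freed here if the handler fails.
 */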
3728 static void cm_work_handler(struct work_struct *_work)
3730 struct cm_work *work = container_of(_work, struct cm_work, work.work);
3731 int ret;
3733 switch (work->cm_event.event) {
3734 case IB_CM_REQ_RECEIVED:
3735 ret = cm_req_handler(work);
3736 break;
3737 case IB_CM_MRA_RECEIVED:
3738 ret = cm_mra_handler(work);
3739 break;
3740 case IB_CM_REJ_RECEIVED:
3741 ret = cm_rej_handler(work);
3742 break;
3743 case IB_CM_REP_RECEIVED:
3744 ret = cm_rep_handler(work);
3745 break;
3746 case IB_CM_RTU_RECEIVED:
3747 ret = cm_rtu_handler(work);
3748 break;
3749 case IB_CM_USER_ESTABLISHED:
3750 ret = cm_establish_handler(work);
3751 break;
3752 case IB_CM_DREQ_RECEIVED:
3753 ret = cm_dreq_handler(work);
3754 break;
3755 case IB_CM_DREP_RECEIVED:
3756 ret = cm_drep_handler(work);
3757 break;
3758 case IB_CM_SIDR_REQ_RECEIVED:
3759 ret = cm_sidr_req_handler(work);
3760 break;
3761 case IB_CM_SIDR_REP_RECEIVED:
3762 ret = cm_sidr_rep_handler(work);
3763 break;
3764 case IB_CM_LAP_RECEIVED:
3765 ret = cm_lap_handler(work);
3766 break;
3767 case IB_CM_APR_RECEIVED:
3768 ret = cm_apr_handler(work);
3769 break;
3770 case IB_CM_TIMEWAIT_EXIT:
3771 ret = cm_timewait_handler(work);
3772 break;
3773 default:
3774 pr_debug("cm_event.event: 0x%x\n", work->cm_event.event);
3775 ret = -EINVAL;
3776 		break;
3777 	}
3778 	if (ret)
3779 		cm_free_work(work);
3780 }
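/*
 * Transition a cm_id that has sent a REP to IB_CM_ESTABLISHED on behalf of
 * ib_cm_notify(IB_EVENT_COMM_EST) and queue an IB_CM_USER_ESTABLISHED work
 * item so the consumer sees the event from the CM workqueue.
 */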
3782 static int cm_establish(struct ib_cm_id *cm_id)
3784 struct cm_id_private *cm_id_priv;
3785 struct cm_work *work;
3786 unsigned long flags;
3787 int ret = 0;
3788 struct cm_device *cm_dev;
3790 cm_dev = ib_get_client_data(cm_id->device, &cm_client);
3791 if (!cm_dev)
3792 return -ENODEV;
3794 work = kmalloc(sizeof *work, GFP_ATOMIC);
3795 if (!work)
3796 return -ENOMEM;
3798 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3799 spin_lock_irqsave(&cm_id_priv->lock, flags);
3800 	switch (cm_id->state)
3801 	{
3802 case IB_CM_REP_SENT:
3803 case IB_CM_MRA_REP_RCVD:
3804 cm_id->state = IB_CM_ESTABLISHED;
3805 break;
3806 case IB_CM_ESTABLISHED:
3807 ret = -EISCONN;
3808 break;
3809 default:
3810 pr_debug("%s: local_id %d, cm_id->state: %d\n", __func__,
3811 be32_to_cpu(cm_id->local_id), cm_id->state);
3812 ret = -EINVAL;
3813 		break;
3814 	}
3815 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3817 if (ret) {
3818 kfree(work);
3819 		goto out;
3820 	}
3822 	/*
3823 	 * The CM worker thread may try to destroy the cm_id before it
3824 	 * can execute this work item. To prevent potential deadlock,
3825 	 * we need to find the cm_id once we're in the context of the
3826 	 * worker thread, rather than holding a reference on it.
3827 	 */
3828 INIT_DELAYED_WORK(&work->work, cm_work_handler);
3829 work->local_id = cm_id->local_id;
3830 work->remote_id = cm_id->remote_id;
3831 work->mad_recv_wc = NULL;
3832 work->cm_event.event = IB_CM_USER_ESTABLISHED;
3834 /* Check if the device started its remove_one */
3835 spin_lock_irqsave(&cm.lock, flags);
3836 if (!cm_dev->going_down) {
3837 queue_delayed_work(cm.wq, &work->work, 0);
3838 } else {
3839 kfree(work);
3840 		ret = -ENODEV;
3841 	}
3842 spin_unlock_irqrestore(&cm.lock, flags);
3844 out:
3845 	return ret;
3846 }
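/*
 * Swap the primary and alternate address vectors (and their port-ready
 * state) after path migration on an established connection.
 */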
3848 static int cm_migrate(struct ib_cm_id *cm_id)
3850 struct cm_id_private *cm_id_priv;
3851 struct cm_av tmp_av;
3852 unsigned long flags;
3853 int tmp_send_port_not_ready;
3854 int ret = 0;
3856 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3857 spin_lock_irqsave(&cm_id_priv->lock, flags);
3858 if (cm_id->state == IB_CM_ESTABLISHED &&
3859 (cm_id->lap_state == IB_CM_LAP_UNINIT ||
3860 cm_id->lap_state == IB_CM_LAP_IDLE)) {
3861 cm_id->lap_state = IB_CM_LAP_IDLE;
3862 /* Swap address vector */
3863 tmp_av = cm_id_priv->av;
3864 cm_id_priv->av = cm_id_priv->alt_av;
3865 cm_id_priv->alt_av = tmp_av;
3866 /* Swap port send ready state */
3867 tmp_send_port_not_ready = cm_id_priv->prim_send_port_not_ready;
3868 cm_id_priv->prim_send_port_not_ready = cm_id_priv->altr_send_port_not_ready;
3869 cm_id_priv->altr_send_port_not_ready = tmp_send_port_not_ready;
3870 } else
3871 ret = -EINVAL;
3872 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3874 	return ret;
3875 }
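/*
 * ib_cm_notify - tell the CM about a QP event observed by the consumer
 * (IB_EVENT_COMM_EST or IB_EVENT_PATH_MIG) so the connection state can be
 * updated to match.
 */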
3877 int ib_cm_notify(struct ib_cm_id *cm_id, enum ib_event_type event)
3879 int ret;
3881 switch (event) {
3882 case IB_EVENT_COMM_EST:
3883 ret = cm_establish(cm_id);
3884 break;
3885 case IB_EVENT_PATH_MIG:
3886 ret = cm_migrate(cm_id);
3887 break;
3888 default:
3889 		ret = -EINVAL;
3890 	}
3891 	return ret;
3892 }
3893 EXPORT_SYMBOL(ib_cm_notify);
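/*
 * Receive handler for the CM MAD agent: map the MAD attribute ID to a CM
 * event, bump the receive counter and queue a work item.  MADs of unknown
 * type, or ones that arrive while the device is being removed, are freed.
 */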
3895 static void cm_recv_handler(struct ib_mad_agent *mad_agent,
3896 struct ib_mad_send_buf *send_buf,
3897 struct ib_mad_recv_wc *mad_recv_wc)
3899 struct cm_port *port = mad_agent->context;
3900 struct cm_work *work;
3901 enum ib_cm_event_type event;
3902 bool alt_path = false;
3903 u16 attr_id;
3904 int paths = 0;
3905 int going_down = 0;
3907 switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
3908 case CM_REQ_ATTR_ID:
3909 alt_path = cm_req_has_alt_path((struct cm_req_msg *)
3910 mad_recv_wc->recv_buf.mad);
3911 paths = 1 + (alt_path != 0);
3912 event = IB_CM_REQ_RECEIVED;
3913 break;
3914 case CM_MRA_ATTR_ID:
3915 event = IB_CM_MRA_RECEIVED;
3916 break;
3917 case CM_REJ_ATTR_ID:
3918 event = IB_CM_REJ_RECEIVED;
3919 break;
3920 case CM_REP_ATTR_ID:
3921 event = IB_CM_REP_RECEIVED;
3922 break;
3923 case CM_RTU_ATTR_ID:
3924 event = IB_CM_RTU_RECEIVED;
3925 break;
3926 case CM_DREQ_ATTR_ID:
3927 event = IB_CM_DREQ_RECEIVED;
3928 break;
3929 case CM_DREP_ATTR_ID:
3930 event = IB_CM_DREP_RECEIVED;
3931 break;
3932 case CM_SIDR_REQ_ATTR_ID:
3933 event = IB_CM_SIDR_REQ_RECEIVED;
3934 break;
3935 case CM_SIDR_REP_ATTR_ID:
3936 event = IB_CM_SIDR_REP_RECEIVED;
3937 break;
3938 case CM_LAP_ATTR_ID:
3939 paths = 1;
3940 event = IB_CM_LAP_RECEIVED;
3941 break;
3942 case CM_APR_ATTR_ID:
3943 event = IB_CM_APR_RECEIVED;
3944 break;
3945 default:
3946 ib_free_recv_mad(mad_recv_wc);
3947 		return;
3948 	}
3950 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
3951 atomic_long_inc(&port->counter_group[CM_RECV].
3952 counter[attr_id - CM_ATTR_ID_OFFSET]);
3954 work = kmalloc(sizeof(*work) + sizeof(struct sa_path_rec) * paths,
3955 GFP_KERNEL);
3956 if (!work) {
3957 ib_free_recv_mad(mad_recv_wc);
3958 		return;
3959 	}
3961 INIT_DELAYED_WORK(&work->work, cm_work_handler);
3962 work->cm_event.event = event;
3963 work->mad_recv_wc = mad_recv_wc;
3964 work->port = port;
3966 /* Check if the device started its remove_one */
3967 spin_lock_irq(&cm.lock);
3968 if (!port->cm_dev->going_down)
3969 queue_delayed_work(cm.wq, &work->work, 0);
3970 else
3971 going_down = 1;
3972 spin_unlock_irq(&cm.lock);
3974 if (going_down) {
3975 kfree(work);
3976 		ib_free_recv_mad(mad_recv_wc);
3977 	}
3978 }
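/*
 * Fill in the attributes needed to move the QP to INIT for this
 * connection: state, access flags, pkey index and port number.
 */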
3980 static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
3981 struct ib_qp_attr *qp_attr,
3982 int *qp_attr_mask)
3984 unsigned long flags;
3985 int ret;
3987 spin_lock_irqsave(&cm_id_priv->lock, flags);
3988 switch (cm_id_priv->id.state) {
3989 case IB_CM_REQ_SENT:
3990 case IB_CM_MRA_REQ_RCVD:
3991 case IB_CM_REQ_RCVD:
3992 case IB_CM_MRA_REQ_SENT:
3993 case IB_CM_REP_RCVD:
3994 case IB_CM_MRA_REP_SENT:
3995 case IB_CM_REP_SENT:
3996 case IB_CM_MRA_REP_RCVD:
3997 case IB_CM_ESTABLISHED:
3998 *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS |
3999 IB_QP_PKEY_INDEX | IB_QP_PORT;
4000 qp_attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE;
4001 if (cm_id_priv->responder_resources)
4002 qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ |
4003 IB_ACCESS_REMOTE_ATOMIC;
4004 qp_attr->pkey_index = cm_id_priv->av.pkey_index;
4005 qp_attr->port_num = cm_id_priv->av.port->port_num;
4006 ret = 0;
4007 break;
4008 default:
4009 pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
4010 __func__, be32_to_cpu(cm_id_priv->id.local_id),
4011 cm_id_priv->id.state);
4012 ret = -EINVAL;
4013 		break;
4014 	}
4015 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
4016 	return ret;
4017 }
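/*
 * Fill in the attributes needed to move the QP to RTR: primary (and, if
 * present, alternate) path, path MTU, destination QPN and RQ PSN.
 */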
4019 static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
4020 struct ib_qp_attr *qp_attr,
4021 int *qp_attr_mask)
4023 unsigned long flags;
4024 int ret;
4026 spin_lock_irqsave(&cm_id_priv->lock, flags);
4027 switch (cm_id_priv->id.state) {
4028 case IB_CM_REQ_RCVD:
4029 case IB_CM_MRA_REQ_SENT:
4030 case IB_CM_REP_RCVD:
4031 case IB_CM_MRA_REP_SENT:
4032 case IB_CM_REP_SENT:
4033 case IB_CM_MRA_REP_RCVD:
4034 case IB_CM_ESTABLISHED:
4035 *qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
4036 IB_QP_DEST_QPN | IB_QP_RQ_PSN;
4037 qp_attr->ah_attr = cm_id_priv->av.ah_attr;
4038 qp_attr->path_mtu = cm_id_priv->path_mtu;
4039 qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn);
4040 qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn);
4041 if (cm_id_priv->qp_type == IB_QPT_RC ||
4042 cm_id_priv->qp_type == IB_QPT_XRC_TGT) {
4043 *qp_attr_mask |= IB_QP_MAX_DEST_RD_ATOMIC |
4044 IB_QP_MIN_RNR_TIMER;
4045 qp_attr->max_dest_rd_atomic =
4046 cm_id_priv->responder_resources;
4047 			qp_attr->min_rnr_timer = 0;
4048 		}
4049 if (rdma_ah_get_dlid(&cm_id_priv->alt_av.ah_attr)) {
4050 *qp_attr_mask |= IB_QP_ALT_PATH;
4051 qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
4052 qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
4053 qp_attr->alt_timeout = cm_id_priv->alt_av.timeout;
4054 			qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
4055 		}
4056 ret = 0;
4057 break;
4058 default:
4059 pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
4060 __func__, be32_to_cpu(cm_id_priv->id.local_id),
4061 cm_id_priv->id.state);
4062 ret = -EINVAL;
4063 		break;
4064 	}
4065 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
4066 	return ret;
4067 }
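/*
 * Fill in the attributes needed to move the QP to RTS: SQ PSN, timeouts
 * and retry counts, or rearm path migration when an alternate path has
 * been loaded after the connection was established.
 */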
4069 static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
4070 struct ib_qp_attr *qp_attr,
4071 int *qp_attr_mask)
4073 unsigned long flags;
4074 int ret;
4076 spin_lock_irqsave(&cm_id_priv->lock, flags);
4077 switch (cm_id_priv->id.state) {
4078 /* Allow transition to RTS before sending REP */
4079 case IB_CM_REQ_RCVD:
4080 case IB_CM_MRA_REQ_SENT:
4082 case IB_CM_REP_RCVD:
4083 case IB_CM_MRA_REP_SENT:
4084 case IB_CM_REP_SENT:
4085 case IB_CM_MRA_REP_RCVD:
4086 case IB_CM_ESTABLISHED:
4087 if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT) {
4088 *qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
4089 qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
4090 switch (cm_id_priv->qp_type) {
4091 case IB_QPT_RC:
4092 case IB_QPT_XRC_INI:
4093 *qp_attr_mask |= IB_QP_RETRY_CNT | IB_QP_RNR_RETRY |
4094 IB_QP_MAX_QP_RD_ATOMIC;
4095 qp_attr->retry_cnt = cm_id_priv->retry_count;
4096 qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
4097 qp_attr->max_rd_atomic = cm_id_priv->initiator_depth;
4098 /* fall through */
4099 case IB_QPT_XRC_TGT:
4100 *qp_attr_mask |= IB_QP_TIMEOUT;
4101 qp_attr->timeout = cm_id_priv->av.timeout;
4102 break;
4103 default:
4104 				break;
4105 			}
4106 if (rdma_ah_get_dlid(&cm_id_priv->alt_av.ah_attr)) {
4107 *qp_attr_mask |= IB_QP_PATH_MIG_STATE;
4108 			qp_attr->path_mig_state = IB_MIG_REARM;
4109 		}
4110 } else {
4111 *qp_attr_mask = IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE;
4112 qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
4113 qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
4114 qp_attr->alt_timeout = cm_id_priv->alt_av.timeout;
4115 qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
4116 		qp_attr->path_mig_state = IB_MIG_REARM;
4117 	}
4118 ret = 0;
4119 break;
4120 default:
4121 pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
4122 __func__, be32_to_cpu(cm_id_priv->id.local_id),
4123 cm_id_priv->id.state);
4124 ret = -EINVAL;
4125 		break;
4126 	}
4127 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
4128 	return ret;
4129 }
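/*
 * ib_cm_init_qp_attr - fill QP attributes for the requested QP state
 * transition (INIT, RTR or RTS) based on the current connection state.
 */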
4131 int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
4132 struct ib_qp_attr *qp_attr,
4133 int *qp_attr_mask)
4135 struct cm_id_private *cm_id_priv;
4136 int ret;
4138 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
4139 switch (qp_attr->qp_state) {
4140 case IB_QPS_INIT:
4141 ret = cm_init_qp_init_attr(cm_id_priv, qp_attr, qp_attr_mask);
4142 break;
4143 case IB_QPS_RTR:
4144 ret = cm_init_qp_rtr_attr(cm_id_priv, qp_attr, qp_attr_mask);
4145 break;
4146 case IB_QPS_RTS:
4147 ret = cm_init_qp_rts_attr(cm_id_priv, qp_attr, qp_attr_mask);
4148 break;
4149 default:
4150 ret = -EINVAL;
4151 		break;
4152 	}
4153 	return ret;
4154 }
4155 EXPORT_SYMBOL(ib_cm_init_qp_attr);
4157 static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
4158 char *buf)
4160 struct cm_counter_group *group;
4161 struct cm_counter_attribute *cm_attr;
4163 group = container_of(obj, struct cm_counter_group, obj);
4164 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
4166 return sprintf(buf, "%ld\n",
4167 		       atomic_long_read(&group->counter[cm_attr->index]));
4168 }
4170 static const struct sysfs_ops cm_counter_ops = {
4171 	.show = cm_show_counter
4172 };
4174 static struct kobj_type cm_counter_obj_type = {
4175 .sysfs_ops = &cm_counter_ops,
4176 	.default_attrs = cm_counter_default_attrs
4177 };
4179 static void cm_release_port_obj(struct kobject *obj)
4181 struct cm_port *cm_port;
4183 cm_port = container_of(obj, struct cm_port, port_obj);
4184 	kfree(cm_port);
4185 }
4187 static struct kobj_type cm_port_obj_type = {
4188 	.release = cm_release_port_obj
4189 };
4191 static char *cm_devnode(struct device *dev, umode_t *mode)
4193 if (mode)
4194 *mode = 0666;
4195 	return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
4196 }
4198 struct class cm_class = {
4199 .owner = THIS_MODULE,
4200 .name = "infiniband_cm",
4201 	.devnode = cm_devnode,
4202 };
4203 EXPORT_SYMBOL(cm_class);
4205 static int cm_create_port_fs(struct cm_port *port)
4207 int i, ret;
4209 ret = kobject_init_and_add(&port->port_obj, &cm_port_obj_type,
4210 &port->cm_dev->device->kobj,
4211 "%d", port->port_num);
4212 if (ret) {
4213 kfree(port);
4214 		return ret;
4215 	}
4217 for (i = 0; i < CM_COUNTER_GROUPS; i++) {
4218 ret = kobject_init_and_add(&port->counter_group[i].obj,
4219 &cm_counter_obj_type,
4220 &port->port_obj,
4221 "%s", counter_group_names[i]);
4222 if (ret)
4223 			goto error;
4224 	}
4226 return 0;
4228 error:
4229 while (i--)
4230 kobject_put(&port->counter_group[i].obj);
4231 kobject_put(&port->port_obj);
4232 	return ret;
4233 }
4236 static void cm_remove_port_fs(struct cm_port *port)
4238 int i;
4240 for (i = 0; i < CM_COUNTER_GROUPS; i++)
4241 kobject_put(&port->counter_group[i].obj);
4243 	kobject_put(&port->port_obj);
4244 }
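/*
 * Per-device add hook for the ib_cm client: create the class device and
 * per-port sysfs counter groups, register a MAD agent on every CM-capable
 * port and advertise IB_PORT_CM_SUP on those ports.
 */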
4246 static void cm_add_one(struct ib_device *ib_device)
4248 struct cm_device *cm_dev;
4249 struct cm_port *port;
4250 struct ib_mad_reg_req reg_req = {
4251 .mgmt_class = IB_MGMT_CLASS_CM,
4252 		.mgmt_class_version = IB_CM_CLASS_VERSION,
4253 	};
4254 struct ib_port_modify port_modify = {
4255 		.set_port_cap_mask = IB_PORT_CM_SUP
4256 	};
4257 unsigned long flags;
4258 int ret;
4259 int count = 0;
4260 u8 i;
4262 cm_dev = kzalloc(sizeof(*cm_dev) + sizeof(*port) *
4263 ib_device->phys_port_cnt, GFP_KERNEL);
4264 if (!cm_dev)
4265 return;
4267 cm_dev->ib_device = ib_device;
4268 cm_dev->ack_delay = ib_device->attrs.local_ca_ack_delay;
4269 cm_dev->going_down = 0;
4270 cm_dev->device = device_create(&cm_class, &ib_device->dev,
4271 MKDEV(0, 0), NULL,
4272 "%s", ib_device->name);
4273 if (IS_ERR(cm_dev->device)) {
4274 kfree(cm_dev);
4275 		return;
4276 	}
4278 set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
4279 for (i = 1; i <= ib_device->phys_port_cnt; i++) {
4280 if (!rdma_cap_ib_cm(ib_device, i))
4281 continue;
4283 port = kzalloc(sizeof *port, GFP_KERNEL);
4284 if (!port)
4285 goto error1;
4287 cm_dev->port[i-1] = port;
4288 port->cm_dev = cm_dev;
4289 port->port_num = i;
4291 INIT_LIST_HEAD(&port->cm_priv_prim_list);
4292 INIT_LIST_HEAD(&port->cm_priv_altr_list);
4294 ret = cm_create_port_fs(port);
4295 if (ret)
4296 goto error1;
4298 port->mad_agent = ib_register_mad_agent(ib_device, i,
4299 IB_QPT_GSI,
4300 							&reg_req,
4301 							0,
4302 							cm_send_handler,
4303 							cm_recv_handler,
4304 							port,
4305 							0);
4306 if (IS_ERR(port->mad_agent))
4307 goto error2;
4309 ret = ib_modify_port(ib_device, i, 0, &port_modify);
4310 if (ret)
4311 goto error3;
4313 		count++;
4314 	}
4316 if (!count)
4317 goto free;
4319 ib_set_client_data(ib_device, &cm_client, cm_dev);
4321 write_lock_irqsave(&cm.device_lock, flags);
4322 list_add_tail(&cm_dev->list, &cm.device_list);
4323 write_unlock_irqrestore(&cm.device_lock, flags);
4324 return;
4326 error3:
4327 ib_unregister_mad_agent(port->mad_agent);
4328 error2:
4329 cm_remove_port_fs(port);
4330 error1:
4331 port_modify.set_port_cap_mask = 0;
4332 port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
4333 while (--i) {
4334 if (!rdma_cap_ib_cm(ib_device, i))
4335 continue;
4337 port = cm_dev->port[i-1];
4338 ib_modify_port(ib_device, port->port_num, 0, &port_modify);
4339 ib_unregister_mad_agent(port->mad_agent);
4340 		cm_remove_port_fs(port);
4341 	}
4342 free:
4343 device_unregister(cm_dev->device);
4344 	kfree(cm_dev);
4345 }
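/*
 * Per-device remove hook: undo cm_add_one().  Mark the device as going
 * down, clear IB_PORT_CM_SUP, flush pending work, unregister the MAD
 * agents and tear down the per-port sysfs objects.
 */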
4347 static void cm_remove_one(struct ib_device *ib_device, void *client_data)
4349 struct cm_device *cm_dev = client_data;
4350 struct cm_port *port;
4351 struct cm_id_private *cm_id_priv;
4352 struct ib_mad_agent *cur_mad_agent;
4353 struct ib_port_modify port_modify = {
4354 		.clr_port_cap_mask = IB_PORT_CM_SUP
4355 	};
4356 unsigned long flags;
4357 int i;
4359 if (!cm_dev)
4360 return;
4362 write_lock_irqsave(&cm.device_lock, flags);
4363 list_del(&cm_dev->list);
4364 write_unlock_irqrestore(&cm.device_lock, flags);
4366 spin_lock_irq(&cm.lock);
4367 cm_dev->going_down = 1;
4368 spin_unlock_irq(&cm.lock);
4370 for (i = 1; i <= ib_device->phys_port_cnt; i++) {
4371 if (!rdma_cap_ib_cm(ib_device, i))
4372 continue;
4374 port = cm_dev->port[i-1];
4375 ib_modify_port(ib_device, port->port_num, 0, &port_modify);
4377 		/* Mark all cm_ids bound to this port as not ready to send */
4377 spin_lock_irq(&cm.lock);
4378 list_for_each_entry(cm_id_priv, &port->cm_priv_altr_list, altr_list)
4379 cm_id_priv->altr_send_port_not_ready = 1;
4380 list_for_each_entry(cm_id_priv, &port->cm_priv_prim_list, prim_list)
4381 cm_id_priv->prim_send_port_not_ready = 1;
4382 spin_unlock_irq(&cm.lock);
4383 		/*
4384 		 * Flush the workqueue after going_down has been set.  This
4385 		 * ensures the receive handler will not queue any new work,
4386 		 * after which it is safe to unregister the MAD agent.
4387 		 */
4388 flush_workqueue(cm.wq);
4389 spin_lock_irq(&cm.state_lock);
4390 cur_mad_agent = port->mad_agent;
4391 port->mad_agent = NULL;
4392 spin_unlock_irq(&cm.state_lock);
4393 ib_unregister_mad_agent(cur_mad_agent);
4394 		cm_remove_port_fs(port);
4395 	}
4397 device_unregister(cm_dev->device);
4398 	kfree(cm_dev);
4399 }
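/*
 * Module init: set up the global CM state (tables, locks, ID allocator),
 * register the sysfs class, create the ordered workqueue and register the
 * ib_cm client with the RDMA core.
 */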
4401 static int __init ib_cm_init(void)
4403 int ret;
4405 memset(&cm, 0, sizeof cm);
4406 INIT_LIST_HEAD(&cm.device_list);
4407 rwlock_init(&cm.device_lock);
4408 spin_lock_init(&cm.lock);
4409 spin_lock_init(&cm.state_lock);
4410 cm.listen_service_table = RB_ROOT;
4411 cm.listen_service_id = be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
4412 cm.remote_id_table = RB_ROOT;
4413 cm.remote_qp_table = RB_ROOT;
4414 cm.remote_sidr_table = RB_ROOT;
4415 idr_init(&cm.local_id_table);
4416 get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand);
4417 INIT_LIST_HEAD(&cm.timewait_list);
4419 ret = class_register(&cm_class);
4420 if (ret) {
4421 ret = -ENOMEM;
4422 		goto error1;
4423 	}
4425 cm.wq = alloc_workqueue("ib_cm", 0, 1);
4426 if (!cm.wq) {
4427 ret = -ENOMEM;
4428 		goto error2;
4429 	}
4431 ret = ib_register_client(&cm_client);
4432 if (ret)
4433 goto error3;
4435 return 0;
4436 error3:
4437 destroy_workqueue(cm.wq);
4438 error2:
4439 class_unregister(&cm_class);
4440 error1:
4441 idr_destroy(&cm.local_id_table);
4442 	return ret;
4443 }
4445 static void __exit ib_cm_cleanup(void)
4447 struct cm_timewait_info *timewait_info, *tmp;
4449 spin_lock_irq(&cm.lock);
4450 list_for_each_entry(timewait_info, &cm.timewait_list, list)
4451 cancel_delayed_work(&timewait_info->work.work);
4452 spin_unlock_irq(&cm.lock);
4454 ib_unregister_client(&cm_client);
4455 destroy_workqueue(cm.wq);
4457 list_for_each_entry_safe(timewait_info, tmp, &cm.timewait_list, list) {
4458 list_del(&timewait_info->list);
4459 		kfree(timewait_info);
4460 	}
4462 class_unregister(&cm_class);
4463 	idr_destroy(&cm.local_id_table);
4464 }
4466 module_init(ib_cm_init);
4467 module_exit(ib_cm_cleanup);