Merge tag 'chrome-platform-for-linus-4.13' of git://git.kernel.org/pub/scm/linux...
[linux/fpc-iii.git] / drivers / infiniband / core / mad.c
blobf8f53bb90837ca26c188cd537f5c8f6c8027e921
1 /*
2 * Copyright (c) 2004-2007 Voltaire, Inc. All rights reserved.
3 * Copyright (c) 2005 Intel Corporation. All rights reserved.
4 * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved.
5 * Copyright (c) 2009 HNR Consulting. All rights reserved.
6 * Copyright (c) 2014 Intel Corporation. All rights reserved.
8 * This software is available to you under a choice of one of two
9 * licenses. You may choose to be licensed under the terms of the GNU
10 * General Public License (GPL) Version 2, available from the file
11 * COPYING in the main directory of this source tree, or the
12 * OpenIB.org BSD license below:
14 * Redistribution and use in source and binary forms, with or
15 * without modification, are permitted provided that the following
16 * conditions are met:
18 * - Redistributions of source code must retain the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer.
22 * - Redistributions in binary form must reproduce the above
23 * copyright notice, this list of conditions and the following
24 * disclaimer in the documentation and/or other materials
25 * provided with the distribution.
27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
28 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
29 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
30 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
31 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
32 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
33 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
34 * SOFTWARE.
38 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
40 #include <linux/dma-mapping.h>
41 #include <linux/slab.h>
42 #include <linux/module.h>
43 #include <linux/security.h>
44 #include <rdma/ib_cache.h>
46 #include "mad_priv.h"
47 #include "core_priv.h"
48 #include "mad_rmpp.h"
49 #include "smi.h"
50 #include "opa_smi.h"
51 #include "agent.h"
52 #include "core_priv.h"
54 static int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
55 static int mad_recvq_size = IB_MAD_QP_RECV_SIZE;
57 module_param_named(send_queue_size, mad_sendq_size, int, 0444);
58 MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests");
59 module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
60 MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");
62 static struct list_head ib_mad_port_list;
63 static u32 ib_mad_client_id = 0;
65 /* Port list lock */
66 static DEFINE_SPINLOCK(ib_mad_port_list_lock);
68 /* Forward declarations */
69 static int method_in_use(struct ib_mad_mgmt_method_table **method,
70 struct ib_mad_reg_req *mad_reg_req);
71 static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
72 static struct ib_mad_agent_private *find_mad_agent(
73 struct ib_mad_port_private *port_priv,
74 const struct ib_mad_hdr *mad);
75 static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
76 struct ib_mad_private *mad);
77 static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
78 static void timeout_sends(struct work_struct *work);
79 static void local_completions(struct work_struct *work);
80 static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
81 struct ib_mad_agent_private *agent_priv,
82 u8 mgmt_class);
83 static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
84 struct ib_mad_agent_private *agent_priv);
85 static bool ib_mad_send_error(struct ib_mad_port_private *port_priv,
86 struct ib_wc *wc);
87 static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc);
90 * Returns a ib_mad_port_private structure or NULL for a device/port
91 * Assumes ib_mad_port_list_lock is being held
93 static inline struct ib_mad_port_private *
94 __ib_get_mad_port(struct ib_device *device, int port_num)
96 struct ib_mad_port_private *entry;
98 list_for_each_entry(entry, &ib_mad_port_list, port_list) {
99 if (entry->device == device && entry->port_num == port_num)
100 return entry;
102 return NULL;
106 * Wrapper function to return a ib_mad_port_private structure or NULL
107 * for a device/port
109 static inline struct ib_mad_port_private *
110 ib_get_mad_port(struct ib_device *device, int port_num)
112 struct ib_mad_port_private *entry;
113 unsigned long flags;
115 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
116 entry = __ib_get_mad_port(device, port_num);
117 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
119 return entry;
122 static inline u8 convert_mgmt_class(u8 mgmt_class)
124 /* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0 */
125 return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ?
126 0 : mgmt_class;
129 static int get_spl_qp_index(enum ib_qp_type qp_type)
131 switch (qp_type)
133 case IB_QPT_SMI:
134 return 0;
135 case IB_QPT_GSI:
136 return 1;
137 default:
138 return -1;
142 static int vendor_class_index(u8 mgmt_class)
144 return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START;
147 static int is_vendor_class(u8 mgmt_class)
149 if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) ||
150 (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END))
151 return 0;
152 return 1;
155 static int is_vendor_oui(char *oui)
157 if (oui[0] || oui[1] || oui[2])
158 return 1;
159 return 0;
162 static int is_vendor_method_in_use(
163 struct ib_mad_mgmt_vendor_class *vendor_class,
164 struct ib_mad_reg_req *mad_reg_req)
166 struct ib_mad_mgmt_method_table *method;
167 int i;
169 for (i = 0; i < MAX_MGMT_OUI; i++) {
170 if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) {
171 method = vendor_class->method_table[i];
172 if (method) {
173 if (method_in_use(&method, mad_reg_req))
174 return 1;
175 else
176 break;
180 return 0;
183 int ib_response_mad(const struct ib_mad_hdr *hdr)
185 return ((hdr->method & IB_MGMT_METHOD_RESP) ||
186 (hdr->method == IB_MGMT_METHOD_TRAP_REPRESS) ||
187 ((hdr->mgmt_class == IB_MGMT_CLASS_BM) &&
188 (hdr->attr_mod & IB_BM_ATTR_MOD_RESP)));
190 EXPORT_SYMBOL(ib_response_mad);
193 * ib_register_mad_agent - Register to send/receive MADs
195 struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
196 u8 port_num,
197 enum ib_qp_type qp_type,
198 struct ib_mad_reg_req *mad_reg_req,
199 u8 rmpp_version,
200 ib_mad_send_handler send_handler,
201 ib_mad_recv_handler recv_handler,
202 void *context,
203 u32 registration_flags)
205 struct ib_mad_port_private *port_priv;
206 struct ib_mad_agent *ret = ERR_PTR(-EINVAL);
207 struct ib_mad_agent_private *mad_agent_priv;
208 struct ib_mad_reg_req *reg_req = NULL;
209 struct ib_mad_mgmt_class_table *class;
210 struct ib_mad_mgmt_vendor_class_table *vendor;
211 struct ib_mad_mgmt_vendor_class *vendor_class;
212 struct ib_mad_mgmt_method_table *method;
213 int ret2, qpn;
214 unsigned long flags;
215 u8 mgmt_class, vclass;
217 /* Validate parameters */
218 qpn = get_spl_qp_index(qp_type);
219 if (qpn == -1) {
220 dev_notice(&device->dev,
221 "ib_register_mad_agent: invalid QP Type %d\n",
222 qp_type);
223 goto error1;
226 if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION) {
227 dev_notice(&device->dev,
228 "ib_register_mad_agent: invalid RMPP Version %u\n",
229 rmpp_version);
230 goto error1;
233 /* Validate MAD registration request if supplied */
234 if (mad_reg_req) {
235 if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION) {
236 dev_notice(&device->dev,
237 "ib_register_mad_agent: invalid Class Version %u\n",
238 mad_reg_req->mgmt_class_version);
239 goto error1;
241 if (!recv_handler) {
242 dev_notice(&device->dev,
243 "ib_register_mad_agent: no recv_handler\n");
244 goto error1;
246 if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
248 * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
249 * one in this range currently allowed
251 if (mad_reg_req->mgmt_class !=
252 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
253 dev_notice(&device->dev,
254 "ib_register_mad_agent: Invalid Mgmt Class 0x%x\n",
255 mad_reg_req->mgmt_class);
256 goto error1;
258 } else if (mad_reg_req->mgmt_class == 0) {
260 * Class 0 is reserved in IBA and is used for
261 * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
263 dev_notice(&device->dev,
264 "ib_register_mad_agent: Invalid Mgmt Class 0\n");
265 goto error1;
266 } else if (is_vendor_class(mad_reg_req->mgmt_class)) {
268 * If class is in "new" vendor range,
269 * ensure supplied OUI is not zero
271 if (!is_vendor_oui(mad_reg_req->oui)) {
272 dev_notice(&device->dev,
273 "ib_register_mad_agent: No OUI specified for class 0x%x\n",
274 mad_reg_req->mgmt_class);
275 goto error1;
278 /* Make sure class supplied is consistent with RMPP */
279 if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) {
280 if (rmpp_version) {
281 dev_notice(&device->dev,
282 "ib_register_mad_agent: RMPP version for non-RMPP class 0x%x\n",
283 mad_reg_req->mgmt_class);
284 goto error1;
288 /* Make sure class supplied is consistent with QP type */
289 if (qp_type == IB_QPT_SMI) {
290 if ((mad_reg_req->mgmt_class !=
291 IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
292 (mad_reg_req->mgmt_class !=
293 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
294 dev_notice(&device->dev,
295 "ib_register_mad_agent: Invalid SM QP type: class 0x%x\n",
296 mad_reg_req->mgmt_class);
297 goto error1;
299 } else {
300 if ((mad_reg_req->mgmt_class ==
301 IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
302 (mad_reg_req->mgmt_class ==
303 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
304 dev_notice(&device->dev,
305 "ib_register_mad_agent: Invalid GS QP type: class 0x%x\n",
306 mad_reg_req->mgmt_class);
307 goto error1;
310 } else {
311 /* No registration request supplied */
312 if (!send_handler)
313 goto error1;
314 if (registration_flags & IB_MAD_USER_RMPP)
315 goto error1;
318 /* Validate device and port */
319 port_priv = ib_get_mad_port(device, port_num);
320 if (!port_priv) {
321 dev_notice(&device->dev,
322 "ib_register_mad_agent: Invalid port %d\n",
323 port_num);
324 ret = ERR_PTR(-ENODEV);
325 goto error1;
328 /* Verify the QP requested is supported. For example, Ethernet devices
329 * will not have QP0 */
330 if (!port_priv->qp_info[qpn].qp) {
331 dev_notice(&device->dev,
332 "ib_register_mad_agent: QP %d not supported\n", qpn);
333 ret = ERR_PTR(-EPROTONOSUPPORT);
334 goto error1;
337 /* Allocate structures */
338 mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
339 if (!mad_agent_priv) {
340 ret = ERR_PTR(-ENOMEM);
341 goto error1;
344 if (mad_reg_req) {
345 reg_req = kmemdup(mad_reg_req, sizeof *reg_req, GFP_KERNEL);
346 if (!reg_req) {
347 ret = ERR_PTR(-ENOMEM);
348 goto error3;
352 /* Now, fill in the various structures */
353 mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
354 mad_agent_priv->reg_req = reg_req;
355 mad_agent_priv->agent.rmpp_version = rmpp_version;
356 mad_agent_priv->agent.device = device;
357 mad_agent_priv->agent.recv_handler = recv_handler;
358 mad_agent_priv->agent.send_handler = send_handler;
359 mad_agent_priv->agent.context = context;
360 mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
361 mad_agent_priv->agent.port_num = port_num;
362 mad_agent_priv->agent.flags = registration_flags;
363 spin_lock_init(&mad_agent_priv->lock);
364 INIT_LIST_HEAD(&mad_agent_priv->send_list);
365 INIT_LIST_HEAD(&mad_agent_priv->wait_list);
366 INIT_LIST_HEAD(&mad_agent_priv->done_list);
367 INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
368 INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
369 INIT_LIST_HEAD(&mad_agent_priv->local_list);
370 INIT_WORK(&mad_agent_priv->local_work, local_completions);
371 atomic_set(&mad_agent_priv->refcount, 1);
372 init_completion(&mad_agent_priv->comp);
374 ret2 = ib_mad_agent_security_setup(&mad_agent_priv->agent, qp_type);
375 if (ret2) {
376 ret = ERR_PTR(ret2);
377 goto error4;
380 spin_lock_irqsave(&port_priv->reg_lock, flags);
381 mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;
384 * Make sure MAD registration (if supplied)
385 * is non overlapping with any existing ones
387 if (mad_reg_req) {
388 mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
389 if (!is_vendor_class(mgmt_class)) {
390 class = port_priv->version[mad_reg_req->
391 mgmt_class_version].class;
392 if (class) {
393 method = class->method_table[mgmt_class];
394 if (method) {
395 if (method_in_use(&method,
396 mad_reg_req))
397 goto error5;
400 ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
401 mgmt_class);
402 } else {
403 /* "New" vendor class range */
404 vendor = port_priv->version[mad_reg_req->
405 mgmt_class_version].vendor;
406 if (vendor) {
407 vclass = vendor_class_index(mgmt_class);
408 vendor_class = vendor->vendor_class[vclass];
409 if (vendor_class) {
410 if (is_vendor_method_in_use(
411 vendor_class,
412 mad_reg_req))
413 goto error5;
416 ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
418 if (ret2) {
419 ret = ERR_PTR(ret2);
420 goto error5;
424 /* Add mad agent into port's agent list */
425 list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
426 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
428 return &mad_agent_priv->agent;
429 error5:
430 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
431 ib_mad_agent_security_cleanup(&mad_agent_priv->agent);
432 error4:
433 kfree(reg_req);
434 error3:
435 kfree(mad_agent_priv);
436 error1:
437 return ret;
439 EXPORT_SYMBOL(ib_register_mad_agent);
441 static inline int is_snooping_sends(int mad_snoop_flags)
443 return (mad_snoop_flags &
444 (/*IB_MAD_SNOOP_POSTED_SENDS |
445 IB_MAD_SNOOP_RMPP_SENDS |*/
446 IB_MAD_SNOOP_SEND_COMPLETIONS /*|
447 IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/));
450 static inline int is_snooping_recvs(int mad_snoop_flags)
452 return (mad_snoop_flags &
453 (IB_MAD_SNOOP_RECVS /*|
454 IB_MAD_SNOOP_RMPP_RECVS*/));
457 static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
458 struct ib_mad_snoop_private *mad_snoop_priv)
460 struct ib_mad_snoop_private **new_snoop_table;
461 unsigned long flags;
462 int i;
464 spin_lock_irqsave(&qp_info->snoop_lock, flags);
465 /* Check for empty slot in array. */
466 for (i = 0; i < qp_info->snoop_table_size; i++)
467 if (!qp_info->snoop_table[i])
468 break;
470 if (i == qp_info->snoop_table_size) {
471 /* Grow table. */
472 new_snoop_table = krealloc(qp_info->snoop_table,
473 sizeof mad_snoop_priv *
474 (qp_info->snoop_table_size + 1),
475 GFP_ATOMIC);
476 if (!new_snoop_table) {
477 i = -ENOMEM;
478 goto out;
481 qp_info->snoop_table = new_snoop_table;
482 qp_info->snoop_table_size++;
484 qp_info->snoop_table[i] = mad_snoop_priv;
485 atomic_inc(&qp_info->snoop_count);
486 out:
487 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
488 return i;
491 struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
492 u8 port_num,
493 enum ib_qp_type qp_type,
494 int mad_snoop_flags,
495 ib_mad_snoop_handler snoop_handler,
496 ib_mad_recv_handler recv_handler,
497 void *context)
499 struct ib_mad_port_private *port_priv;
500 struct ib_mad_agent *ret;
501 struct ib_mad_snoop_private *mad_snoop_priv;
502 int qpn;
503 int err;
505 /* Validate parameters */
506 if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
507 (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
508 ret = ERR_PTR(-EINVAL);
509 goto error1;
511 qpn = get_spl_qp_index(qp_type);
512 if (qpn == -1) {
513 ret = ERR_PTR(-EINVAL);
514 goto error1;
516 port_priv = ib_get_mad_port(device, port_num);
517 if (!port_priv) {
518 ret = ERR_PTR(-ENODEV);
519 goto error1;
521 /* Allocate structures */
522 mad_snoop_priv = kzalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
523 if (!mad_snoop_priv) {
524 ret = ERR_PTR(-ENOMEM);
525 goto error1;
528 /* Now, fill in the various structures */
529 mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
530 mad_snoop_priv->agent.device = device;
531 mad_snoop_priv->agent.recv_handler = recv_handler;
532 mad_snoop_priv->agent.snoop_handler = snoop_handler;
533 mad_snoop_priv->agent.context = context;
534 mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
535 mad_snoop_priv->agent.port_num = port_num;
536 mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
537 init_completion(&mad_snoop_priv->comp);
539 err = ib_mad_agent_security_setup(&mad_snoop_priv->agent, qp_type);
540 if (err) {
541 ret = ERR_PTR(err);
542 goto error2;
545 mad_snoop_priv->snoop_index = register_snoop_agent(
546 &port_priv->qp_info[qpn],
547 mad_snoop_priv);
548 if (mad_snoop_priv->snoop_index < 0) {
549 ret = ERR_PTR(mad_snoop_priv->snoop_index);
550 goto error3;
553 atomic_set(&mad_snoop_priv->refcount, 1);
554 return &mad_snoop_priv->agent;
555 error3:
556 ib_mad_agent_security_cleanup(&mad_snoop_priv->agent);
557 error2:
558 kfree(mad_snoop_priv);
559 error1:
560 return ret;
562 EXPORT_SYMBOL(ib_register_mad_snoop);
564 static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
566 if (atomic_dec_and_test(&mad_agent_priv->refcount))
567 complete(&mad_agent_priv->comp);
570 static inline void deref_snoop_agent(struct ib_mad_snoop_private *mad_snoop_priv)
572 if (atomic_dec_and_test(&mad_snoop_priv->refcount))
573 complete(&mad_snoop_priv->comp);
576 static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
578 struct ib_mad_port_private *port_priv;
579 unsigned long flags;
581 /* Note that we could still be handling received MADs */
584 * Canceling all sends results in dropping received response
585 * MADs, preventing us from queuing additional work
587 cancel_mads(mad_agent_priv);
588 port_priv = mad_agent_priv->qp_info->port_priv;
589 cancel_delayed_work(&mad_agent_priv->timed_work);
591 spin_lock_irqsave(&port_priv->reg_lock, flags);
592 remove_mad_reg_req(mad_agent_priv);
593 list_del(&mad_agent_priv->agent_list);
594 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
596 flush_workqueue(port_priv->wq);
597 ib_cancel_rmpp_recvs(mad_agent_priv);
599 deref_mad_agent(mad_agent_priv);
600 wait_for_completion(&mad_agent_priv->comp);
602 ib_mad_agent_security_cleanup(&mad_agent_priv->agent);
604 kfree(mad_agent_priv->reg_req);
605 kfree(mad_agent_priv);
608 static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
610 struct ib_mad_qp_info *qp_info;
611 unsigned long flags;
613 qp_info = mad_snoop_priv->qp_info;
614 spin_lock_irqsave(&qp_info->snoop_lock, flags);
615 qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
616 atomic_dec(&qp_info->snoop_count);
617 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
619 deref_snoop_agent(mad_snoop_priv);
620 wait_for_completion(&mad_snoop_priv->comp);
622 ib_mad_agent_security_cleanup(&mad_snoop_priv->agent);
624 kfree(mad_snoop_priv);
628 * ib_unregister_mad_agent - Unregisters a client from using MAD services
630 void ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
632 struct ib_mad_agent_private *mad_agent_priv;
633 struct ib_mad_snoop_private *mad_snoop_priv;
635 /* If the TID is zero, the agent can only snoop. */
636 if (mad_agent->hi_tid) {
637 mad_agent_priv = container_of(mad_agent,
638 struct ib_mad_agent_private,
639 agent);
640 unregister_mad_agent(mad_agent_priv);
641 } else {
642 mad_snoop_priv = container_of(mad_agent,
643 struct ib_mad_snoop_private,
644 agent);
645 unregister_mad_snoop(mad_snoop_priv);
648 EXPORT_SYMBOL(ib_unregister_mad_agent);
650 static void dequeue_mad(struct ib_mad_list_head *mad_list)
652 struct ib_mad_queue *mad_queue;
653 unsigned long flags;
655 BUG_ON(!mad_list->mad_queue);
656 mad_queue = mad_list->mad_queue;
657 spin_lock_irqsave(&mad_queue->lock, flags);
658 list_del(&mad_list->list);
659 mad_queue->count--;
660 spin_unlock_irqrestore(&mad_queue->lock, flags);
663 static void snoop_send(struct ib_mad_qp_info *qp_info,
664 struct ib_mad_send_buf *send_buf,
665 struct ib_mad_send_wc *mad_send_wc,
666 int mad_snoop_flags)
668 struct ib_mad_snoop_private *mad_snoop_priv;
669 unsigned long flags;
670 int i;
672 spin_lock_irqsave(&qp_info->snoop_lock, flags);
673 for (i = 0; i < qp_info->snoop_table_size; i++) {
674 mad_snoop_priv = qp_info->snoop_table[i];
675 if (!mad_snoop_priv ||
676 !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
677 continue;
679 atomic_inc(&mad_snoop_priv->refcount);
680 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
681 mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
682 send_buf, mad_send_wc);
683 deref_snoop_agent(mad_snoop_priv);
684 spin_lock_irqsave(&qp_info->snoop_lock, flags);
686 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
689 static void snoop_recv(struct ib_mad_qp_info *qp_info,
690 struct ib_mad_recv_wc *mad_recv_wc,
691 int mad_snoop_flags)
693 struct ib_mad_snoop_private *mad_snoop_priv;
694 unsigned long flags;
695 int i;
697 spin_lock_irqsave(&qp_info->snoop_lock, flags);
698 for (i = 0; i < qp_info->snoop_table_size; i++) {
699 mad_snoop_priv = qp_info->snoop_table[i];
700 if (!mad_snoop_priv ||
701 !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
702 continue;
704 atomic_inc(&mad_snoop_priv->refcount);
705 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
706 mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent, NULL,
707 mad_recv_wc);
708 deref_snoop_agent(mad_snoop_priv);
709 spin_lock_irqsave(&qp_info->snoop_lock, flags);
711 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
714 static void build_smp_wc(struct ib_qp *qp, struct ib_cqe *cqe, u16 slid,
715 u16 pkey_index, u8 port_num, struct ib_wc *wc)
717 memset(wc, 0, sizeof *wc);
718 wc->wr_cqe = cqe;
719 wc->status = IB_WC_SUCCESS;
720 wc->opcode = IB_WC_RECV;
721 wc->pkey_index = pkey_index;
722 wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
723 wc->src_qp = IB_QP0;
724 wc->qp = qp;
725 wc->slid = slid;
726 wc->sl = 0;
727 wc->dlid_path_bits = 0;
728 wc->port_num = port_num;
731 static size_t mad_priv_size(const struct ib_mad_private *mp)
733 return sizeof(struct ib_mad_private) + mp->mad_size;
736 static struct ib_mad_private *alloc_mad_private(size_t mad_size, gfp_t flags)
738 size_t size = sizeof(struct ib_mad_private) + mad_size;
739 struct ib_mad_private *ret = kzalloc(size, flags);
741 if (ret)
742 ret->mad_size = mad_size;
744 return ret;
747 static size_t port_mad_size(const struct ib_mad_port_private *port_priv)
749 return rdma_max_mad_size(port_priv->device, port_priv->port_num);
752 static size_t mad_priv_dma_size(const struct ib_mad_private *mp)
754 return sizeof(struct ib_grh) + mp->mad_size;
758 * Return 0 if SMP is to be sent
759 * Return 1 if SMP was consumed locally (whether or not solicited)
760 * Return < 0 if error
762 static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
763 struct ib_mad_send_wr_private *mad_send_wr)
765 int ret = 0;
766 struct ib_smp *smp = mad_send_wr->send_buf.mad;
767 struct opa_smp *opa_smp = (struct opa_smp *)smp;
768 unsigned long flags;
769 struct ib_mad_local_private *local;
770 struct ib_mad_private *mad_priv;
771 struct ib_mad_port_private *port_priv;
772 struct ib_mad_agent_private *recv_mad_agent = NULL;
773 struct ib_device *device = mad_agent_priv->agent.device;
774 u8 port_num;
775 struct ib_wc mad_wc;
776 struct ib_ud_wr *send_wr = &mad_send_wr->send_wr;
777 size_t mad_size = port_mad_size(mad_agent_priv->qp_info->port_priv);
778 u16 out_mad_pkey_index = 0;
779 u16 drslid;
780 bool opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
781 mad_agent_priv->qp_info->port_priv->port_num);
783 if (rdma_cap_ib_switch(device) &&
784 smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
785 port_num = send_wr->port_num;
786 else
787 port_num = mad_agent_priv->agent.port_num;
790 * Directed route handling starts if the initial LID routed part of
791 * a request or the ending LID routed part of a response is empty.
792 * If we are at the start of the LID routed part, don't update the
793 * hop_ptr or hop_cnt. See section 14.2.2, Vol 1 IB spec.
795 if (opa && smp->class_version == OPA_SM_CLASS_VERSION) {
796 u32 opa_drslid;
798 if ((opa_get_smp_direction(opa_smp)
799 ? opa_smp->route.dr.dr_dlid : opa_smp->route.dr.dr_slid) ==
800 OPA_LID_PERMISSIVE &&
801 opa_smi_handle_dr_smp_send(opa_smp,
802 rdma_cap_ib_switch(device),
803 port_num) == IB_SMI_DISCARD) {
804 ret = -EINVAL;
805 dev_err(&device->dev, "OPA Invalid directed route\n");
806 goto out;
808 opa_drslid = be32_to_cpu(opa_smp->route.dr.dr_slid);
809 if (opa_drslid != be32_to_cpu(OPA_LID_PERMISSIVE) &&
810 opa_drslid & 0xffff0000) {
811 ret = -EINVAL;
812 dev_err(&device->dev, "OPA Invalid dr_slid 0x%x\n",
813 opa_drslid);
814 goto out;
816 drslid = (u16)(opa_drslid & 0x0000ffff);
818 /* Check to post send on QP or process locally */
819 if (opa_smi_check_local_smp(opa_smp, device) == IB_SMI_DISCARD &&
820 opa_smi_check_local_returning_smp(opa_smp, device) == IB_SMI_DISCARD)
821 goto out;
822 } else {
823 if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
824 IB_LID_PERMISSIVE &&
825 smi_handle_dr_smp_send(smp, rdma_cap_ib_switch(device), port_num) ==
826 IB_SMI_DISCARD) {
827 ret = -EINVAL;
828 dev_err(&device->dev, "Invalid directed route\n");
829 goto out;
831 drslid = be16_to_cpu(smp->dr_slid);
833 /* Check to post send on QP or process locally */
834 if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD &&
835 smi_check_local_returning_smp(smp, device) == IB_SMI_DISCARD)
836 goto out;
839 local = kmalloc(sizeof *local, GFP_ATOMIC);
840 if (!local) {
841 ret = -ENOMEM;
842 goto out;
844 local->mad_priv = NULL;
845 local->recv_mad_agent = NULL;
846 mad_priv = alloc_mad_private(mad_size, GFP_ATOMIC);
847 if (!mad_priv) {
848 ret = -ENOMEM;
849 kfree(local);
850 goto out;
853 build_smp_wc(mad_agent_priv->agent.qp,
854 send_wr->wr.wr_cqe, drslid,
855 send_wr->pkey_index,
856 send_wr->port_num, &mad_wc);
858 if (opa && smp->base_version == OPA_MGMT_BASE_VERSION) {
859 mad_wc.byte_len = mad_send_wr->send_buf.hdr_len
860 + mad_send_wr->send_buf.data_len
861 + sizeof(struct ib_grh);
864 /* No GRH for DR SMP */
865 ret = device->process_mad(device, 0, port_num, &mad_wc, NULL,
866 (const struct ib_mad_hdr *)smp, mad_size,
867 (struct ib_mad_hdr *)mad_priv->mad,
868 &mad_size, &out_mad_pkey_index);
869 switch (ret)
871 case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
872 if (ib_response_mad((const struct ib_mad_hdr *)mad_priv->mad) &&
873 mad_agent_priv->agent.recv_handler) {
874 local->mad_priv = mad_priv;
875 local->recv_mad_agent = mad_agent_priv;
877 * Reference MAD agent until receive
878 * side of local completion handled
880 atomic_inc(&mad_agent_priv->refcount);
881 } else
882 kfree(mad_priv);
883 break;
884 case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
885 kfree(mad_priv);
886 break;
887 case IB_MAD_RESULT_SUCCESS:
888 /* Treat like an incoming receive MAD */
889 port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
890 mad_agent_priv->agent.port_num);
891 if (port_priv) {
892 memcpy(mad_priv->mad, smp, mad_priv->mad_size);
893 recv_mad_agent = find_mad_agent(port_priv,
894 (const struct ib_mad_hdr *)mad_priv->mad);
896 if (!port_priv || !recv_mad_agent) {
898 * No receiving agent so drop packet and
899 * generate send completion.
901 kfree(mad_priv);
902 break;
904 local->mad_priv = mad_priv;
905 local->recv_mad_agent = recv_mad_agent;
906 break;
907 default:
908 kfree(mad_priv);
909 kfree(local);
910 ret = -EINVAL;
911 goto out;
914 local->mad_send_wr = mad_send_wr;
915 if (opa) {
916 local->mad_send_wr->send_wr.pkey_index = out_mad_pkey_index;
917 local->return_wc_byte_len = mad_size;
919 /* Reference MAD agent until send side of local completion handled */
920 atomic_inc(&mad_agent_priv->refcount);
921 /* Queue local completion to local list */
922 spin_lock_irqsave(&mad_agent_priv->lock, flags);
923 list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
924 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
925 queue_work(mad_agent_priv->qp_info->port_priv->wq,
926 &mad_agent_priv->local_work);
927 ret = 1;
928 out:
929 return ret;
932 static int get_pad_size(int hdr_len, int data_len, size_t mad_size)
934 int seg_size, pad;
936 seg_size = mad_size - hdr_len;
937 if (data_len && seg_size) {
938 pad = seg_size - data_len % seg_size;
939 return pad == seg_size ? 0 : pad;
940 } else
941 return seg_size;
944 static void free_send_rmpp_list(struct ib_mad_send_wr_private *mad_send_wr)
946 struct ib_rmpp_segment *s, *t;
948 list_for_each_entry_safe(s, t, &mad_send_wr->rmpp_list, list) {
949 list_del(&s->list);
950 kfree(s);
954 static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
955 size_t mad_size, gfp_t gfp_mask)
957 struct ib_mad_send_buf *send_buf = &send_wr->send_buf;
958 struct ib_rmpp_mad *rmpp_mad = send_buf->mad;
959 struct ib_rmpp_segment *seg = NULL;
960 int left, seg_size, pad;
962 send_buf->seg_size = mad_size - send_buf->hdr_len;
963 send_buf->seg_rmpp_size = mad_size - IB_MGMT_RMPP_HDR;
964 seg_size = send_buf->seg_size;
965 pad = send_wr->pad;
967 /* Allocate data segments. */
968 for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
969 seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask);
970 if (!seg) {
971 free_send_rmpp_list(send_wr);
972 return -ENOMEM;
974 seg->num = ++send_buf->seg_count;
975 list_add_tail(&seg->list, &send_wr->rmpp_list);
978 /* Zero any padding */
979 if (pad)
980 memset(seg->data + seg_size - pad, 0, pad);
982 rmpp_mad->rmpp_hdr.rmpp_version = send_wr->mad_agent_priv->
983 agent.rmpp_version;
984 rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA;
985 ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
987 send_wr->cur_seg = container_of(send_wr->rmpp_list.next,
988 struct ib_rmpp_segment, list);
989 send_wr->last_ack_seg = send_wr->cur_seg;
990 return 0;
993 int ib_mad_kernel_rmpp_agent(const struct ib_mad_agent *agent)
995 return agent->rmpp_version && !(agent->flags & IB_MAD_USER_RMPP);
997 EXPORT_SYMBOL(ib_mad_kernel_rmpp_agent);
999 struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
1000 u32 remote_qpn, u16 pkey_index,
1001 int rmpp_active,
1002 int hdr_len, int data_len,
1003 gfp_t gfp_mask,
1004 u8 base_version)
1006 struct ib_mad_agent_private *mad_agent_priv;
1007 struct ib_mad_send_wr_private *mad_send_wr;
1008 int pad, message_size, ret, size;
1009 void *buf;
1010 size_t mad_size;
1011 bool opa;
1013 mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
1014 agent);
1016 opa = rdma_cap_opa_mad(mad_agent->device, mad_agent->port_num);
1018 if (opa && base_version == OPA_MGMT_BASE_VERSION)
1019 mad_size = sizeof(struct opa_mad);
1020 else
1021 mad_size = sizeof(struct ib_mad);
1023 pad = get_pad_size(hdr_len, data_len, mad_size);
1024 message_size = hdr_len + data_len + pad;
1026 if (ib_mad_kernel_rmpp_agent(mad_agent)) {
1027 if (!rmpp_active && message_size > mad_size)
1028 return ERR_PTR(-EINVAL);
1029 } else
1030 if (rmpp_active || message_size > mad_size)
1031 return ERR_PTR(-EINVAL);
1033 size = rmpp_active ? hdr_len : mad_size;
1034 buf = kzalloc(sizeof *mad_send_wr + size, gfp_mask);
1035 if (!buf)
1036 return ERR_PTR(-ENOMEM);
1038 mad_send_wr = buf + size;
1039 INIT_LIST_HEAD(&mad_send_wr->rmpp_list);
1040 mad_send_wr->send_buf.mad = buf;
1041 mad_send_wr->send_buf.hdr_len = hdr_len;
1042 mad_send_wr->send_buf.data_len = data_len;
1043 mad_send_wr->pad = pad;
1045 mad_send_wr->mad_agent_priv = mad_agent_priv;
1046 mad_send_wr->sg_list[0].length = hdr_len;
1047 mad_send_wr->sg_list[0].lkey = mad_agent->qp->pd->local_dma_lkey;
1049 /* OPA MADs don't have to be the full 2048 bytes */
1050 if (opa && base_version == OPA_MGMT_BASE_VERSION &&
1051 data_len < mad_size - hdr_len)
1052 mad_send_wr->sg_list[1].length = data_len;
1053 else
1054 mad_send_wr->sg_list[1].length = mad_size - hdr_len;
1056 mad_send_wr->sg_list[1].lkey = mad_agent->qp->pd->local_dma_lkey;
1058 mad_send_wr->mad_list.cqe.done = ib_mad_send_done;
1060 mad_send_wr->send_wr.wr.wr_cqe = &mad_send_wr->mad_list.cqe;
1061 mad_send_wr->send_wr.wr.sg_list = mad_send_wr->sg_list;
1062 mad_send_wr->send_wr.wr.num_sge = 2;
1063 mad_send_wr->send_wr.wr.opcode = IB_WR_SEND;
1064 mad_send_wr->send_wr.wr.send_flags = IB_SEND_SIGNALED;
1065 mad_send_wr->send_wr.remote_qpn = remote_qpn;
1066 mad_send_wr->send_wr.remote_qkey = IB_QP_SET_QKEY;
1067 mad_send_wr->send_wr.pkey_index = pkey_index;
1069 if (rmpp_active) {
1070 ret = alloc_send_rmpp_list(mad_send_wr, mad_size, gfp_mask);
1071 if (ret) {
1072 kfree(buf);
1073 return ERR_PTR(ret);
1077 mad_send_wr->send_buf.mad_agent = mad_agent;
1078 atomic_inc(&mad_agent_priv->refcount);
1079 return &mad_send_wr->send_buf;
1081 EXPORT_SYMBOL(ib_create_send_mad);
1083 int ib_get_mad_data_offset(u8 mgmt_class)
1085 if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
1086 return IB_MGMT_SA_HDR;
1087 else if ((mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
1088 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
1089 (mgmt_class == IB_MGMT_CLASS_BIS))
1090 return IB_MGMT_DEVICE_HDR;
1091 else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
1092 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
1093 return IB_MGMT_VENDOR_HDR;
1094 else
1095 return IB_MGMT_MAD_HDR;
1097 EXPORT_SYMBOL(ib_get_mad_data_offset);
1099 int ib_is_mad_class_rmpp(u8 mgmt_class)
1101 if ((mgmt_class == IB_MGMT_CLASS_SUBN_ADM) ||
1102 (mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
1103 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
1104 (mgmt_class == IB_MGMT_CLASS_BIS) ||
1105 ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
1106 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)))
1107 return 1;
1108 return 0;
1110 EXPORT_SYMBOL(ib_is_mad_class_rmpp);
1112 void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num)
1114 struct ib_mad_send_wr_private *mad_send_wr;
1115 struct list_head *list;
1117 mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
1118 send_buf);
1119 list = &mad_send_wr->cur_seg->list;
1121 if (mad_send_wr->cur_seg->num < seg_num) {
1122 list_for_each_entry(mad_send_wr->cur_seg, list, list)
1123 if (mad_send_wr->cur_seg->num == seg_num)
1124 break;
1125 } else if (mad_send_wr->cur_seg->num > seg_num) {
1126 list_for_each_entry_reverse(mad_send_wr->cur_seg, list, list)
1127 if (mad_send_wr->cur_seg->num == seg_num)
1128 break;
1130 return mad_send_wr->cur_seg->data;
1132 EXPORT_SYMBOL(ib_get_rmpp_segment);
1134 static inline void *ib_get_payload(struct ib_mad_send_wr_private *mad_send_wr)
1136 if (mad_send_wr->send_buf.seg_count)
1137 return ib_get_rmpp_segment(&mad_send_wr->send_buf,
1138 mad_send_wr->seg_num);
1139 else
1140 return mad_send_wr->send_buf.mad +
1141 mad_send_wr->send_buf.hdr_len;
1144 void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
1146 struct ib_mad_agent_private *mad_agent_priv;
1147 struct ib_mad_send_wr_private *mad_send_wr;
1149 mad_agent_priv = container_of(send_buf->mad_agent,
1150 struct ib_mad_agent_private, agent);
1151 mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
1152 send_buf);
1154 free_send_rmpp_list(mad_send_wr);
1155 kfree(send_buf->mad);
1156 deref_mad_agent(mad_agent_priv);
1158 EXPORT_SYMBOL(ib_free_send_mad);
1160 int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
1162 struct ib_mad_qp_info *qp_info;
1163 struct list_head *list;
1164 struct ib_send_wr *bad_send_wr;
1165 struct ib_mad_agent *mad_agent;
1166 struct ib_sge *sge;
1167 unsigned long flags;
1168 int ret;
1170 /* Set WR ID to find mad_send_wr upon completion */
1171 qp_info = mad_send_wr->mad_agent_priv->qp_info;
1172 mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;
1173 mad_send_wr->mad_list.cqe.done = ib_mad_send_done;
1174 mad_send_wr->send_wr.wr.wr_cqe = &mad_send_wr->mad_list.cqe;
1176 mad_agent = mad_send_wr->send_buf.mad_agent;
1177 sge = mad_send_wr->sg_list;
1178 sge[0].addr = ib_dma_map_single(mad_agent->device,
1179 mad_send_wr->send_buf.mad,
1180 sge[0].length,
1181 DMA_TO_DEVICE);
1182 if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[0].addr)))
1183 return -ENOMEM;
1185 mad_send_wr->header_mapping = sge[0].addr;
1187 sge[1].addr = ib_dma_map_single(mad_agent->device,
1188 ib_get_payload(mad_send_wr),
1189 sge[1].length,
1190 DMA_TO_DEVICE);
1191 if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[1].addr))) {
1192 ib_dma_unmap_single(mad_agent->device,
1193 mad_send_wr->header_mapping,
1194 sge[0].length, DMA_TO_DEVICE);
1195 return -ENOMEM;
1197 mad_send_wr->payload_mapping = sge[1].addr;
1199 spin_lock_irqsave(&qp_info->send_queue.lock, flags);
1200 if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
1201 ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr.wr,
1202 &bad_send_wr);
1203 list = &qp_info->send_queue.list;
1204 } else {
1205 ret = 0;
1206 list = &qp_info->overflow_list;
1209 if (!ret) {
1210 qp_info->send_queue.count++;
1211 list_add_tail(&mad_send_wr->mad_list.list, list);
1213 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
1214 if (ret) {
1215 ib_dma_unmap_single(mad_agent->device,
1216 mad_send_wr->header_mapping,
1217 sge[0].length, DMA_TO_DEVICE);
1218 ib_dma_unmap_single(mad_agent->device,
1219 mad_send_wr->payload_mapping,
1220 sge[1].length, DMA_TO_DEVICE);
1222 return ret;
1226 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
1227 * with the registered client
1229 int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
1230 struct ib_mad_send_buf **bad_send_buf)
1232 struct ib_mad_agent_private *mad_agent_priv;
1233 struct ib_mad_send_buf *next_send_buf;
1234 struct ib_mad_send_wr_private *mad_send_wr;
1235 unsigned long flags;
1236 int ret = -EINVAL;
1238 /* Walk list of send WRs and post each on send list */
1239 for (; send_buf; send_buf = next_send_buf) {
1240 mad_send_wr = container_of(send_buf,
1241 struct ib_mad_send_wr_private,
1242 send_buf);
1243 mad_agent_priv = mad_send_wr->mad_agent_priv;
1245 ret = ib_mad_enforce_security(mad_agent_priv,
1246 mad_send_wr->send_wr.pkey_index);
1247 if (ret)
1248 goto error;
1250 if (!send_buf->mad_agent->send_handler ||
1251 (send_buf->timeout_ms &&
1252 !send_buf->mad_agent->recv_handler)) {
1253 ret = -EINVAL;
1254 goto error;
1257 if (!ib_is_mad_class_rmpp(((struct ib_mad_hdr *) send_buf->mad)->mgmt_class)) {
1258 if (mad_agent_priv->agent.rmpp_version) {
1259 ret = -EINVAL;
1260 goto error;
1265 * Save pointer to next work request to post in case the
1266 * current one completes, and the user modifies the work
1267 * request associated with the completion
1269 next_send_buf = send_buf->next;
1270 mad_send_wr->send_wr.ah = send_buf->ah;
1272 if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class ==
1273 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
1274 ret = handle_outgoing_dr_smp(mad_agent_priv,
1275 mad_send_wr);
1276 if (ret < 0) /* error */
1277 goto error;
1278 else if (ret == 1) /* locally consumed */
1279 continue;
1282 mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid;
1283 /* Timeout will be updated after send completes */
1284 mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms);
1285 mad_send_wr->max_retries = send_buf->retries;
1286 mad_send_wr->retries_left = send_buf->retries;
1287 send_buf->retries = 0;
1288 /* Reference for work request to QP + response */
1289 mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);
1290 mad_send_wr->status = IB_WC_SUCCESS;
1292 /* Reference MAD agent until send completes */
1293 atomic_inc(&mad_agent_priv->refcount);
1294 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1295 list_add_tail(&mad_send_wr->agent_list,
1296 &mad_agent_priv->send_list);
1297 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1299 if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
1300 ret = ib_send_rmpp_mad(mad_send_wr);
1301 if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED)
1302 ret = ib_send_mad(mad_send_wr);
1303 } else
1304 ret = ib_send_mad(mad_send_wr);
1305 if (ret < 0) {
1306 /* Fail send request */
1307 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1308 list_del(&mad_send_wr->agent_list);
1309 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1310 atomic_dec(&mad_agent_priv->refcount);
1311 goto error;
1314 return 0;
1315 error:
1316 if (bad_send_buf)
1317 *bad_send_buf = send_buf;
1318 return ret;
1320 EXPORT_SYMBOL(ib_post_send_mad);
1323 * ib_free_recv_mad - Returns data buffers used to receive
1324 * a MAD to the access layer
1326 void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
1328 struct ib_mad_recv_buf *mad_recv_buf, *temp_recv_buf;
1329 struct ib_mad_private_header *mad_priv_hdr;
1330 struct ib_mad_private *priv;
1331 struct list_head free_list;
1333 INIT_LIST_HEAD(&free_list);
1334 list_splice_init(&mad_recv_wc->rmpp_list, &free_list);
1336 list_for_each_entry_safe(mad_recv_buf, temp_recv_buf,
1337 &free_list, list) {
1338 mad_recv_wc = container_of(mad_recv_buf, struct ib_mad_recv_wc,
1339 recv_buf);
1340 mad_priv_hdr = container_of(mad_recv_wc,
1341 struct ib_mad_private_header,
1342 recv_wc);
1343 priv = container_of(mad_priv_hdr, struct ib_mad_private,
1344 header);
1345 kfree(priv);
1348 EXPORT_SYMBOL(ib_free_recv_mad);
1350 struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
1351 u8 rmpp_version,
1352 ib_mad_send_handler send_handler,
1353 ib_mad_recv_handler recv_handler,
1354 void *context)
1356 return ERR_PTR(-EINVAL); /* XXX: for now */
1358 EXPORT_SYMBOL(ib_redirect_mad_qp);
1360 int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
1361 struct ib_wc *wc)
1363 dev_err(&mad_agent->device->dev,
1364 "ib_process_mad_wc() not implemented yet\n");
1365 return 0;
1367 EXPORT_SYMBOL(ib_process_mad_wc);
1369 static int method_in_use(struct ib_mad_mgmt_method_table **method,
1370 struct ib_mad_reg_req *mad_reg_req)
1372 int i;
1374 for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) {
1375 if ((*method)->agent[i]) {
1376 pr_err("Method %d already in use\n", i);
1377 return -EINVAL;
1380 return 0;
1383 static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
1385 /* Allocate management method table */
1386 *method = kzalloc(sizeof **method, GFP_ATOMIC);
1387 return (*method) ? 0 : (-ENOMEM);
1391 * Check to see if there are any methods still in use
1393 static int check_method_table(struct ib_mad_mgmt_method_table *method)
1395 int i;
1397 for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
1398 if (method->agent[i])
1399 return 1;
1400 return 0;
1404 * Check to see if there are any method tables for this class still in use
1406 static int check_class_table(struct ib_mad_mgmt_class_table *class)
1408 int i;
1410 for (i = 0; i < MAX_MGMT_CLASS; i++)
1411 if (class->method_table[i])
1412 return 1;
1413 return 0;
1416 static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class)
1418 int i;
1420 for (i = 0; i < MAX_MGMT_OUI; i++)
1421 if (vendor_class->method_table[i])
1422 return 1;
1423 return 0;
1426 static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class,
1427 const char *oui)
1429 int i;
1431 for (i = 0; i < MAX_MGMT_OUI; i++)
1432 /* Is there matching OUI for this vendor class ? */
1433 if (!memcmp(vendor_class->oui[i], oui, 3))
1434 return i;
1436 return -1;
1439 static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor)
1441 int i;
1443 for (i = 0; i < MAX_MGMT_VENDOR_RANGE2; i++)
1444 if (vendor->vendor_class[i])
1445 return 1;
1447 return 0;
1450 static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method,
1451 struct ib_mad_agent_private *agent)
1453 int i;
1455 /* Remove any methods for this mad agent */
1456 for (i = 0; i < IB_MGMT_MAX_METHODS; i++) {
1457 if (method->agent[i] == agent) {
1458 method->agent[i] = NULL;
1463 static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1464 struct ib_mad_agent_private *agent_priv,
1465 u8 mgmt_class)
1467 struct ib_mad_port_private *port_priv;
1468 struct ib_mad_mgmt_class_table **class;
1469 struct ib_mad_mgmt_method_table **method;
1470 int i, ret;
1472 port_priv = agent_priv->qp_info->port_priv;
1473 class = &port_priv->version[mad_reg_req->mgmt_class_version].class;
1474 if (!*class) {
1475 /* Allocate management class table for "new" class version */
1476 *class = kzalloc(sizeof **class, GFP_ATOMIC);
1477 if (!*class) {
1478 ret = -ENOMEM;
1479 goto error1;
1482 /* Allocate method table for this management class */
1483 method = &(*class)->method_table[mgmt_class];
1484 if ((ret = allocate_method_table(method)))
1485 goto error2;
1486 } else {
1487 method = &(*class)->method_table[mgmt_class];
1488 if (!*method) {
1489 /* Allocate method table for this management class */
1490 if ((ret = allocate_method_table(method)))
1491 goto error1;
1495 /* Now, make sure methods are not already in use */
1496 if (method_in_use(method, mad_reg_req))
1497 goto error3;
1499 /* Finally, add in methods being registered */
1500 for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
1501 (*method)->agent[i] = agent_priv;
1503 return 0;
1505 error3:
1506 /* Remove any methods for this mad agent */
1507 remove_methods_mad_agent(*method, agent_priv);
1508 /* Now, check to see if there are any methods in use */
1509 if (!check_method_table(*method)) {
1510 /* If not, release management method table */
1511 kfree(*method);
1512 *method = NULL;
1514 ret = -EINVAL;
1515 goto error1;
1516 error2:
1517 kfree(*class);
1518 *class = NULL;
1519 error1:
1520 return ret;
1523 static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1524 struct ib_mad_agent_private *agent_priv)
1526 struct ib_mad_port_private *port_priv;
1527 struct ib_mad_mgmt_vendor_class_table **vendor_table;
1528 struct ib_mad_mgmt_vendor_class_table *vendor = NULL;
1529 struct ib_mad_mgmt_vendor_class *vendor_class = NULL;
1530 struct ib_mad_mgmt_method_table **method;
1531 int i, ret = -ENOMEM;
1532 u8 vclass;
1534 /* "New" vendor (with OUI) class */
1535 vclass = vendor_class_index(mad_reg_req->mgmt_class);
1536 port_priv = agent_priv->qp_info->port_priv;
1537 vendor_table = &port_priv->version[
1538 mad_reg_req->mgmt_class_version].vendor;
1539 if (!*vendor_table) {
1540 /* Allocate mgmt vendor class table for "new" class version */
1541 vendor = kzalloc(sizeof *vendor, GFP_ATOMIC);
1542 if (!vendor)
1543 goto error1;
1545 *vendor_table = vendor;
1547 if (!(*vendor_table)->vendor_class[vclass]) {
1548 /* Allocate table for this management vendor class */
1549 vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC);
1550 if (!vendor_class)
1551 goto error2;
1553 (*vendor_table)->vendor_class[vclass] = vendor_class;
1555 for (i = 0; i < MAX_MGMT_OUI; i++) {
1556 /* Is there matching OUI for this vendor class ? */
1557 if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i],
1558 mad_reg_req->oui, 3)) {
1559 method = &(*vendor_table)->vendor_class[
1560 vclass]->method_table[i];
1561 BUG_ON(!*method);
1562 goto check_in_use;
1565 for (i = 0; i < MAX_MGMT_OUI; i++) {
1566 /* OUI slot available ? */
1567 if (!is_vendor_oui((*vendor_table)->vendor_class[
1568 vclass]->oui[i])) {
1569 method = &(*vendor_table)->vendor_class[
1570 vclass]->method_table[i];
1571 BUG_ON(*method);
1572 /* Allocate method table for this OUI */
1573 if ((ret = allocate_method_table(method)))
1574 goto error3;
1575 memcpy((*vendor_table)->vendor_class[vclass]->oui[i],
1576 mad_reg_req->oui, 3);
1577 goto check_in_use;
1580 dev_err(&agent_priv->agent.device->dev, "All OUI slots in use\n");
1581 goto error3;
1583 check_in_use:
1584 /* Now, make sure methods are not already in use */
1585 if (method_in_use(method, mad_reg_req))
1586 goto error4;
1588 /* Finally, add in methods being registered */
1589 for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
1590 (*method)->agent[i] = agent_priv;
1592 return 0;
1594 error4:
1595 /* Remove any methods for this mad agent */
1596 remove_methods_mad_agent(*method, agent_priv);
1597 /* Now, check to see if there are any methods in use */
1598 if (!check_method_table(*method)) {
1599 /* If not, release management method table */
1600 kfree(*method);
1601 *method = NULL;
1603 ret = -EINVAL;
1604 error3:
1605 if (vendor_class) {
1606 (*vendor_table)->vendor_class[vclass] = NULL;
1607 kfree(vendor_class);
1609 error2:
1610 if (vendor) {
1611 *vendor_table = NULL;
1612 kfree(vendor);
1614 error1:
1615 return ret;
1618 static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
1620 struct ib_mad_port_private *port_priv;
1621 struct ib_mad_mgmt_class_table *class;
1622 struct ib_mad_mgmt_method_table *method;
1623 struct ib_mad_mgmt_vendor_class_table *vendor;
1624 struct ib_mad_mgmt_vendor_class *vendor_class;
1625 int index;
1626 u8 mgmt_class;
1629 * Was MAD registration request supplied
1630 * with original registration ?
1632 if (!agent_priv->reg_req) {
1633 goto out;
1636 port_priv = agent_priv->qp_info->port_priv;
1637 mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class);
1638 class = port_priv->version[
1639 agent_priv->reg_req->mgmt_class_version].class;
1640 if (!class)
1641 goto vendor_check;
1643 method = class->method_table[mgmt_class];
1644 if (method) {
1645 /* Remove any methods for this mad agent */
1646 remove_methods_mad_agent(method, agent_priv);
1647 /* Now, check to see if there are any methods still in use */
1648 if (!check_method_table(method)) {
1649 /* If not, release management method table */
1650 kfree(method);
1651 class->method_table[mgmt_class] = NULL;
1652 /* Any management classes left ? */
1653 if (!check_class_table(class)) {
1654 /* If not, release management class table */
1655 kfree(class);
1656 port_priv->version[
1657 agent_priv->reg_req->
1658 mgmt_class_version].class = NULL;
1663 vendor_check:
1664 if (!is_vendor_class(mgmt_class))
1665 goto out;
1667 /* normalize mgmt_class to vendor range 2 */
1668 mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class);
1669 vendor = port_priv->version[
1670 agent_priv->reg_req->mgmt_class_version].vendor;
1672 if (!vendor)
1673 goto out;
1675 vendor_class = vendor->vendor_class[mgmt_class];
1676 if (vendor_class) {
1677 index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui);
1678 if (index < 0)
1679 goto out;
1680 method = vendor_class->method_table[index];
1681 if (method) {
1682 /* Remove any methods for this mad agent */
1683 remove_methods_mad_agent(method, agent_priv);
1685 * Now, check to see if there are
1686 * any methods still in use
1688 if (!check_method_table(method)) {
1689 /* If not, release management method table */
1690 kfree(method);
1691 vendor_class->method_table[index] = NULL;
1692 memset(vendor_class->oui[index], 0, 3);
1693 /* Any OUIs left ? */
1694 if (!check_vendor_class(vendor_class)) {
1695 /* If not, release vendor class table */
1696 kfree(vendor_class);
1697 vendor->vendor_class[mgmt_class] = NULL;
1698 /* Any other vendor classes left ? */
1699 if (!check_vendor_table(vendor)) {
1700 kfree(vendor);
1701 port_priv->version[
1702 agent_priv->reg_req->
1703 mgmt_class_version].
1704 vendor = NULL;
1711 out:
1712 return;
1715 static struct ib_mad_agent_private *
1716 find_mad_agent(struct ib_mad_port_private *port_priv,
1717 const struct ib_mad_hdr *mad_hdr)
1719 struct ib_mad_agent_private *mad_agent = NULL;
1720 unsigned long flags;
1722 spin_lock_irqsave(&port_priv->reg_lock, flags);
1723 if (ib_response_mad(mad_hdr)) {
1724 u32 hi_tid;
1725 struct ib_mad_agent_private *entry;
1728 * Routing is based on high 32 bits of transaction ID
1729 * of MAD.
1731 hi_tid = be64_to_cpu(mad_hdr->tid) >> 32;
1732 list_for_each_entry(entry, &port_priv->agent_list, agent_list) {
1733 if (entry->agent.hi_tid == hi_tid) {
1734 mad_agent = entry;
1735 break;
1738 } else {
1739 struct ib_mad_mgmt_class_table *class;
1740 struct ib_mad_mgmt_method_table *method;
1741 struct ib_mad_mgmt_vendor_class_table *vendor;
1742 struct ib_mad_mgmt_vendor_class *vendor_class;
1743 const struct ib_vendor_mad *vendor_mad;
1744 int index;
1747 * Routing is based on version, class, and method
1748 * For "newer" vendor MADs, also based on OUI
1750 if (mad_hdr->class_version >= MAX_MGMT_VERSION)
1751 goto out;
1752 if (!is_vendor_class(mad_hdr->mgmt_class)) {
1753 class = port_priv->version[
1754 mad_hdr->class_version].class;
1755 if (!class)
1756 goto out;
1757 if (convert_mgmt_class(mad_hdr->mgmt_class) >=
1758 ARRAY_SIZE(class->method_table))
1759 goto out;
1760 method = class->method_table[convert_mgmt_class(
1761 mad_hdr->mgmt_class)];
1762 if (method)
1763 mad_agent = method->agent[mad_hdr->method &
1764 ~IB_MGMT_METHOD_RESP];
1765 } else {
1766 vendor = port_priv->version[
1767 mad_hdr->class_version].vendor;
1768 if (!vendor)
1769 goto out;
1770 vendor_class = vendor->vendor_class[vendor_class_index(
1771 mad_hdr->mgmt_class)];
1772 if (!vendor_class)
1773 goto out;
1774 /* Find matching OUI */
1775 vendor_mad = (const struct ib_vendor_mad *)mad_hdr;
1776 index = find_vendor_oui(vendor_class, vendor_mad->oui);
1777 if (index == -1)
1778 goto out;
1779 method = vendor_class->method_table[index];
1780 if (method) {
1781 mad_agent = method->agent[mad_hdr->method &
1782 ~IB_MGMT_METHOD_RESP];
1787 if (mad_agent) {
1788 if (mad_agent->agent.recv_handler)
1789 atomic_inc(&mad_agent->refcount);
1790 else {
1791 dev_notice(&port_priv->device->dev,
1792 "No receive handler for client %p on port %d\n",
1793 &mad_agent->agent, port_priv->port_num);
1794 mad_agent = NULL;
1797 out:
1798 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
1800 return mad_agent;
1803 static int validate_mad(const struct ib_mad_hdr *mad_hdr,
1804 const struct ib_mad_qp_info *qp_info,
1805 bool opa)
1807 int valid = 0;
1808 u32 qp_num = qp_info->qp->qp_num;
1810 /* Make sure MAD base version is understood */
1811 if (mad_hdr->base_version != IB_MGMT_BASE_VERSION &&
1812 (!opa || mad_hdr->base_version != OPA_MGMT_BASE_VERSION)) {
1813 pr_err("MAD received with unsupported base version %d %s\n",
1814 mad_hdr->base_version, opa ? "(opa)" : "");
1815 goto out;
1818 /* Filter SMI packets sent to other than QP0 */
1819 if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
1820 (mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
1821 if (qp_num == 0)
1822 valid = 1;
1823 } else {
1824 /* CM attributes other than ClassPortInfo only use Send method */
1825 if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_CM) &&
1826 (mad_hdr->attr_id != IB_MGMT_CLASSPORTINFO_ATTR_ID) &&
1827 (mad_hdr->method != IB_MGMT_METHOD_SEND))
1828 goto out;
1829 /* Filter GSI packets sent to QP0 */
1830 if (qp_num != 0)
1831 valid = 1;
1834 out:
1835 return valid;
1838 static int is_rmpp_data_mad(const struct ib_mad_agent_private *mad_agent_priv,
1839 const struct ib_mad_hdr *mad_hdr)
1841 struct ib_rmpp_mad *rmpp_mad;
1843 rmpp_mad = (struct ib_rmpp_mad *)mad_hdr;
1844 return !mad_agent_priv->agent.rmpp_version ||
1845 !ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent) ||
1846 !(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
1847 IB_MGMT_RMPP_FLAG_ACTIVE) ||
1848 (rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA);
1851 static inline int rcv_has_same_class(const struct ib_mad_send_wr_private *wr,
1852 const struct ib_mad_recv_wc *rwc)
1854 return ((struct ib_mad_hdr *)(wr->send_buf.mad))->mgmt_class ==
1855 rwc->recv_buf.mad->mad_hdr.mgmt_class;
1858 static inline int rcv_has_same_gid(const struct ib_mad_agent_private *mad_agent_priv,
1859 const struct ib_mad_send_wr_private *wr,
1860 const struct ib_mad_recv_wc *rwc )
1862 struct rdma_ah_attr attr;
1863 u8 send_resp, rcv_resp;
1864 union ib_gid sgid;
1865 struct ib_device *device = mad_agent_priv->agent.device;
1866 u8 port_num = mad_agent_priv->agent.port_num;
1867 u8 lmc;
1868 bool has_grh;
1870 send_resp = ib_response_mad((struct ib_mad_hdr *)wr->send_buf.mad);
1871 rcv_resp = ib_response_mad(&rwc->recv_buf.mad->mad_hdr);
1873 if (send_resp == rcv_resp)
1874 /* both requests, or both responses. GIDs different */
1875 return 0;
1877 if (rdma_query_ah(wr->send_buf.ah, &attr))
1878 /* Assume not equal, to avoid false positives. */
1879 return 0;
1881 has_grh = !!(rdma_ah_get_ah_flags(&attr) & IB_AH_GRH);
1882 if (has_grh != !!(rwc->wc->wc_flags & IB_WC_GRH))
1883 /* one has GID, other does not. Assume different */
1884 return 0;
1886 if (!send_resp && rcv_resp) {
1887 /* is request/response. */
1888 if (!has_grh) {
1889 if (ib_get_cached_lmc(device, port_num, &lmc))
1890 return 0;
1891 return (!lmc || !((rdma_ah_get_path_bits(&attr) ^
1892 rwc->wc->dlid_path_bits) &
1893 ((1 << lmc) - 1)));
1894 } else {
1895 const struct ib_global_route *grh =
1896 rdma_ah_read_grh(&attr);
1898 if (ib_get_cached_gid(device, port_num,
1899 grh->sgid_index, &sgid, NULL))
1900 return 0;
1901 return !memcmp(sgid.raw, rwc->recv_buf.grh->dgid.raw,
1902 16);
1906 if (!has_grh)
1907 return rdma_ah_get_dlid(&attr) == rwc->wc->slid;
1908 else
1909 return !memcmp(rdma_ah_read_grh(&attr)->dgid.raw,
1910 rwc->recv_buf.grh->sgid.raw,
1911 16);
1914 static inline int is_direct(u8 class)
1916 return (class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE);
1919 struct ib_mad_send_wr_private*
1920 ib_find_send_mad(const struct ib_mad_agent_private *mad_agent_priv,
1921 const struct ib_mad_recv_wc *wc)
1923 struct ib_mad_send_wr_private *wr;
1924 const struct ib_mad_hdr *mad_hdr;
1926 mad_hdr = &wc->recv_buf.mad->mad_hdr;
1928 list_for_each_entry(wr, &mad_agent_priv->wait_list, agent_list) {
1929 if ((wr->tid == mad_hdr->tid) &&
1930 rcv_has_same_class(wr, wc) &&
1931 /*
1932 * Don't check GID for direct routed MADs.
1933 * These might have permissive LIDs.
1934 */
1935 (is_direct(mad_hdr->mgmt_class) ||
1936 rcv_has_same_gid(mad_agent_priv, wr, wc)))
1937 return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
1938 }
1940 /*
1941 * It's possible to receive the response before we've
1942 * been notified that the send has completed
1943 */
1944 list_for_each_entry(wr, &mad_agent_priv->send_list, agent_list) {
1945 if (is_rmpp_data_mad(mad_agent_priv, wr->send_buf.mad) &&
1946 wr->tid == mad_hdr->tid &&
1947 wr->timeout &&
1948 rcv_has_same_class(wr, wc) &&
1949 /*
1950 * Don't check GID for direct routed MADs.
1951 * These might have permissive LIDs.
1952 */
1953 (is_direct(mad_hdr->mgmt_class) ||
1954 rcv_has_same_gid(mad_agent_priv, wr, wc)))
1955 /* Verify request has not been canceled */
1956 return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
1957 }
1958 return NULL;
1959 }
1961 void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr)
1963 mad_send_wr->timeout = 0;
1964 if (mad_send_wr->refcount == 1)
1965 list_move_tail(&mad_send_wr->agent_list,
1966 &mad_send_wr->mad_agent_priv->done_list);
1967 }
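/*
 * ib_mad_complete_recv() delivers a received MAD to its agent.  Responses
 * are matched to the request they answer so the client sees the response
 * callback before the send completion; unmatched responses are handed to
 * the agent only when user-space RMPP is in effect, otherwise they are
 * dropped.  The agent reference held by the caller is released on every
 * path.
 */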
1969 static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
1970 struct ib_mad_recv_wc *mad_recv_wc)
1972 struct ib_mad_send_wr_private *mad_send_wr;
1973 struct ib_mad_send_wc mad_send_wc;
1974 unsigned long flags;
1975 int ret;
1977 ret = ib_mad_enforce_security(mad_agent_priv,
1978 mad_recv_wc->wc->pkey_index);
1979 if (ret) {
1980 ib_free_recv_mad(mad_recv_wc);
1981 deref_mad_agent(mad_agent_priv);
1982 return;
1983 }
1984 INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
1985 list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
1986 if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
1987 mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
1988 mad_recv_wc);
1989 if (!mad_recv_wc) {
1990 deref_mad_agent(mad_agent_priv);
1991 return;
1992 }
1993 }
1995 /* Complete corresponding request */
1996 if (ib_response_mad(&mad_recv_wc->recv_buf.mad->mad_hdr)) {
1997 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1998 mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
1999 if (!mad_send_wr) {
2000 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2001 if (!ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)
2002 && ib_is_mad_class_rmpp(mad_recv_wc->recv_buf.mad->mad_hdr.mgmt_class)
2003 && (ib_get_rmpp_flags(&((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr)
2004 & IB_MGMT_RMPP_FLAG_ACTIVE)) {
2005 /* user rmpp is in effect
2006 * and this is an active RMPP MAD
2007 */
2008 mad_agent_priv->agent.recv_handler(
2009 &mad_agent_priv->agent, NULL,
2010 mad_recv_wc);
2011 atomic_dec(&mad_agent_priv->refcount);
2012 } else {
2013 /* not user rmpp, revert to normal behavior and
2014 * drop the mad */
2015 ib_free_recv_mad(mad_recv_wc);
2016 deref_mad_agent(mad_agent_priv);
2017 return;
2018 }
2019 } else {
2020 ib_mark_mad_done(mad_send_wr);
2021 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2023 /* Defined behavior is to complete response before request */
2024 mad_agent_priv->agent.recv_handler(
2025 &mad_agent_priv->agent,
2026 &mad_send_wr->send_buf,
2027 mad_recv_wc);
2028 atomic_dec(&mad_agent_priv->refcount);
2030 mad_send_wc.status = IB_WC_SUCCESS;
2031 mad_send_wc.vendor_err = 0;
2032 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2033 ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
2034 }
2035 } else {
2036 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent, NULL,
2037 mad_recv_wc);
2038 deref_mad_agent(mad_agent_priv);
2039 }
2041 return;
2042 }
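/*
 * handle_ib_smi() processes an incoming directed-route SMP on QP0.
 * The SMI helpers decide whether the SMP is addressed to this port
 * (IB_SMI_HANDLE lets normal MAD processing continue), should be
 * forwarded out another port on a switch, or must be discarded.
 */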
2044 static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv,
2045 const struct ib_mad_qp_info *qp_info,
2046 const struct ib_wc *wc,
2047 int port_num,
2048 struct ib_mad_private *recv,
2049 struct ib_mad_private *response)
2051 enum smi_forward_action retsmi;
2052 struct ib_smp *smp = (struct ib_smp *)recv->mad;
2054 if (smi_handle_dr_smp_recv(smp,
2055 rdma_cap_ib_switch(port_priv->device),
2056 port_num,
2057 port_priv->device->phys_port_cnt) ==
2058 IB_SMI_DISCARD)
2059 return IB_SMI_DISCARD;
2061 retsmi = smi_check_forward_dr_smp(smp);
2062 if (retsmi == IB_SMI_LOCAL)
2063 return IB_SMI_HANDLE;
2065 if (retsmi == IB_SMI_SEND) { /* don't forward */
2066 if (smi_handle_dr_smp_send(smp,
2067 rdma_cap_ib_switch(port_priv->device),
2068 port_num) == IB_SMI_DISCARD)
2069 return IB_SMI_DISCARD;
2071 if (smi_check_local_smp(smp, port_priv->device) == IB_SMI_DISCARD)
2072 return IB_SMI_DISCARD;
2073 } else if (rdma_cap_ib_switch(port_priv->device)) {
2074 /* forward case for switches */
2075 memcpy(response, recv, mad_priv_size(response));
2076 response->header.recv_wc.wc = &response->header.wc;
2077 response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad;
2078 response->header.recv_wc.recv_buf.grh = &response->grh;
2080 agent_send_response((const struct ib_mad_hdr *)response->mad,
2081 &response->grh, wc,
2082 port_priv->device,
2083 smi_get_fwd_port(smp),
2084 qp_info->qp->qp_num,
2085 response->mad_size,
2086 false);
2088 return IB_SMI_DISCARD;
2089 }
2090 return IB_SMI_HANDLE;
2091 }
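/*
 * generate_unmatched_resp() builds a GetResp with "unsupported method/
 * attribute" status when a Get or Set request arrives that no agent has
 * claimed.  For OPA MADs the response length is trimmed to the MAD (or
 * SMP) header size; requests using other methods get no reply.
 */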
2093 static bool generate_unmatched_resp(const struct ib_mad_private *recv,
2094 struct ib_mad_private *response,
2095 size_t *resp_len, bool opa)
2097 const struct ib_mad_hdr *recv_hdr = (const struct ib_mad_hdr *)recv->mad;
2098 struct ib_mad_hdr *resp_hdr = (struct ib_mad_hdr *)response->mad;
2100 if (recv_hdr->method == IB_MGMT_METHOD_GET ||
2101 recv_hdr->method == IB_MGMT_METHOD_SET) {
2102 memcpy(response, recv, mad_priv_size(response));
2103 response->header.recv_wc.wc = &response->header.wc;
2104 response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad;
2105 response->header.recv_wc.recv_buf.grh = &response->grh;
2106 resp_hdr->method = IB_MGMT_METHOD_GET_RESP;
2107 resp_hdr->status = cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB);
2108 if (recv_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
2109 resp_hdr->status |= IB_SMP_DIRECTION;
2111 if (opa && recv_hdr->base_version == OPA_MGMT_BASE_VERSION) {
2112 if (recv_hdr->mgmt_class ==
2113 IB_MGMT_CLASS_SUBN_LID_ROUTED ||
2114 recv_hdr->mgmt_class ==
2115 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
2116 *resp_len = opa_get_smp_header_size(
2117 (struct opa_smp *)recv->mad);
2118 else
2119 *resp_len = sizeof(struct ib_mad_hdr);
2120 }
2122 return true;
2123 } else {
2124 return false;
2125 }
2126 }
2128 static enum smi_action
2129 handle_opa_smi(struct ib_mad_port_private *port_priv,
2130 struct ib_mad_qp_info *qp_info,
2131 struct ib_wc *wc,
2132 int port_num,
2133 struct ib_mad_private *recv,
2134 struct ib_mad_private *response)
2136 enum smi_forward_action retsmi;
2137 struct opa_smp *smp = (struct opa_smp *)recv->mad;
2139 if (opa_smi_handle_dr_smp_recv(smp,
2140 rdma_cap_ib_switch(port_priv->device),
2141 port_num,
2142 port_priv->device->phys_port_cnt) ==
2143 IB_SMI_DISCARD)
2144 return IB_SMI_DISCARD;
2146 retsmi = opa_smi_check_forward_dr_smp(smp);
2147 if (retsmi == IB_SMI_LOCAL)
2148 return IB_SMI_HANDLE;
2150 if (retsmi == IB_SMI_SEND) { /* don't forward */
2151 if (opa_smi_handle_dr_smp_send(smp,
2152 rdma_cap_ib_switch(port_priv->device),
2153 port_num) == IB_SMI_DISCARD)
2154 return IB_SMI_DISCARD;
2156 if (opa_smi_check_local_smp(smp, port_priv->device) ==
2157 IB_SMI_DISCARD)
2158 return IB_SMI_DISCARD;
2160 } else if (rdma_cap_ib_switch(port_priv->device)) {
2161 /* forward case for switches */
2162 memcpy(response, recv, mad_priv_size(response));
2163 response->header.recv_wc.wc = &response->header.wc;
2164 response->header.recv_wc.recv_buf.opa_mad =
2165 (struct opa_mad *)response->mad;
2166 response->header.recv_wc.recv_buf.grh = &response->grh;
2168 agent_send_response((const struct ib_mad_hdr *)response->mad,
2169 &response->grh, wc,
2170 port_priv->device,
2171 opa_smi_get_fwd_port(smp),
2172 qp_info->qp->qp_num,
2173 recv->header.wc.byte_len,
2174 true);
2176 return IB_SMI_DISCARD;
2177 }
2179 return IB_SMI_HANDLE;
2180 }
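/*
 * handle_smi() dispatches SMP processing: on OPA-capable ports, SMPs
 * carrying the OPA base and SM class versions go through
 * handle_opa_smi(); everything else goes through the IB variant above.
 */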
2182 static enum smi_action
2183 handle_smi(struct ib_mad_port_private *port_priv,
2184 struct ib_mad_qp_info *qp_info,
2185 struct ib_wc *wc,
2186 int port_num,
2187 struct ib_mad_private *recv,
2188 struct ib_mad_private *response,
2189 bool opa)
2191 struct ib_mad_hdr *mad_hdr = (struct ib_mad_hdr *)recv->mad;
2193 if (opa && mad_hdr->base_version == OPA_MGMT_BASE_VERSION &&
2194 mad_hdr->class_version == OPA_SM_CLASS_VERSION)
2195 return handle_opa_smi(port_priv, qp_info, wc, port_num, recv,
2196 response);
2198 return handle_ib_smi(port_priv, qp_info, wc, port_num, recv, response);
2199 }
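/*
 * ib_mad_recv_done() is the receive CQ handler.  It unmaps the receive
 * buffer, validates the MAD, runs SMP processing for directed-route
 * MADs, offers the MAD to the driver via ->process_mad() ("right of
 * first refusal"), then either hands it to the matching agent or
 * generates an unmatched response.  A receive buffer is reposted before
 * returning.
 */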
2201 static void ib_mad_recv_done(struct ib_cq *cq, struct ib_wc *wc)
2203 struct ib_mad_port_private *port_priv = cq->cq_context;
2204 struct ib_mad_list_head *mad_list =
2205 container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
2206 struct ib_mad_qp_info *qp_info;
2207 struct ib_mad_private_header *mad_priv_hdr;
2208 struct ib_mad_private *recv, *response = NULL;
2209 struct ib_mad_agent_private *mad_agent;
2210 int port_num;
2211 int ret = IB_MAD_RESULT_SUCCESS;
2212 size_t mad_size;
2213 u16 resp_mad_pkey_index = 0;
2214 bool opa;
2216 if (list_empty_careful(&port_priv->port_list))
2217 return;
2219 if (wc->status != IB_WC_SUCCESS) {
2220 /*
2221 * Receive errors indicate that the QP has entered the error
2222 * state - error handling/shutdown code will cleanup
2223 */
2224 return;
2225 }
2227 qp_info = mad_list->mad_queue->qp_info;
2228 dequeue_mad(mad_list);
2230 opa = rdma_cap_opa_mad(qp_info->port_priv->device,
2231 qp_info->port_priv->port_num);
2233 mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
2234 mad_list);
2235 recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
2236 ib_dma_unmap_single(port_priv->device,
2237 recv->header.mapping,
2238 mad_priv_dma_size(recv),
2239 DMA_FROM_DEVICE);
2241 /* Setup MAD receive work completion from "normal" work completion */
2242 recv->header.wc = *wc;
2243 recv->header.recv_wc.wc = &recv->header.wc;
2245 if (opa && ((struct ib_mad_hdr *)(recv->mad))->base_version == OPA_MGMT_BASE_VERSION) {
2246 recv->header.recv_wc.mad_len = wc->byte_len - sizeof(struct ib_grh);
2247 recv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad);
2248 } else {
2249 recv->header.recv_wc.mad_len = sizeof(struct ib_mad);
2250 recv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad);
2253 recv->header.recv_wc.recv_buf.mad = (struct ib_mad *)recv->mad;
2254 recv->header.recv_wc.recv_buf.grh = &recv->grh;
2256 if (atomic_read(&qp_info->snoop_count))
2257 snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS);
2259 /* Validate MAD */
2260 if (!validate_mad((const struct ib_mad_hdr *)recv->mad, qp_info, opa))
2261 goto out;
2263 mad_size = recv->mad_size;
2264 response = alloc_mad_private(mad_size, GFP_KERNEL);
2265 if (!response)
2266 goto out;
2268 if (rdma_cap_ib_switch(port_priv->device))
2269 port_num = wc->port_num;
2270 else
2271 port_num = port_priv->port_num;
2273 if (((struct ib_mad_hdr *)recv->mad)->mgmt_class ==
2274 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
2275 if (handle_smi(port_priv, qp_info, wc, port_num, recv,
2276 response, opa)
2277 == IB_SMI_DISCARD)
2278 goto out;
2279 }
2281 /* Give driver "right of first refusal" on incoming MAD */
2282 if (port_priv->device->process_mad) {
2283 ret = port_priv->device->process_mad(port_priv->device, 0,
2284 port_priv->port_num,
2285 wc, &recv->grh,
2286 (const struct ib_mad_hdr *)recv->mad,
2287 recv->mad_size,
2288 (struct ib_mad_hdr *)response->mad,
2289 &mad_size, &resp_mad_pkey_index);
2291 if (opa)
2292 wc->pkey_index = resp_mad_pkey_index;
2294 if (ret & IB_MAD_RESULT_SUCCESS) {
2295 if (ret & IB_MAD_RESULT_CONSUMED)
2296 goto out;
2297 if (ret & IB_MAD_RESULT_REPLY) {
2298 agent_send_response((const struct ib_mad_hdr *)response->mad,
2299 &recv->grh, wc,
2300 port_priv->device,
2301 port_num,
2302 qp_info->qp->qp_num,
2303 mad_size, opa);
2304 goto out;
2305 }
2306 }
2307 }
2309 mad_agent = find_mad_agent(port_priv, (const struct ib_mad_hdr *)recv->mad);
2310 if (mad_agent) {
2311 ib_mad_complete_recv(mad_agent, &recv->header.recv_wc);
2312 /*
2313 * recv is freed up in error cases in ib_mad_complete_recv
2314 * or via recv_handler in ib_mad_complete_recv()
2315 */
2316 recv = NULL;
2317 } else if ((ret & IB_MAD_RESULT_SUCCESS) &&
2318 generate_unmatched_resp(recv, response, &mad_size, opa)) {
2319 agent_send_response((const struct ib_mad_hdr *)response->mad, &recv->grh, wc,
2320 port_priv->device, port_num,
2321 qp_info->qp->qp_num, mad_size, opa);
2322 }
2324 out:
2325 /* Post another receive request for this QP */
2326 if (response) {
2327 ib_mad_post_receive_mads(qp_info, response);
2328 kfree(recv);
2329 } else
2330 ib_mad_post_receive_mads(qp_info, recv);
2331 }
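/*
 * The wait list is kept sorted by expiry time.  adjust_timeout()
 * re-arms the delayed work item for the new earliest expiry (or cancels
 * it when the list is empty); wait_for_response() inserts a sent
 * request into the list at the right position and reschedules the work
 * if that request now expires first.
 */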
2333 static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
2335 struct ib_mad_send_wr_private *mad_send_wr;
2336 unsigned long delay;
2338 if (list_empty(&mad_agent_priv->wait_list)) {
2339 cancel_delayed_work(&mad_agent_priv->timed_work);
2340 } else {
2341 mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
2342 struct ib_mad_send_wr_private,
2343 agent_list);
2345 if (time_after(mad_agent_priv->timeout,
2346 mad_send_wr->timeout)) {
2347 mad_agent_priv->timeout = mad_send_wr->timeout;
2348 delay = mad_send_wr->timeout - jiffies;
2349 if ((long)delay <= 0)
2350 delay = 1;
2351 mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
2352 &mad_agent_priv->timed_work, delay);
2353 }
2354 }
2355 }
2357 static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
2359 struct ib_mad_agent_private *mad_agent_priv;
2360 struct ib_mad_send_wr_private *temp_mad_send_wr;
2361 struct list_head *list_item;
2362 unsigned long delay;
2364 mad_agent_priv = mad_send_wr->mad_agent_priv;
2365 list_del(&mad_send_wr->agent_list);
2367 delay = mad_send_wr->timeout;
2368 mad_send_wr->timeout += jiffies;
2370 if (delay) {
2371 list_for_each_prev(list_item, &mad_agent_priv->wait_list) {
2372 temp_mad_send_wr = list_entry(list_item,
2373 struct ib_mad_send_wr_private,
2374 agent_list);
2375 if (time_after(mad_send_wr->timeout,
2376 temp_mad_send_wr->timeout))
2377 break;
2378 }
2379 }
2380 else
2381 list_item = &mad_agent_priv->wait_list;
2382 list_add(&mad_send_wr->agent_list, list_item);
2384 /* Reschedule a work item if we have a shorter timeout */
2385 if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list)
2386 mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
2387 &mad_agent_priv->timed_work, delay);
2388 }
2390 void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
2391 int timeout_ms)
2393 mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
2394 wait_for_response(mad_send_wr);
2395 }
2397 /*
2398 * Process a send work completion
2399 */
2400 void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
2401 struct ib_mad_send_wc *mad_send_wc)
2403 struct ib_mad_agent_private *mad_agent_priv;
2404 unsigned long flags;
2405 int ret;
2407 mad_agent_priv = mad_send_wr->mad_agent_priv;
2408 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2409 if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
2410 ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc);
2411 if (ret == IB_RMPP_RESULT_CONSUMED)
2412 goto done;
2413 } else
2414 ret = IB_RMPP_RESULT_UNHANDLED;
2416 if (mad_send_wc->status != IB_WC_SUCCESS &&
2417 mad_send_wr->status == IB_WC_SUCCESS) {
2418 mad_send_wr->status = mad_send_wc->status;
2419 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2420 }
2422 if (--mad_send_wr->refcount > 0) {
2423 if (mad_send_wr->refcount == 1 && mad_send_wr->timeout &&
2424 mad_send_wr->status == IB_WC_SUCCESS) {
2425 wait_for_response(mad_send_wr);
2426 }
2427 goto done;
2428 }
2430 /* Remove send from MAD agent and notify client of completion */
2431 list_del(&mad_send_wr->agent_list);
2432 adjust_timeout(mad_agent_priv);
2433 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2435 if (mad_send_wr->status != IB_WC_SUCCESS )
2436 mad_send_wc->status = mad_send_wr->status;
2437 if (ret == IB_RMPP_RESULT_INTERNAL)
2438 ib_rmpp_send_handler(mad_send_wc);
2439 else
2440 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2441 mad_send_wc);
2443 /* Release reference on agent taken when sending */
2444 deref_mad_agent(mad_agent_priv);
2445 return;
2446 done:
2447 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2448 }
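/*
 * ib_mad_send_done() is the send CQ handler.  It unmaps the send
 * buffers, completes the work request, and if the hardware send queue
 * was over its limit it promotes the next send from the overflow list
 * and posts it; a failed post is turned into an IB_WC_LOC_QP_OP_ERR
 * completion for that send.
 */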
2450 static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc)
2452 struct ib_mad_port_private *port_priv = cq->cq_context;
2453 struct ib_mad_list_head *mad_list =
2454 container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
2455 struct ib_mad_send_wr_private *mad_send_wr, *queued_send_wr;
2456 struct ib_mad_qp_info *qp_info;
2457 struct ib_mad_queue *send_queue;
2458 struct ib_send_wr *bad_send_wr;
2459 struct ib_mad_send_wc mad_send_wc;
2460 unsigned long flags;
2461 int ret;
2463 if (list_empty_careful(&port_priv->port_list))
2464 return;
2466 if (wc->status != IB_WC_SUCCESS) {
2467 if (!ib_mad_send_error(port_priv, wc))
2468 return;
2469 }
2471 mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
2472 mad_list);
2473 send_queue = mad_list->mad_queue;
2474 qp_info = send_queue->qp_info;
2476 retry:
2477 ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
2478 mad_send_wr->header_mapping,
2479 mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
2480 ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
2481 mad_send_wr->payload_mapping,
2482 mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
2483 queued_send_wr = NULL;
2484 spin_lock_irqsave(&send_queue->lock, flags);
2485 list_del(&mad_list->list);
2487 /* Move queued send to the send queue */
2488 if (send_queue->count-- > send_queue->max_active) {
2489 mad_list = container_of(qp_info->overflow_list.next,
2490 struct ib_mad_list_head, list);
2491 queued_send_wr = container_of(mad_list,
2492 struct ib_mad_send_wr_private,
2493 mad_list);
2494 list_move_tail(&mad_list->list, &send_queue->list);
2495 }
2496 spin_unlock_irqrestore(&send_queue->lock, flags);
2498 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2499 mad_send_wc.status = wc->status;
2500 mad_send_wc.vendor_err = wc->vendor_err;
2501 if (atomic_read(&qp_info->snoop_count))
2502 snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc,
2503 IB_MAD_SNOOP_SEND_COMPLETIONS);
2504 ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
2506 if (queued_send_wr) {
2507 ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr.wr,
2508 &bad_send_wr);
2509 if (ret) {
2510 dev_err(&port_priv->device->dev,
2511 "ib_post_send failed: %d\n", ret);
2512 mad_send_wr = queued_send_wr;
2513 wc->status = IB_WC_LOC_QP_OP_ERR;
2514 goto retry;
2515 }
2516 }
2517 }
2519 static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info)
2521 struct ib_mad_send_wr_private *mad_send_wr;
2522 struct ib_mad_list_head *mad_list;
2523 unsigned long flags;
2525 spin_lock_irqsave(&qp_info->send_queue.lock, flags);
2526 list_for_each_entry(mad_list, &qp_info->send_queue.list, list) {
2527 mad_send_wr = container_of(mad_list,
2528 struct ib_mad_send_wr_private,
2529 mad_list);
2530 mad_send_wr->retry = 1;
2531 }
2532 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
2533 }
2535 static bool ib_mad_send_error(struct ib_mad_port_private *port_priv,
2536 struct ib_wc *wc)
2538 struct ib_mad_list_head *mad_list =
2539 container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
2540 struct ib_mad_qp_info *qp_info = mad_list->mad_queue->qp_info;
2541 struct ib_mad_send_wr_private *mad_send_wr;
2542 int ret;
2544 /*
2545 * Send errors will transition the QP to SQE - move
2546 * QP to RTS and repost flushed work requests
2547 */
2548 mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
2549 mad_list);
2550 if (wc->status == IB_WC_WR_FLUSH_ERR) {
2551 if (mad_send_wr->retry) {
2552 /* Repost send */
2553 struct ib_send_wr *bad_send_wr;
2555 mad_send_wr->retry = 0;
2556 ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr.wr,
2557 &bad_send_wr);
2558 if (!ret)
2559 return false;
2560 }
2561 } else {
2562 struct ib_qp_attr *attr;
2564 /* Transition QP to RTS and fail offending send */
2565 attr = kmalloc(sizeof *attr, GFP_KERNEL);
2566 if (attr) {
2567 attr->qp_state = IB_QPS_RTS;
2568 attr->cur_qp_state = IB_QPS_SQE;
2569 ret = ib_modify_qp(qp_info->qp, attr,
2570 IB_QP_STATE | IB_QP_CUR_STATE);
2571 kfree(attr);
2572 if (ret)
2573 dev_err(&port_priv->device->dev,
2574 "%s - ib_modify_qp to RTS: %d\n",
2575 __func__, ret);
2576 else
2577 mark_sends_for_retry(qp_info);
2578 }
2579 }
2581 return true;
2582 }
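/*
 * cancel_mads() flushes every outstanding send for an agent that is
 * unregistering: sends still on the send list are marked with
 * IB_WC_WR_FLUSH_ERR, the wait list is emptied so late responses can no
 * longer match, and each cancelled request is reported to the client's
 * send handler.
 */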
2584 static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
2586 unsigned long flags;
2587 struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr;
2588 struct ib_mad_send_wc mad_send_wc;
2589 struct list_head cancel_list;
2591 INIT_LIST_HEAD(&cancel_list);
2593 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2594 list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
2595 &mad_agent_priv->send_list, agent_list) {
2596 if (mad_send_wr->status == IB_WC_SUCCESS) {
2597 mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
2598 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2599 }
2600 }
2602 /* Empty wait list to prevent receives from finding a request */
2603 list_splice_init(&mad_agent_priv->wait_list, &cancel_list);
2604 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2606 /* Report all cancelled requests */
2607 mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
2608 mad_send_wc.vendor_err = 0;
2610 list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
2611 &cancel_list, agent_list) {
2612 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2613 list_del(&mad_send_wr->agent_list);
2614 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2615 &mad_send_wc);
2616 atomic_dec(&mad_agent_priv->refcount);
2617 }
2618 }
2620 static struct ib_mad_send_wr_private*
2621 find_send_wr(struct ib_mad_agent_private *mad_agent_priv,
2622 struct ib_mad_send_buf *send_buf)
2624 struct ib_mad_send_wr_private *mad_send_wr;
2626 list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
2627 agent_list) {
2628 if (&mad_send_wr->send_buf == send_buf)
2629 return mad_send_wr;
2630 }
2632 list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
2633 agent_list) {
2634 if (is_rmpp_data_mad(mad_agent_priv,
2635 mad_send_wr->send_buf.mad) &&
2636 &mad_send_wr->send_buf == send_buf)
2637 return mad_send_wr;
2638 }
2639 return NULL;
2640 }
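/*
 * ib_modify_mad() updates the response timeout of a queued send; a
 * timeout of zero cancels it, and ib_cancel_mad() is simply that special
 * case.  A cancelled send still completes through the normal send
 * handler, with IB_WC_WR_FLUSH_ERR status.
 *
 * Illustrative caller sketch (hypothetical, not part of this file):
 *
 *	msg = ib_create_send_mad(agent, ...);
 *	if (!IS_ERR(msg) && !ib_post_send_mad(msg, NULL)) {
 *		...
 *		ib_cancel_mad(agent, msg);	// stop waiting for the reply
 *	}
 */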
2642 int ib_modify_mad(struct ib_mad_agent *mad_agent,
2643 struct ib_mad_send_buf *send_buf, u32 timeout_ms)
2645 struct ib_mad_agent_private *mad_agent_priv;
2646 struct ib_mad_send_wr_private *mad_send_wr;
2647 unsigned long flags;
2648 int active;
2650 mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
2651 agent);
2652 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2653 mad_send_wr = find_send_wr(mad_agent_priv, send_buf);
2654 if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) {
2655 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2656 return -EINVAL;
2657 }
2659 active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1);
2660 if (!timeout_ms) {
2661 mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
2662 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2663 }
2665 mad_send_wr->send_buf.timeout_ms = timeout_ms;
2666 if (active)
2667 mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
2668 else
2669 ib_reset_mad_timeout(mad_send_wr, timeout_ms);
2671 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2672 return 0;
2673 }
2674 EXPORT_SYMBOL(ib_modify_mad);
2676 void ib_cancel_mad(struct ib_mad_agent *mad_agent,
2677 struct ib_mad_send_buf *send_buf)
2678 {
2679 ib_modify_mad(mad_agent, send_buf, 0);
2680 }
2681 EXPORT_SYMBOL(ib_cancel_mad);
2683 static void local_completions(struct work_struct *work)
2685 struct ib_mad_agent_private *mad_agent_priv;
2686 struct ib_mad_local_private *local;
2687 struct ib_mad_agent_private *recv_mad_agent;
2688 unsigned long flags;
2689 int free_mad;
2690 struct ib_wc wc;
2691 struct ib_mad_send_wc mad_send_wc;
2692 bool opa;
2694 mad_agent_priv =
2695 container_of(work, struct ib_mad_agent_private, local_work);
2697 opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
2698 mad_agent_priv->qp_info->port_priv->port_num);
2700 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2701 while (!list_empty(&mad_agent_priv->local_list)) {
2702 local = list_entry(mad_agent_priv->local_list.next,
2703 struct ib_mad_local_private,
2704 completion_list);
2705 list_del(&local->completion_list);
2706 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2707 free_mad = 0;
2708 if (local->mad_priv) {
2709 u8 base_version;
2710 recv_mad_agent = local->recv_mad_agent;
2711 if (!recv_mad_agent) {
2712 dev_err(&mad_agent_priv->agent.device->dev,
2713 "No receive MAD agent for local completion\n");
2714 free_mad = 1;
2715 goto local_send_completion;
2716 }
2718 /*
2719 * Defined behavior is to complete response
2720 * before request
2721 */
2722 build_smp_wc(recv_mad_agent->agent.qp,
2723 local->mad_send_wr->send_wr.wr.wr_cqe,
2724 be16_to_cpu(IB_LID_PERMISSIVE),
2725 local->mad_send_wr->send_wr.pkey_index,
2726 recv_mad_agent->agent.port_num, &wc);
2728 local->mad_priv->header.recv_wc.wc = &wc;
2730 base_version = ((struct ib_mad_hdr *)(local->mad_priv->mad))->base_version;
2731 if (opa && base_version == OPA_MGMT_BASE_VERSION) {
2732 local->mad_priv->header.recv_wc.mad_len = local->return_wc_byte_len;
2733 local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad);
2734 } else {
2735 local->mad_priv->header.recv_wc.mad_len = sizeof(struct ib_mad);
2736 local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad);
2739 INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list);
2740 list_add(&local->mad_priv->header.recv_wc.recv_buf.list,
2741 &local->mad_priv->header.recv_wc.rmpp_list);
2742 local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
2743 local->mad_priv->header.recv_wc.recv_buf.mad =
2744 (struct ib_mad *)local->mad_priv->mad;
2745 if (atomic_read(&recv_mad_agent->qp_info->snoop_count))
2746 snoop_recv(recv_mad_agent->qp_info,
2747 &local->mad_priv->header.recv_wc,
2748 IB_MAD_SNOOP_RECVS);
2749 recv_mad_agent->agent.recv_handler(
2750 &recv_mad_agent->agent,
2751 &local->mad_send_wr->send_buf,
2752 &local->mad_priv->header.recv_wc);
2753 spin_lock_irqsave(&recv_mad_agent->lock, flags);
2754 atomic_dec(&recv_mad_agent->refcount);
2755 spin_unlock_irqrestore(&recv_mad_agent->lock, flags);
2756 }
2758 local_send_completion:
2759 /* Complete send */
2760 mad_send_wc.status = IB_WC_SUCCESS;
2761 mad_send_wc.vendor_err = 0;
2762 mad_send_wc.send_buf = &local->mad_send_wr->send_buf;
2763 if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
2764 snoop_send(mad_agent_priv->qp_info,
2765 &local->mad_send_wr->send_buf,
2766 &mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS);
2767 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2768 &mad_send_wc);
2770 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2771 atomic_dec(&mad_agent_priv->refcount);
2772 if (free_mad)
2773 kfree(local->mad_priv);
2774 kfree(local);
2775 }
2776 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2777 }
2779 static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
2781 int ret;
2783 if (!mad_send_wr->retries_left)
2784 return -ETIMEDOUT;
2786 mad_send_wr->retries_left--;
2787 mad_send_wr->send_buf.retries++;
2789 mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);
2791 if (ib_mad_kernel_rmpp_agent(&mad_send_wr->mad_agent_priv->agent)) {
2792 ret = ib_retry_rmpp(mad_send_wr);
2793 switch (ret) {
2794 case IB_RMPP_RESULT_UNHANDLED:
2795 ret = ib_send_mad(mad_send_wr);
2796 break;
2797 case IB_RMPP_RESULT_CONSUMED:
2798 ret = 0;
2799 break;
2800 default:
2801 ret = -ECOMM;
2802 break;
2803 }
2804 } else
2805 ret = ib_send_mad(mad_send_wr);
2807 if (!ret) {
2808 mad_send_wr->refcount++;
2809 list_add_tail(&mad_send_wr->agent_list,
2810 &mad_send_wr->mad_agent_priv->send_list);
2811 }
2812 return ret;
2813 }
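/*
 * timeout_sends() runs from the port workqueue when the earliest request
 * on the wait list may have expired.  Requests with retries left are
 * resent; the rest are completed with IB_WC_RESP_TIMEOUT_ERR (or their
 * recorded error status).
 */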
2815 static void timeout_sends(struct work_struct *work)
2817 struct ib_mad_agent_private *mad_agent_priv;
2818 struct ib_mad_send_wr_private *mad_send_wr;
2819 struct ib_mad_send_wc mad_send_wc;
2820 unsigned long flags, delay;
2822 mad_agent_priv = container_of(work, struct ib_mad_agent_private,
2823 timed_work.work);
2824 mad_send_wc.vendor_err = 0;
2826 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2827 while (!list_empty(&mad_agent_priv->wait_list)) {
2828 mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
2829 struct ib_mad_send_wr_private,
2830 agent_list);
2832 if (time_after(mad_send_wr->timeout, jiffies)) {
2833 delay = mad_send_wr->timeout - jiffies;
2834 if ((long)delay <= 0)
2835 delay = 1;
2836 queue_delayed_work(mad_agent_priv->qp_info->
2837 port_priv->wq,
2838 &mad_agent_priv->timed_work, delay);
2839 break;
2842 list_del(&mad_send_wr->agent_list);
2843 if (mad_send_wr->status == IB_WC_SUCCESS &&
2844 !retry_send(mad_send_wr))
2845 continue;
2847 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2849 if (mad_send_wr->status == IB_WC_SUCCESS)
2850 mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
2851 else
2852 mad_send_wc.status = mad_send_wr->status;
2853 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2854 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2855 &mad_send_wc);
2857 atomic_dec(&mad_agent_priv->refcount);
2858 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2859 }
2860 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2861 }
2863 /*
2864 * Allocate receive MADs and post receive WRs for them
2865 */
2866 static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
2867 struct ib_mad_private *mad)
2869 unsigned long flags;
2870 int post, ret;
2871 struct ib_mad_private *mad_priv;
2872 struct ib_sge sg_list;
2873 struct ib_recv_wr recv_wr, *bad_recv_wr;
2874 struct ib_mad_queue *recv_queue = &qp_info->recv_queue;
2876 /* Initialize common scatter list fields */
2877 sg_list.lkey = qp_info->port_priv->pd->local_dma_lkey;
2879 /* Initialize common receive WR fields */
2880 recv_wr.next = NULL;
2881 recv_wr.sg_list = &sg_list;
2882 recv_wr.num_sge = 1;
2884 do {
2885 /* Allocate and map receive buffer */
2886 if (mad) {
2887 mad_priv = mad;
2888 mad = NULL;
2889 } else {
2890 mad_priv = alloc_mad_private(port_mad_size(qp_info->port_priv),
2891 GFP_ATOMIC);
2892 if (!mad_priv) {
2893 ret = -ENOMEM;
2894 break;
2897 sg_list.length = mad_priv_dma_size(mad_priv);
2898 sg_list.addr = ib_dma_map_single(qp_info->port_priv->device,
2899 &mad_priv->grh,
2900 mad_priv_dma_size(mad_priv),
2901 DMA_FROM_DEVICE);
2902 if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device,
2903 sg_list.addr))) {
2904 ret = -ENOMEM;
2905 break;
2907 mad_priv->header.mapping = sg_list.addr;
2908 mad_priv->header.mad_list.mad_queue = recv_queue;
2909 mad_priv->header.mad_list.cqe.done = ib_mad_recv_done;
2910 recv_wr.wr_cqe = &mad_priv->header.mad_list.cqe;
2912 /* Post receive WR */
2913 spin_lock_irqsave(&recv_queue->lock, flags);
2914 post = (++recv_queue->count < recv_queue->max_active);
2915 list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list);
2916 spin_unlock_irqrestore(&recv_queue->lock, flags);
2917 ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr);
2918 if (ret) {
2919 spin_lock_irqsave(&recv_queue->lock, flags);
2920 list_del(&mad_priv->header.mad_list.list);
2921 recv_queue->count--;
2922 spin_unlock_irqrestore(&recv_queue->lock, flags);
2923 ib_dma_unmap_single(qp_info->port_priv->device,
2924 mad_priv->header.mapping,
2925 mad_priv_dma_size(mad_priv),
2926 DMA_FROM_DEVICE);
2927 kfree(mad_priv);
2928 dev_err(&qp_info->port_priv->device->dev,
2929 "ib_post_recv failed: %d\n", ret);
2930 break;
2932 } while (post);
2934 return ret;
2935 }
2937 /*
2938 * Return all the posted receive MADs
2939 */
2940 static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
2942 struct ib_mad_private_header *mad_priv_hdr;
2943 struct ib_mad_private *recv;
2944 struct ib_mad_list_head *mad_list;
2946 if (!qp_info->qp)
2947 return;
2949 while (!list_empty(&qp_info->recv_queue.list)) {
2951 mad_list = list_entry(qp_info->recv_queue.list.next,
2952 struct ib_mad_list_head, list);
2953 mad_priv_hdr = container_of(mad_list,
2954 struct ib_mad_private_header,
2955 mad_list);
2956 recv = container_of(mad_priv_hdr, struct ib_mad_private,
2957 header);
2959 /* Remove from posted receive MAD list */
2960 list_del(&mad_list->list);
2962 ib_dma_unmap_single(qp_info->port_priv->device,
2963 recv->header.mapping,
2964 mad_priv_dma_size(recv),
2965 DMA_FROM_DEVICE);
2966 kfree(recv);
2969 qp_info->recv_queue.count = 0;
2970 }
2972 /*
2973 * Start the port
2974 */
2975 static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
2977 int ret, i;
2978 struct ib_qp_attr *attr;
2979 struct ib_qp *qp;
2980 u16 pkey_index;
2982 attr = kmalloc(sizeof *attr, GFP_KERNEL);
2983 if (!attr)
2984 return -ENOMEM;
2986 ret = ib_find_pkey(port_priv->device, port_priv->port_num,
2987 IB_DEFAULT_PKEY_FULL, &pkey_index);
2988 if (ret)
2989 pkey_index = 0;
2991 for (i = 0; i < IB_MAD_QPS_CORE; i++) {
2992 qp = port_priv->qp_info[i].qp;
2993 if (!qp)
2994 continue;
2996 /*
2997 * PKey index for QP1 is irrelevant but
2998 * one is needed for the Reset to Init transition
2999 */
3000 attr->qp_state = IB_QPS_INIT;
3001 attr->pkey_index = pkey_index;
3002 attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY;
3003 ret = ib_modify_qp(qp, attr, IB_QP_STATE |
3004 IB_QP_PKEY_INDEX | IB_QP_QKEY);
3005 if (ret) {
3006 dev_err(&port_priv->device->dev,
3007 "Couldn't change QP%d state to INIT: %d\n",
3008 i, ret);
3009 goto out;
3012 attr->qp_state = IB_QPS_RTR;
3013 ret = ib_modify_qp(qp, attr, IB_QP_STATE);
3014 if (ret) {
3015 dev_err(&port_priv->device->dev,
3016 "Couldn't change QP%d state to RTR: %d\n",
3017 i, ret);
3018 goto out;
3021 attr->qp_state = IB_QPS_RTS;
3022 attr->sq_psn = IB_MAD_SEND_Q_PSN;
3023 ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN);
3024 if (ret) {
3025 dev_err(&port_priv->device->dev,
3026 "Couldn't change QP%d state to RTS: %d\n",
3027 i, ret);
3028 goto out;
3032 ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
3033 if (ret) {
3034 dev_err(&port_priv->device->dev,
3035 "Failed to request completion notification: %d\n",
3036 ret);
3037 goto out;
3040 for (i = 0; i < IB_MAD_QPS_CORE; i++) {
3041 if (!port_priv->qp_info[i].qp)
3042 continue;
3044 ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
3045 if (ret) {
3046 dev_err(&port_priv->device->dev,
3047 "Couldn't post receive WRs\n");
3048 goto out;
3051 out:
3052 kfree(attr);
3053 return ret;
3054 }
3056 static void qp_event_handler(struct ib_event *event, void *qp_context)
3058 struct ib_mad_qp_info *qp_info = qp_context;
3060 /* It's worse than that! He's dead, Jim! */
3061 dev_err(&qp_info->port_priv->device->dev,
3062 "Fatal error (%d) on MAD QP (%d)\n",
3063 event->event, qp_info->qp->qp_num);
3066 static void init_mad_queue(struct ib_mad_qp_info *qp_info,
3067 struct ib_mad_queue *mad_queue)
3069 mad_queue->qp_info = qp_info;
3070 mad_queue->count = 0;
3071 spin_lock_init(&mad_queue->lock);
3072 INIT_LIST_HEAD(&mad_queue->list);
3075 static void init_mad_qp(struct ib_mad_port_private *port_priv,
3076 struct ib_mad_qp_info *qp_info)
3078 qp_info->port_priv = port_priv;
3079 init_mad_queue(qp_info, &qp_info->send_queue);
3080 init_mad_queue(qp_info, &qp_info->recv_queue);
3081 INIT_LIST_HEAD(&qp_info->overflow_list);
3082 spin_lock_init(&qp_info->snoop_lock);
3083 qp_info->snoop_table = NULL;
3084 qp_info->snoop_table_size = 0;
3085 atomic_set(&qp_info->snoop_count, 0);
3088 static int create_mad_qp(struct ib_mad_qp_info *qp_info,
3089 enum ib_qp_type qp_type)
3091 struct ib_qp_init_attr qp_init_attr;
3092 int ret;
3094 memset(&qp_init_attr, 0, sizeof qp_init_attr);
3095 qp_init_attr.send_cq = qp_info->port_priv->cq;
3096 qp_init_attr.recv_cq = qp_info->port_priv->cq;
3097 qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
3098 qp_init_attr.cap.max_send_wr = mad_sendq_size;
3099 qp_init_attr.cap.max_recv_wr = mad_recvq_size;
3100 qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG;
3101 qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG;
3102 qp_init_attr.qp_type = qp_type;
3103 qp_init_attr.port_num = qp_info->port_priv->port_num;
3104 qp_init_attr.qp_context = qp_info;
3105 qp_init_attr.event_handler = qp_event_handler;
3106 qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
3107 if (IS_ERR(qp_info->qp)) {
3108 dev_err(&qp_info->port_priv->device->dev,
3109 "Couldn't create ib_mad QP%d\n",
3110 get_spl_qp_index(qp_type));
3111 ret = PTR_ERR(qp_info->qp);
3112 goto error;
3114 /* Use minimum queue sizes unless the CQ is resized */
3115 qp_info->send_queue.max_active = mad_sendq_size;
3116 qp_info->recv_queue.max_active = mad_recvq_size;
3117 return 0;
3119 error:
3120 return ret;
3123 static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
3125 if (!qp_info->qp)
3126 return;
3128 ib_destroy_qp(qp_info->qp);
3129 kfree(qp_info->snoop_table);
3130 }
3132 /*
3133 * Open the port
3134 * Create the QP, PD, MR, and CQ if needed
3135 */
3136 static int ib_mad_port_open(struct ib_device *device,
3137 int port_num)
3139 int ret, cq_size;
3140 struct ib_mad_port_private *port_priv;
3141 unsigned long flags;
3142 char name[sizeof "ib_mad123"];
3143 int has_smi;
3145 if (WARN_ON(rdma_max_mad_size(device, port_num) < IB_MGMT_MAD_SIZE))
3146 return -EFAULT;
3148 if (WARN_ON(rdma_cap_opa_mad(device, port_num) &&
3149 rdma_max_mad_size(device, port_num) < OPA_MGMT_MAD_SIZE))
3150 return -EFAULT;
3152 /* Create new device info */
3153 port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
3154 if (!port_priv)
3155 return -ENOMEM;
3157 port_priv->device = device;
3158 port_priv->port_num = port_num;
3159 spin_lock_init(&port_priv->reg_lock);
3160 INIT_LIST_HEAD(&port_priv->agent_list);
3161 init_mad_qp(port_priv, &port_priv->qp_info[0]);
3162 init_mad_qp(port_priv, &port_priv->qp_info[1]);
3164 cq_size = mad_sendq_size + mad_recvq_size;
3165 has_smi = rdma_cap_ib_smi(device, port_num);
3166 if (has_smi)
3167 cq_size *= 2;
3169 port_priv->cq = ib_alloc_cq(port_priv->device, port_priv, cq_size, 0,
3170 IB_POLL_WORKQUEUE);
3171 if (IS_ERR(port_priv->cq)) {
3172 dev_err(&device->dev, "Couldn't create ib_mad CQ\n");
3173 ret = PTR_ERR(port_priv->cq);
3174 goto error3;
3177 port_priv->pd = ib_alloc_pd(device, 0);
3178 if (IS_ERR(port_priv->pd)) {
3179 dev_err(&device->dev, "Couldn't create ib_mad PD\n");
3180 ret = PTR_ERR(port_priv->pd);
3181 goto error4;
3184 if (has_smi) {
3185 ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
3186 if (ret)
3187 goto error6;
3189 ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
3190 if (ret)
3191 goto error7;
3193 snprintf(name, sizeof name, "ib_mad%d", port_num);
3194 port_priv->wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
3195 if (!port_priv->wq) {
3196 ret = -ENOMEM;
3197 goto error8;
3200 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
3201 list_add_tail(&port_priv->port_list, &ib_mad_port_list);
3202 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
3204 ret = ib_mad_port_start(port_priv);
3205 if (ret) {
3206 dev_err(&device->dev, "Couldn't start port\n");
3207 goto error9;
3210 return 0;
3212 error9:
3213 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
3214 list_del_init(&port_priv->port_list);
3215 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
3217 destroy_workqueue(port_priv->wq);
3218 error8:
3219 destroy_mad_qp(&port_priv->qp_info[1]);
3220 error7:
3221 destroy_mad_qp(&port_priv->qp_info[0]);
3222 error6:
3223 ib_dealloc_pd(port_priv->pd);
3224 error4:
3225 ib_free_cq(port_priv->cq);
3226 cleanup_recv_queue(&port_priv->qp_info[1]);
3227 cleanup_recv_queue(&port_priv->qp_info[0]);
3228 error3:
3229 kfree(port_priv);
3231 return ret;
3232 }
3234 /*
3235 * Close the port
3236 * If there are no classes using the port, free the port
3237 * resources (CQ, MR, PD, QP) and remove the port's info structure
3238 */
3239 static int ib_mad_port_close(struct ib_device *device, int port_num)
3241 struct ib_mad_port_private *port_priv;
3242 unsigned long flags;
3244 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
3245 port_priv = __ib_get_mad_port(device, port_num);
3246 if (port_priv == NULL) {
3247 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
3248 dev_err(&device->dev, "Port %d not found\n", port_num);
3249 return -ENODEV;
3251 list_del_init(&port_priv->port_list);
3252 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
3254 destroy_workqueue(port_priv->wq);
3255 destroy_mad_qp(&port_priv->qp_info[1]);
3256 destroy_mad_qp(&port_priv->qp_info[0]);
3257 ib_dealloc_pd(port_priv->pd);
3258 ib_free_cq(port_priv->cq);
3259 cleanup_recv_queue(&port_priv->qp_info[1]);
3260 cleanup_recv_queue(&port_priv->qp_info[0]);
3261 /* XXX: Handle deallocation of MAD registration tables */
3263 kfree(port_priv);
3265 return 0;
3266 }
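/*
 * ib_mad_init_device() and ib_mad_remove_device() are the ib_client
 * add/remove callbacks: they bring MAD and agent services up (or tear
 * them down) on every port of the device that supports IB management.
 */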
3268 static void ib_mad_init_device(struct ib_device *device)
3270 int start, i;
3272 start = rdma_start_port(device);
3274 for (i = start; i <= rdma_end_port(device); i++) {
3275 if (!rdma_cap_ib_mad(device, i))
3276 continue;
3278 if (ib_mad_port_open(device, i)) {
3279 dev_err(&device->dev, "Couldn't open port %d\n", i);
3280 goto error;
3282 if (ib_agent_port_open(device, i)) {
3283 dev_err(&device->dev,
3284 "Couldn't open port %d for agents\n", i);
3285 goto error_agent;
3288 return;
3290 error_agent:
3291 if (ib_mad_port_close(device, i))
3292 dev_err(&device->dev, "Couldn't close port %d\n", i);
3294 error:
3295 while (--i >= start) {
3296 if (!rdma_cap_ib_mad(device, i))
3297 continue;
3299 if (ib_agent_port_close(device, i))
3300 dev_err(&device->dev,
3301 "Couldn't close port %d for agents\n", i);
3302 if (ib_mad_port_close(device, i))
3303 dev_err(&device->dev, "Couldn't close port %d\n", i);
3304 }
3305 }
3307 static void ib_mad_remove_device(struct ib_device *device, void *client_data)
3309 int i;
3311 for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) {
3312 if (!rdma_cap_ib_mad(device, i))
3313 continue;
3315 if (ib_agent_port_close(device, i))
3316 dev_err(&device->dev,
3317 "Couldn't close port %d for agents\n", i);
3318 if (ib_mad_port_close(device, i))
3319 dev_err(&device->dev, "Couldn't close port %d\n", i);
3320 }
3321 }
3323 static struct ib_client mad_client = {
3324 .name = "mad",
3325 .add = ib_mad_init_device,
3326 .remove = ib_mad_remove_device
3327 };
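/*
 * Module init clamps the module-parameter queue sizes to the
 * [IB_MAD_QP_MIN_SIZE, IB_MAD_QP_MAX_SIZE] range before registering the
 * MAD client with the core.
 */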
3329 int ib_mad_init(void)
3331 mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE);
3332 mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE);
3334 mad_sendq_size = min(mad_sendq_size, IB_MAD_QP_MAX_SIZE);
3335 mad_sendq_size = max(mad_sendq_size, IB_MAD_QP_MIN_SIZE);
3337 INIT_LIST_HEAD(&ib_mad_port_list);
3339 if (ib_register_client(&mad_client)) {
3340 pr_err("Couldn't register ib_mad client\n");
3341 return -EINVAL;
3342 }
3344 return 0;
3345 }
3347 void ib_mad_cleanup(void)
3348 {
3349 ib_unregister_client(&mad_client);
3350 }