/*
 * Copyright (c) 2004-2007 Voltaire, Inc.  All rights reserved.
 * Copyright (c) 2005 Intel Corporation.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2009 HNR Consulting. All rights reserved.
 * Copyright (c) 2014 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <rdma/ib_cache.h>

#include "mad_priv.h"
#include "mad_rmpp.h"
#include "smi.h"
#include "opa_smi.h"
#include "agent.h"
#include "core_priv.h"
static int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
static int mad_recvq_size = IB_MAD_QP_RECV_SIZE;

module_param_named(send_queue_size, mad_sendq_size, int, 0444);
MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests");
module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");

static struct list_head ib_mad_port_list;
static u32 ib_mad_client_id = 0;

/* Port list lock */
static DEFINE_SPINLOCK(ib_mad_port_list_lock);
/* Forward declarations */
static int method_in_use(struct ib_mad_mgmt_method_table **method,
                         struct ib_mad_reg_req *mad_reg_req);
static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
static struct ib_mad_agent_private *find_mad_agent(
                                        struct ib_mad_port_private *port_priv,
                                        const struct ib_mad_hdr *mad);
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
                                    struct ib_mad_private *mad);
static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
static void timeout_sends(struct work_struct *work);
static void local_completions(struct work_struct *work);
static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
                              struct ib_mad_agent_private *agent_priv,
                              u8 mgmt_class);
static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
                           struct ib_mad_agent_private *agent_priv);
static bool ib_mad_send_error(struct ib_mad_port_private *port_priv,
                              struct ib_wc *wc);
static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc);
/*
 * Returns a ib_mad_port_private structure or NULL for a device/port
 * Assumes ib_mad_port_list_lock is being held
 */
static inline struct ib_mad_port_private *
__ib_get_mad_port(struct ib_device *device, int port_num)
{
        struct ib_mad_port_private *entry;

        list_for_each_entry(entry, &ib_mad_port_list, port_list) {
                if (entry->device == device && entry->port_num == port_num)
                        return entry;
        }
        return NULL;
}

/*
 * Wrapper function to return a ib_mad_port_private structure or NULL
 * for a device/port
 */
static inline struct ib_mad_port_private *
ib_get_mad_port(struct ib_device *device, int port_num)
{
        struct ib_mad_port_private *entry;
        unsigned long flags;

        spin_lock_irqsave(&ib_mad_port_list_lock, flags);
        entry = __ib_get_mad_port(device, port_num);
        spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

        return entry;
}
static inline u8 convert_mgmt_class(u8 mgmt_class)
{
        /* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0 */
        return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ?
                0 : mgmt_class;
}

static int get_spl_qp_index(enum ib_qp_type qp_type)
{
        switch (qp_type) {
        case IB_QPT_SMI:
                return 0;
        case IB_QPT_GSI:
                return 1;
        default:
                return -1;
        }
}

static int vendor_class_index(u8 mgmt_class)
{
        return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START;
}

static int is_vendor_class(u8 mgmt_class)
{
        if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) ||
            (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END))
                return 0;
        return 1;
}

static int is_vendor_oui(char *oui)
{
        if (oui[0] || oui[1] || oui[2])
                return 1;
        return 0;
}
static int is_vendor_method_in_use(
                struct ib_mad_mgmt_vendor_class *vendor_class,
                struct ib_mad_reg_req *mad_reg_req)
{
        struct ib_mad_mgmt_method_table *method;
        int i;

        for (i = 0; i < MAX_MGMT_OUI; i++) {
                if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) {
                        method = vendor_class->method_table[i];
                        if (method) {
                                if (method_in_use(&method, mad_reg_req))
                                        return 1;
                                else
                                        break;
                        }
                }
        }
        return 0;
}

int ib_response_mad(const struct ib_mad_hdr *hdr)
{
        return ((hdr->method & IB_MGMT_METHOD_RESP) ||
                (hdr->method == IB_MGMT_METHOD_TRAP_REPRESS) ||
                ((hdr->mgmt_class == IB_MGMT_CLASS_BM) &&
                 (hdr->attr_mod & IB_BM_ATTR_MOD_RESP)));
}
EXPORT_SYMBOL(ib_response_mad);
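
/*
 * Illustrative sketch (not built): how a client's receive handler might use
 * ib_response_mad() to tell solicited responses apart from unsolicited
 * requests.  The handler name and the pr_debug() reporting are assumptions
 * made for the example, not part of this file.
 */
#if 0
static void example_recv_handler(struct ib_mad_agent *agent,
                                 struct ib_mad_send_buf *send_buf,
                                 struct ib_mad_recv_wc *recv_wc)
{
        const struct ib_mad_hdr *hdr = &recv_wc->recv_buf.mad->mad_hdr;

        if (ib_response_mad(hdr)) {
                /* Response to a MAD we sent; send_buf identifies the request. */
                pr_debug("response for TID 0x%llx\n", be64_to_cpu(hdr->tid));
        } else {
                /* Unsolicited request; a real client would build a reply here. */
                pr_debug("request, method 0x%x\n", hdr->method);
        }

        /* The handler owns the receive buffers and must return them. */
        ib_free_recv_mad(recv_wc);
}
#endif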
191 * ib_register_mad_agent - Register to send/receive MADs
193 struct ib_mad_agent
*ib_register_mad_agent(struct ib_device
*device
,
195 enum ib_qp_type qp_type
,
196 struct ib_mad_reg_req
*mad_reg_req
,
198 ib_mad_send_handler send_handler
,
199 ib_mad_recv_handler recv_handler
,
201 u32 registration_flags
)
203 struct ib_mad_port_private
*port_priv
;
204 struct ib_mad_agent
*ret
= ERR_PTR(-EINVAL
);
205 struct ib_mad_agent_private
*mad_agent_priv
;
206 struct ib_mad_reg_req
*reg_req
= NULL
;
207 struct ib_mad_mgmt_class_table
*class;
208 struct ib_mad_mgmt_vendor_class_table
*vendor
;
209 struct ib_mad_mgmt_vendor_class
*vendor_class
;
210 struct ib_mad_mgmt_method_table
*method
;
213 u8 mgmt_class
, vclass
;
215 /* Validate parameters */
216 qpn
= get_spl_qp_index(qp_type
);
218 dev_notice(&device
->dev
,
219 "ib_register_mad_agent: invalid QP Type %d\n",
224 if (rmpp_version
&& rmpp_version
!= IB_MGMT_RMPP_VERSION
) {
225 dev_notice(&device
->dev
,
226 "ib_register_mad_agent: invalid RMPP Version %u\n",
231 /* Validate MAD registration request if supplied */
233 if (mad_reg_req
->mgmt_class_version
>= MAX_MGMT_VERSION
) {
234 dev_notice(&device
->dev
,
235 "ib_register_mad_agent: invalid Class Version %u\n",
236 mad_reg_req
->mgmt_class_version
);
240 dev_notice(&device
->dev
,
241 "ib_register_mad_agent: no recv_handler\n");
244 if (mad_reg_req
->mgmt_class
>= MAX_MGMT_CLASS
) {
246 * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
247 * one in this range currently allowed
249 if (mad_reg_req
->mgmt_class
!=
250 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
) {
251 dev_notice(&device
->dev
,
252 "ib_register_mad_agent: Invalid Mgmt Class 0x%x\n",
253 mad_reg_req
->mgmt_class
);
256 } else if (mad_reg_req
->mgmt_class
== 0) {
258 * Class 0 is reserved in IBA and is used for
259 * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
261 dev_notice(&device
->dev
,
262 "ib_register_mad_agent: Invalid Mgmt Class 0\n");
264 } else if (is_vendor_class(mad_reg_req
->mgmt_class
)) {
266 * If class is in "new" vendor range,
267 * ensure supplied OUI is not zero
269 if (!is_vendor_oui(mad_reg_req
->oui
)) {
270 dev_notice(&device
->dev
,
271 "ib_register_mad_agent: No OUI specified for class 0x%x\n",
272 mad_reg_req
->mgmt_class
);
276 /* Make sure class supplied is consistent with RMPP */
277 if (!ib_is_mad_class_rmpp(mad_reg_req
->mgmt_class
)) {
279 dev_notice(&device
->dev
,
280 "ib_register_mad_agent: RMPP version for non-RMPP class 0x%x\n",
281 mad_reg_req
->mgmt_class
);
286 /* Make sure class supplied is consistent with QP type */
287 if (qp_type
== IB_QPT_SMI
) {
288 if ((mad_reg_req
->mgmt_class
!=
289 IB_MGMT_CLASS_SUBN_LID_ROUTED
) &&
290 (mad_reg_req
->mgmt_class
!=
291 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
)) {
292 dev_notice(&device
->dev
,
293 "ib_register_mad_agent: Invalid SM QP type: class 0x%x\n",
294 mad_reg_req
->mgmt_class
);
298 if ((mad_reg_req
->mgmt_class
==
299 IB_MGMT_CLASS_SUBN_LID_ROUTED
) ||
300 (mad_reg_req
->mgmt_class
==
301 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
)) {
302 dev_notice(&device
->dev
,
303 "ib_register_mad_agent: Invalid GS QP type: class 0x%x\n",
304 mad_reg_req
->mgmt_class
);
309 /* No registration request supplied */
312 if (registration_flags
& IB_MAD_USER_RMPP
)
316 /* Validate device and port */
317 port_priv
= ib_get_mad_port(device
, port_num
);
319 dev_notice(&device
->dev
, "ib_register_mad_agent: Invalid port\n");
320 ret
= ERR_PTR(-ENODEV
);
324 /* Verify the QP requested is supported. For example, Ethernet devices
325 * will not have QP0 */
326 if (!port_priv
->qp_info
[qpn
].qp
) {
327 dev_notice(&device
->dev
,
328 "ib_register_mad_agent: QP %d not supported\n", qpn
);
329 ret
= ERR_PTR(-EPROTONOSUPPORT
);
333 /* Allocate structures */
334 mad_agent_priv
= kzalloc(sizeof *mad_agent_priv
, GFP_KERNEL
);
335 if (!mad_agent_priv
) {
336 ret
= ERR_PTR(-ENOMEM
);
341 reg_req
= kmemdup(mad_reg_req
, sizeof *reg_req
, GFP_KERNEL
);
343 ret
= ERR_PTR(-ENOMEM
);
348 /* Now, fill in the various structures */
349 mad_agent_priv
->qp_info
= &port_priv
->qp_info
[qpn
];
350 mad_agent_priv
->reg_req
= reg_req
;
351 mad_agent_priv
->agent
.rmpp_version
= rmpp_version
;
352 mad_agent_priv
->agent
.device
= device
;
353 mad_agent_priv
->agent
.recv_handler
= recv_handler
;
354 mad_agent_priv
->agent
.send_handler
= send_handler
;
355 mad_agent_priv
->agent
.context
= context
;
356 mad_agent_priv
->agent
.qp
= port_priv
->qp_info
[qpn
].qp
;
357 mad_agent_priv
->agent
.port_num
= port_num
;
358 mad_agent_priv
->agent
.flags
= registration_flags
;
359 spin_lock_init(&mad_agent_priv
->lock
);
360 INIT_LIST_HEAD(&mad_agent_priv
->send_list
);
361 INIT_LIST_HEAD(&mad_agent_priv
->wait_list
);
362 INIT_LIST_HEAD(&mad_agent_priv
->done_list
);
363 INIT_LIST_HEAD(&mad_agent_priv
->rmpp_list
);
364 INIT_DELAYED_WORK(&mad_agent_priv
->timed_work
, timeout_sends
);
365 INIT_LIST_HEAD(&mad_agent_priv
->local_list
);
366 INIT_WORK(&mad_agent_priv
->local_work
, local_completions
);
367 atomic_set(&mad_agent_priv
->refcount
, 1);
368 init_completion(&mad_agent_priv
->comp
);
370 spin_lock_irqsave(&port_priv
->reg_lock
, flags
);
371 mad_agent_priv
->agent
.hi_tid
= ++ib_mad_client_id
;
374 * Make sure MAD registration (if supplied)
375 * is non overlapping with any existing ones
378 mgmt_class
= convert_mgmt_class(mad_reg_req
->mgmt_class
);
379 if (!is_vendor_class(mgmt_class
)) {
380 class = port_priv
->version
[mad_reg_req
->
381 mgmt_class_version
].class;
383 method
= class->method_table
[mgmt_class
];
385 if (method_in_use(&method
,
390 ret2
= add_nonoui_reg_req(mad_reg_req
, mad_agent_priv
,
393 /* "New" vendor class range */
394 vendor
= port_priv
->version
[mad_reg_req
->
395 mgmt_class_version
].vendor
;
397 vclass
= vendor_class_index(mgmt_class
);
398 vendor_class
= vendor
->vendor_class
[vclass
];
400 if (is_vendor_method_in_use(
406 ret2
= add_oui_reg_req(mad_reg_req
, mad_agent_priv
);
414 /* Add mad agent into port's agent list */
415 list_add_tail(&mad_agent_priv
->agent_list
, &port_priv
->agent_list
);
416 spin_unlock_irqrestore(&port_priv
->reg_lock
, flags
);
418 return &mad_agent_priv
->agent
;
421 spin_unlock_irqrestore(&port_priv
->reg_lock
, flags
);
424 kfree(mad_agent_priv
);
428 EXPORT_SYMBOL(ib_register_mad_agent
);
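
/*
 * Illustrative sketch (not built): registering a GSI MAD agent for the
 * Performance Management class and tearing it down again.  The function
 * names, the chosen method mask and the example_send_handler()/
 * example_recv_handler() callbacks are assumptions for the example; only
 * ib_register_mad_agent()/ib_unregister_mad_agent() come from this file.
 */
#if 0
static struct ib_mad_agent *example_register_perf_agent(struct ib_device *device,
                                                         u8 port_num)
{
        struct ib_mad_reg_req reg_req = {
                .mgmt_class         = IB_MGMT_CLASS_PERF_MGMT,
                .mgmt_class_version = 1,
        };

        /* Receive only Get/Set requests addressed to this class. */
        set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);
        set_bit(IB_MGMT_METHOD_SET, reg_req.method_mask);

        return ib_register_mad_agent(device, port_num, IB_QPT_GSI, &reg_req,
                                     0 /* no RMPP */,
                                     example_send_handler,
                                     example_recv_handler,
                                     NULL /* context */,
                                     0 /* registration_flags */);
}

/* ...and later, typically from the client's remove path: */
static void example_unregister(struct ib_mad_agent *agent)
{
        if (!IS_ERR_OR_NULL(agent))
                ib_unregister_mad_agent(agent);
}
#endif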
static inline int is_snooping_sends(int mad_snoop_flags)
{
        return (mad_snoop_flags &
                (/*IB_MAD_SNOOP_POSTED_SENDS |
                 IB_MAD_SNOOP_RMPP_SENDS |*/
                 IB_MAD_SNOOP_SEND_COMPLETIONS /*|
                 IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/));
}

static inline int is_snooping_recvs(int mad_snoop_flags)
{
        return (mad_snoop_flags &
                (IB_MAD_SNOOP_RECVS /*|
                 IB_MAD_SNOOP_RMPP_RECVS*/));
}
446 static int register_snoop_agent(struct ib_mad_qp_info
*qp_info
,
447 struct ib_mad_snoop_private
*mad_snoop_priv
)
449 struct ib_mad_snoop_private
**new_snoop_table
;
453 spin_lock_irqsave(&qp_info
->snoop_lock
, flags
);
454 /* Check for empty slot in array. */
455 for (i
= 0; i
< qp_info
->snoop_table_size
; i
++)
456 if (!qp_info
->snoop_table
[i
])
459 if (i
== qp_info
->snoop_table_size
) {
461 new_snoop_table
= krealloc(qp_info
->snoop_table
,
462 sizeof mad_snoop_priv
*
463 (qp_info
->snoop_table_size
+ 1),
465 if (!new_snoop_table
) {
470 qp_info
->snoop_table
= new_snoop_table
;
471 qp_info
->snoop_table_size
++;
473 qp_info
->snoop_table
[i
] = mad_snoop_priv
;
474 atomic_inc(&qp_info
->snoop_count
);
476 spin_unlock_irqrestore(&qp_info
->snoop_lock
, flags
);
480 struct ib_mad_agent
*ib_register_mad_snoop(struct ib_device
*device
,
482 enum ib_qp_type qp_type
,
484 ib_mad_snoop_handler snoop_handler
,
485 ib_mad_recv_handler recv_handler
,
488 struct ib_mad_port_private
*port_priv
;
489 struct ib_mad_agent
*ret
;
490 struct ib_mad_snoop_private
*mad_snoop_priv
;
493 /* Validate parameters */
494 if ((is_snooping_sends(mad_snoop_flags
) && !snoop_handler
) ||
495 (is_snooping_recvs(mad_snoop_flags
) && !recv_handler
)) {
496 ret
= ERR_PTR(-EINVAL
);
499 qpn
= get_spl_qp_index(qp_type
);
501 ret
= ERR_PTR(-EINVAL
);
504 port_priv
= ib_get_mad_port(device
, port_num
);
506 ret
= ERR_PTR(-ENODEV
);
509 /* Allocate structures */
510 mad_snoop_priv
= kzalloc(sizeof *mad_snoop_priv
, GFP_KERNEL
);
511 if (!mad_snoop_priv
) {
512 ret
= ERR_PTR(-ENOMEM
);
516 /* Now, fill in the various structures */
517 mad_snoop_priv
->qp_info
= &port_priv
->qp_info
[qpn
];
518 mad_snoop_priv
->agent
.device
= device
;
519 mad_snoop_priv
->agent
.recv_handler
= recv_handler
;
520 mad_snoop_priv
->agent
.snoop_handler
= snoop_handler
;
521 mad_snoop_priv
->agent
.context
= context
;
522 mad_snoop_priv
->agent
.qp
= port_priv
->qp_info
[qpn
].qp
;
523 mad_snoop_priv
->agent
.port_num
= port_num
;
524 mad_snoop_priv
->mad_snoop_flags
= mad_snoop_flags
;
525 init_completion(&mad_snoop_priv
->comp
);
526 mad_snoop_priv
->snoop_index
= register_snoop_agent(
527 &port_priv
->qp_info
[qpn
],
529 if (mad_snoop_priv
->snoop_index
< 0) {
530 ret
= ERR_PTR(mad_snoop_priv
->snoop_index
);
534 atomic_set(&mad_snoop_priv
->refcount
, 1);
535 return &mad_snoop_priv
->agent
;
538 kfree(mad_snoop_priv
);
542 EXPORT_SYMBOL(ib_register_mad_snoop
);
static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
        if (atomic_dec_and_test(&mad_agent_priv->refcount))
                complete(&mad_agent_priv->comp);
}

static inline void deref_snoop_agent(struct ib_mad_snoop_private *mad_snoop_priv)
{
        if (atomic_dec_and_test(&mad_snoop_priv->refcount))
                complete(&mad_snoop_priv->comp);
}
556 static void unregister_mad_agent(struct ib_mad_agent_private
*mad_agent_priv
)
558 struct ib_mad_port_private
*port_priv
;
561 /* Note that we could still be handling received MADs */
564 * Canceling all sends results in dropping received response
565 * MADs, preventing us from queuing additional work
567 cancel_mads(mad_agent_priv
);
568 port_priv
= mad_agent_priv
->qp_info
->port_priv
;
569 cancel_delayed_work(&mad_agent_priv
->timed_work
);
571 spin_lock_irqsave(&port_priv
->reg_lock
, flags
);
572 remove_mad_reg_req(mad_agent_priv
);
573 list_del(&mad_agent_priv
->agent_list
);
574 spin_unlock_irqrestore(&port_priv
->reg_lock
, flags
);
576 flush_workqueue(port_priv
->wq
);
577 ib_cancel_rmpp_recvs(mad_agent_priv
);
579 deref_mad_agent(mad_agent_priv
);
580 wait_for_completion(&mad_agent_priv
->comp
);
582 kfree(mad_agent_priv
->reg_req
);
583 kfree(mad_agent_priv
);
586 static void unregister_mad_snoop(struct ib_mad_snoop_private
*mad_snoop_priv
)
588 struct ib_mad_qp_info
*qp_info
;
591 qp_info
= mad_snoop_priv
->qp_info
;
592 spin_lock_irqsave(&qp_info
->snoop_lock
, flags
);
593 qp_info
->snoop_table
[mad_snoop_priv
->snoop_index
] = NULL
;
594 atomic_dec(&qp_info
->snoop_count
);
595 spin_unlock_irqrestore(&qp_info
->snoop_lock
, flags
);
597 deref_snoop_agent(mad_snoop_priv
);
598 wait_for_completion(&mad_snoop_priv
->comp
);
600 kfree(mad_snoop_priv
);
/*
 * ib_unregister_mad_agent - Unregisters a client from using MAD services
 */
int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
{
        struct ib_mad_agent_private *mad_agent_priv;
        struct ib_mad_snoop_private *mad_snoop_priv;

        /* If the TID is zero, the agent can only snoop. */
        if (mad_agent->hi_tid) {
                mad_agent_priv = container_of(mad_agent,
                                              struct ib_mad_agent_private,
                                              agent);
                unregister_mad_agent(mad_agent_priv);
        } else {
                mad_snoop_priv = container_of(mad_agent,
                                              struct ib_mad_snoop_private,
                                              agent);
                unregister_mad_snoop(mad_snoop_priv);
        }
        return 0;
}
EXPORT_SYMBOL(ib_unregister_mad_agent);
static void dequeue_mad(struct ib_mad_list_head *mad_list)
{
        struct ib_mad_queue *mad_queue;
        unsigned long flags;

        BUG_ON(!mad_list->mad_queue);
        mad_queue = mad_list->mad_queue;
        spin_lock_irqsave(&mad_queue->lock, flags);
        list_del(&mad_list->list);
        mad_queue->count--;
        spin_unlock_irqrestore(&mad_queue->lock, flags);
}
640 static void snoop_send(struct ib_mad_qp_info
*qp_info
,
641 struct ib_mad_send_buf
*send_buf
,
642 struct ib_mad_send_wc
*mad_send_wc
,
645 struct ib_mad_snoop_private
*mad_snoop_priv
;
649 spin_lock_irqsave(&qp_info
->snoop_lock
, flags
);
650 for (i
= 0; i
< qp_info
->snoop_table_size
; i
++) {
651 mad_snoop_priv
= qp_info
->snoop_table
[i
];
652 if (!mad_snoop_priv
||
653 !(mad_snoop_priv
->mad_snoop_flags
& mad_snoop_flags
))
656 atomic_inc(&mad_snoop_priv
->refcount
);
657 spin_unlock_irqrestore(&qp_info
->snoop_lock
, flags
);
658 mad_snoop_priv
->agent
.snoop_handler(&mad_snoop_priv
->agent
,
659 send_buf
, mad_send_wc
);
660 deref_snoop_agent(mad_snoop_priv
);
661 spin_lock_irqsave(&qp_info
->snoop_lock
, flags
);
663 spin_unlock_irqrestore(&qp_info
->snoop_lock
, flags
);
666 static void snoop_recv(struct ib_mad_qp_info
*qp_info
,
667 struct ib_mad_recv_wc
*mad_recv_wc
,
670 struct ib_mad_snoop_private
*mad_snoop_priv
;
674 spin_lock_irqsave(&qp_info
->snoop_lock
, flags
);
675 for (i
= 0; i
< qp_info
->snoop_table_size
; i
++) {
676 mad_snoop_priv
= qp_info
->snoop_table
[i
];
677 if (!mad_snoop_priv
||
678 !(mad_snoop_priv
->mad_snoop_flags
& mad_snoop_flags
))
681 atomic_inc(&mad_snoop_priv
->refcount
);
682 spin_unlock_irqrestore(&qp_info
->snoop_lock
, flags
);
683 mad_snoop_priv
->agent
.recv_handler(&mad_snoop_priv
->agent
, NULL
,
685 deref_snoop_agent(mad_snoop_priv
);
686 spin_lock_irqsave(&qp_info
->snoop_lock
, flags
);
688 spin_unlock_irqrestore(&qp_info
->snoop_lock
, flags
);
691 static void build_smp_wc(struct ib_qp
*qp
, struct ib_cqe
*cqe
, u16 slid
,
692 u16 pkey_index
, u8 port_num
, struct ib_wc
*wc
)
694 memset(wc
, 0, sizeof *wc
);
696 wc
->status
= IB_WC_SUCCESS
;
697 wc
->opcode
= IB_WC_RECV
;
698 wc
->pkey_index
= pkey_index
;
699 wc
->byte_len
= sizeof(struct ib_mad
) + sizeof(struct ib_grh
);
704 wc
->dlid_path_bits
= 0;
705 wc
->port_num
= port_num
;
static size_t mad_priv_size(const struct ib_mad_private *mp)
{
        return sizeof(struct ib_mad_private) + mp->mad_size;
}

static struct ib_mad_private *alloc_mad_private(size_t mad_size, gfp_t flags)
{
        size_t size = sizeof(struct ib_mad_private) + mad_size;
        struct ib_mad_private *ret = kzalloc(size, flags);

        if (ret)
                ret->mad_size = mad_size;

        return ret;
}

static size_t port_mad_size(const struct ib_mad_port_private *port_priv)
{
        return rdma_max_mad_size(port_priv->device, port_priv->port_num);
}

static size_t mad_priv_dma_size(const struct ib_mad_private *mp)
{
        return sizeof(struct ib_grh) + mp->mad_size;
}
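
/*
 * Sizing note (figures are illustrative): sizeof(struct ib_grh) is 40 bytes,
 * so on a plain IB port where rdma_max_mad_size() reports 256 the receive
 * buffer that gets DMA-mapped is 40 + 256 = 296 bytes, while an OPA port
 * reporting 2048 maps 40 + 2048 = 2088 bytes.  mad_priv_size() additionally
 * counts the ib_mad_private bookkeeping header, which is not part of the
 * DMA mapping.
 */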
735 * Return 0 if SMP is to be sent
736 * Return 1 if SMP was consumed locally (whether or not solicited)
737 * Return < 0 if error
739 static int handle_outgoing_dr_smp(struct ib_mad_agent_private
*mad_agent_priv
,
740 struct ib_mad_send_wr_private
*mad_send_wr
)
743 struct ib_smp
*smp
= mad_send_wr
->send_buf
.mad
;
744 struct opa_smp
*opa_smp
= (struct opa_smp
*)smp
;
746 struct ib_mad_local_private
*local
;
747 struct ib_mad_private
*mad_priv
;
748 struct ib_mad_port_private
*port_priv
;
749 struct ib_mad_agent_private
*recv_mad_agent
= NULL
;
750 struct ib_device
*device
= mad_agent_priv
->agent
.device
;
753 struct ib_ud_wr
*send_wr
= &mad_send_wr
->send_wr
;
754 size_t mad_size
= port_mad_size(mad_agent_priv
->qp_info
->port_priv
);
755 u16 out_mad_pkey_index
= 0;
757 bool opa
= rdma_cap_opa_mad(mad_agent_priv
->qp_info
->port_priv
->device
,
758 mad_agent_priv
->qp_info
->port_priv
->port_num
);
760 if (rdma_cap_ib_switch(device
) &&
761 smp
->mgmt_class
== IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
)
762 port_num
= send_wr
->port_num
;
764 port_num
= mad_agent_priv
->agent
.port_num
;
767 * Directed route handling starts if the initial LID routed part of
768 * a request or the ending LID routed part of a response is empty.
769 * If we are at the start of the LID routed part, don't update the
770 * hop_ptr or hop_cnt. See section 14.2.2, Vol 1 IB spec.
772 if (opa
&& smp
->class_version
== OPA_SMP_CLASS_VERSION
) {
775 if ((opa_get_smp_direction(opa_smp
)
776 ? opa_smp
->route
.dr
.dr_dlid
: opa_smp
->route
.dr
.dr_slid
) ==
777 OPA_LID_PERMISSIVE
&&
778 opa_smi_handle_dr_smp_send(opa_smp
,
779 rdma_cap_ib_switch(device
),
780 port_num
) == IB_SMI_DISCARD
) {
782 dev_err(&device
->dev
, "OPA Invalid directed route\n");
785 opa_drslid
= be32_to_cpu(opa_smp
->route
.dr
.dr_slid
);
786 if (opa_drslid
!= be32_to_cpu(OPA_LID_PERMISSIVE
) &&
787 opa_drslid
& 0xffff0000) {
789 dev_err(&device
->dev
, "OPA Invalid dr_slid 0x%x\n",
793 drslid
= (u16
)(opa_drslid
& 0x0000ffff);
795 /* Check to post send on QP or process locally */
796 if (opa_smi_check_local_smp(opa_smp
, device
) == IB_SMI_DISCARD
&&
797 opa_smi_check_local_returning_smp(opa_smp
, device
) == IB_SMI_DISCARD
)
800 if ((ib_get_smp_direction(smp
) ? smp
->dr_dlid
: smp
->dr_slid
) ==
802 smi_handle_dr_smp_send(smp
, rdma_cap_ib_switch(device
), port_num
) ==
805 dev_err(&device
->dev
, "Invalid directed route\n");
808 drslid
= be16_to_cpu(smp
->dr_slid
);
810 /* Check to post send on QP or process locally */
811 if (smi_check_local_smp(smp
, device
) == IB_SMI_DISCARD
&&
812 smi_check_local_returning_smp(smp
, device
) == IB_SMI_DISCARD
)
816 local
= kmalloc(sizeof *local
, GFP_ATOMIC
);
819 dev_err(&device
->dev
, "No memory for ib_mad_local_private\n");
822 local
->mad_priv
= NULL
;
823 local
->recv_mad_agent
= NULL
;
824 mad_priv
= alloc_mad_private(mad_size
, GFP_ATOMIC
);
827 dev_err(&device
->dev
, "No memory for local response MAD\n");
832 build_smp_wc(mad_agent_priv
->agent
.qp
,
833 send_wr
->wr
.wr_cqe
, drslid
,
835 send_wr
->port_num
, &mad_wc
);
837 if (opa
&& smp
->base_version
== OPA_MGMT_BASE_VERSION
) {
838 mad_wc
.byte_len
= mad_send_wr
->send_buf
.hdr_len
839 + mad_send_wr
->send_buf
.data_len
840 + sizeof(struct ib_grh
);
843 /* No GRH for DR SMP */
844 ret
= device
->process_mad(device
, 0, port_num
, &mad_wc
, NULL
,
845 (const struct ib_mad_hdr
*)smp
, mad_size
,
846 (struct ib_mad_hdr
*)mad_priv
->mad
,
847 &mad_size
, &out_mad_pkey_index
);
850 case IB_MAD_RESULT_SUCCESS
| IB_MAD_RESULT_REPLY
:
851 if (ib_response_mad((const struct ib_mad_hdr
*)mad_priv
->mad
) &&
852 mad_agent_priv
->agent
.recv_handler
) {
853 local
->mad_priv
= mad_priv
;
854 local
->recv_mad_agent
= mad_agent_priv
;
856 * Reference MAD agent until receive
857 * side of local completion handled
859 atomic_inc(&mad_agent_priv
->refcount
);
863 case IB_MAD_RESULT_SUCCESS
| IB_MAD_RESULT_CONSUMED
:
866 case IB_MAD_RESULT_SUCCESS
:
867 /* Treat like an incoming receive MAD */
868 port_priv
= ib_get_mad_port(mad_agent_priv
->agent
.device
,
869 mad_agent_priv
->agent
.port_num
);
871 memcpy(mad_priv
->mad
, smp
, mad_priv
->mad_size
);
872 recv_mad_agent
= find_mad_agent(port_priv
,
873 (const struct ib_mad_hdr
*)mad_priv
->mad
);
875 if (!port_priv
|| !recv_mad_agent
) {
877 * No receiving agent so drop packet and
878 * generate send completion.
883 local
->mad_priv
= mad_priv
;
884 local
->recv_mad_agent
= recv_mad_agent
;
893 local
->mad_send_wr
= mad_send_wr
;
895 local
->mad_send_wr
->send_wr
.pkey_index
= out_mad_pkey_index
;
896 local
->return_wc_byte_len
= mad_size
;
898 /* Reference MAD agent until send side of local completion handled */
899 atomic_inc(&mad_agent_priv
->refcount
);
900 /* Queue local completion to local list */
901 spin_lock_irqsave(&mad_agent_priv
->lock
, flags
);
902 list_add_tail(&local
->completion_list
, &mad_agent_priv
->local_list
);
903 spin_unlock_irqrestore(&mad_agent_priv
->lock
, flags
);
904 queue_work(mad_agent_priv
->qp_info
->port_priv
->wq
,
905 &mad_agent_priv
->local_work
);
static int get_pad_size(int hdr_len, int data_len, size_t mad_size)
{
        int seg_size, pad;

        seg_size = mad_size - hdr_len;
        if (data_len && seg_size) {
                pad = seg_size - data_len % seg_size;
                return pad == seg_size ? 0 : pad;
        } else
                return 0;
}
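
/*
 * Worked example (values for illustration): for an IB SA MAD, mad_size is
 * 256 and hdr_len is IB_MGMT_SA_HDR (56), so seg_size = 256 - 56 = 200.
 * A payload of data_len = 300 gives pad = 200 - (300 % 200) = 100, i.e. the
 * second RMPP segment carries 100 data bytes plus 100 bytes of padding.
 * When data_len is an exact multiple of seg_size the modulo is zero and the
 * "pad == seg_size" test folds the padding back to 0.
 */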
static void free_send_rmpp_list(struct ib_mad_send_wr_private *mad_send_wr)
{
        struct ib_rmpp_segment *s, *t;

        list_for_each_entry_safe(s, t, &mad_send_wr->rmpp_list, list) {
                list_del(&s->list);
                kfree(s);
        }
}
933 static int alloc_send_rmpp_list(struct ib_mad_send_wr_private
*send_wr
,
934 size_t mad_size
, gfp_t gfp_mask
)
936 struct ib_mad_send_buf
*send_buf
= &send_wr
->send_buf
;
937 struct ib_rmpp_mad
*rmpp_mad
= send_buf
->mad
;
938 struct ib_rmpp_segment
*seg
= NULL
;
939 int left
, seg_size
, pad
;
941 send_buf
->seg_size
= mad_size
- send_buf
->hdr_len
;
942 send_buf
->seg_rmpp_size
= mad_size
- IB_MGMT_RMPP_HDR
;
943 seg_size
= send_buf
->seg_size
;
946 /* Allocate data segments. */
947 for (left
= send_buf
->data_len
+ pad
; left
> 0; left
-= seg_size
) {
948 seg
= kmalloc(sizeof (*seg
) + seg_size
, gfp_mask
);
950 dev_err(&send_buf
->mad_agent
->device
->dev
,
951 "alloc_send_rmpp_segs: RMPP mem alloc failed for len %zd, gfp %#x\n",
952 sizeof (*seg
) + seg_size
, gfp_mask
);
953 free_send_rmpp_list(send_wr
);
956 seg
->num
= ++send_buf
->seg_count
;
957 list_add_tail(&seg
->list
, &send_wr
->rmpp_list
);
960 /* Zero any padding */
962 memset(seg
->data
+ seg_size
- pad
, 0, pad
);
964 rmpp_mad
->rmpp_hdr
.rmpp_version
= send_wr
->mad_agent_priv
->
966 rmpp_mad
->rmpp_hdr
.rmpp_type
= IB_MGMT_RMPP_TYPE_DATA
;
967 ib_set_rmpp_flags(&rmpp_mad
->rmpp_hdr
, IB_MGMT_RMPP_FLAG_ACTIVE
);
969 send_wr
->cur_seg
= container_of(send_wr
->rmpp_list
.next
,
970 struct ib_rmpp_segment
, list
);
971 send_wr
->last_ack_seg
= send_wr
->cur_seg
;
int ib_mad_kernel_rmpp_agent(const struct ib_mad_agent *agent)
{
        return agent->rmpp_version && !(agent->flags & IB_MAD_USER_RMPP);
}
EXPORT_SYMBOL(ib_mad_kernel_rmpp_agent);
981 struct ib_mad_send_buf
* ib_create_send_mad(struct ib_mad_agent
*mad_agent
,
982 u32 remote_qpn
, u16 pkey_index
,
984 int hdr_len
, int data_len
,
988 struct ib_mad_agent_private
*mad_agent_priv
;
989 struct ib_mad_send_wr_private
*mad_send_wr
;
990 int pad
, message_size
, ret
, size
;
995 mad_agent_priv
= container_of(mad_agent
, struct ib_mad_agent_private
,
998 opa
= rdma_cap_opa_mad(mad_agent
->device
, mad_agent
->port_num
);
1000 if (opa
&& base_version
== OPA_MGMT_BASE_VERSION
)
1001 mad_size
= sizeof(struct opa_mad
);
1003 mad_size
= sizeof(struct ib_mad
);
1005 pad
= get_pad_size(hdr_len
, data_len
, mad_size
);
1006 message_size
= hdr_len
+ data_len
+ pad
;
1008 if (ib_mad_kernel_rmpp_agent(mad_agent
)) {
1009 if (!rmpp_active
&& message_size
> mad_size
)
1010 return ERR_PTR(-EINVAL
);
1012 if (rmpp_active
|| message_size
> mad_size
)
1013 return ERR_PTR(-EINVAL
);
1015 size
= rmpp_active
? hdr_len
: mad_size
;
1016 buf
= kzalloc(sizeof *mad_send_wr
+ size
, gfp_mask
);
1018 return ERR_PTR(-ENOMEM
);
1020 mad_send_wr
= buf
+ size
;
1021 INIT_LIST_HEAD(&mad_send_wr
->rmpp_list
);
1022 mad_send_wr
->send_buf
.mad
= buf
;
1023 mad_send_wr
->send_buf
.hdr_len
= hdr_len
;
1024 mad_send_wr
->send_buf
.data_len
= data_len
;
1025 mad_send_wr
->pad
= pad
;
1027 mad_send_wr
->mad_agent_priv
= mad_agent_priv
;
1028 mad_send_wr
->sg_list
[0].length
= hdr_len
;
1029 mad_send_wr
->sg_list
[0].lkey
= mad_agent
->qp
->pd
->local_dma_lkey
;
1031 /* OPA MADs don't have to be the full 2048 bytes */
1032 if (opa
&& base_version
== OPA_MGMT_BASE_VERSION
&&
1033 data_len
< mad_size
- hdr_len
)
1034 mad_send_wr
->sg_list
[1].length
= data_len
;
1036 mad_send_wr
->sg_list
[1].length
= mad_size
- hdr_len
;
1038 mad_send_wr
->sg_list
[1].lkey
= mad_agent
->qp
->pd
->local_dma_lkey
;
1040 mad_send_wr
->mad_list
.cqe
.done
= ib_mad_send_done
;
1042 mad_send_wr
->send_wr
.wr
.wr_cqe
= &mad_send_wr
->mad_list
.cqe
;
1043 mad_send_wr
->send_wr
.wr
.sg_list
= mad_send_wr
->sg_list
;
1044 mad_send_wr
->send_wr
.wr
.num_sge
= 2;
1045 mad_send_wr
->send_wr
.wr
.opcode
= IB_WR_SEND
;
1046 mad_send_wr
->send_wr
.wr
.send_flags
= IB_SEND_SIGNALED
;
1047 mad_send_wr
->send_wr
.remote_qpn
= remote_qpn
;
1048 mad_send_wr
->send_wr
.remote_qkey
= IB_QP_SET_QKEY
;
1049 mad_send_wr
->send_wr
.pkey_index
= pkey_index
;
1052 ret
= alloc_send_rmpp_list(mad_send_wr
, mad_size
, gfp_mask
);
1055 return ERR_PTR(ret
);
1059 mad_send_wr
->send_buf
.mad_agent
= mad_agent
;
1060 atomic_inc(&mad_agent_priv
->refcount
);
1061 return &mad_send_wr
->send_buf
;
1063 EXPORT_SYMBOL(ib_create_send_mad
);
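
/*
 * Illustrative sketch (not built): allocating a single (non-RMPP) send
 * buffer and filling in the common MAD header.  The PerfMgt class, the
 * attribute-free Get, the timeout/retry values and the helper name are
 * assumptions for the example; real clients pick their own.  Note how the
 * caller folds agent->hi_tid into the upper 32 bits of the TID so that the
 * response is routed back to this agent.
 */
#if 0
static struct ib_mad_send_buf *example_build_get(struct ib_mad_agent *agent,
                                                 struct ib_ah *ah, u32 tid)
{
        struct ib_mad_send_buf *send_buf;
        struct ib_mad *mad;

        send_buf = ib_create_send_mad(agent, 1 /* remote QPN (GSI) */,
                                      0 /* pkey_index */, 0 /* rmpp_active */,
                                      IB_MGMT_MAD_HDR,
                                      sizeof(struct ib_mad) - IB_MGMT_MAD_HDR,
                                      GFP_KERNEL, IB_MGMT_BASE_VERSION);
        if (IS_ERR(send_buf))
                return send_buf;

        mad = send_buf->mad;
        mad->mad_hdr.base_version  = IB_MGMT_BASE_VERSION;
        mad->mad_hdr.mgmt_class    = IB_MGMT_CLASS_PERF_MGMT;
        mad->mad_hdr.class_version = 1;
        mad->mad_hdr.method        = IB_MGMT_METHOD_GET;
        mad->mad_hdr.tid           = cpu_to_be64(((u64)agent->hi_tid << 32) | tid);

        send_buf->ah         = ah;
        send_buf->timeout_ms = 100;
        send_buf->retries    = 3;

        return send_buf;
}
#endif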
int ib_get_mad_data_offset(u8 mgmt_class)
{
        if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
                return IB_MGMT_SA_HDR;
        else if ((mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
                 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
                 (mgmt_class == IB_MGMT_CLASS_BIS))
                return IB_MGMT_DEVICE_HDR;
        else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
                 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
                return IB_MGMT_VENDOR_HDR;
        else
                return IB_MGMT_MAD_HDR;
}
EXPORT_SYMBOL(ib_get_mad_data_offset);

int ib_is_mad_class_rmpp(u8 mgmt_class)
{
        if ((mgmt_class == IB_MGMT_CLASS_SUBN_ADM) ||
            (mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
            (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
            (mgmt_class == IB_MGMT_CLASS_BIS) ||
            ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
             (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)))
                return 1;
        return 0;
}
EXPORT_SYMBOL(ib_is_mad_class_rmpp);
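
/*
 * Worked mapping (header sizes as defined in <rdma/ib_mad.h> at the time of
 * writing; treat the figures as illustrative): SubnAdm (SA) payload starts
 * at IB_MGMT_SA_HDR (56 bytes), DevMgt/DevAdm/BIS at IB_MGMT_DEVICE_HDR
 * (64), the vendor range 2 classes at IB_MGMT_VENDOR_HDR (40), and every
 * other class at the bare IB_MGMT_MAD_HDR (24).  The same class list doubles
 * as the set of RMPP-capable classes, which is why the two functions above
 * test the same constants.
 */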
1094 void *ib_get_rmpp_segment(struct ib_mad_send_buf
*send_buf
, int seg_num
)
1096 struct ib_mad_send_wr_private
*mad_send_wr
;
1097 struct list_head
*list
;
1099 mad_send_wr
= container_of(send_buf
, struct ib_mad_send_wr_private
,
1101 list
= &mad_send_wr
->cur_seg
->list
;
1103 if (mad_send_wr
->cur_seg
->num
< seg_num
) {
1104 list_for_each_entry(mad_send_wr
->cur_seg
, list
, list
)
1105 if (mad_send_wr
->cur_seg
->num
== seg_num
)
1107 } else if (mad_send_wr
->cur_seg
->num
> seg_num
) {
1108 list_for_each_entry_reverse(mad_send_wr
->cur_seg
, list
, list
)
1109 if (mad_send_wr
->cur_seg
->num
== seg_num
)
1112 return mad_send_wr
->cur_seg
->data
;
1114 EXPORT_SYMBOL(ib_get_rmpp_segment
);
static inline void *ib_get_payload(struct ib_mad_send_wr_private *mad_send_wr)
{
        if (mad_send_wr->send_buf.seg_count)
                return ib_get_rmpp_segment(&mad_send_wr->send_buf,
                                           mad_send_wr->seg_num);
        else
                return mad_send_wr->send_buf.mad +
                       mad_send_wr->send_buf.hdr_len;
}
void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
{
        struct ib_mad_agent_private *mad_agent_priv;
        struct ib_mad_send_wr_private *mad_send_wr;

        mad_agent_priv = container_of(send_buf->mad_agent,
                                      struct ib_mad_agent_private, agent);
        mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
                                   send_buf);

        free_send_rmpp_list(mad_send_wr);
        kfree(send_buf->mad);
        deref_mad_agent(mad_agent_priv);
}
EXPORT_SYMBOL(ib_free_send_mad);
1142 int ib_send_mad(struct ib_mad_send_wr_private
*mad_send_wr
)
1144 struct ib_mad_qp_info
*qp_info
;
1145 struct list_head
*list
;
1146 struct ib_send_wr
*bad_send_wr
;
1147 struct ib_mad_agent
*mad_agent
;
1149 unsigned long flags
;
1152 /* Set WR ID to find mad_send_wr upon completion */
1153 qp_info
= mad_send_wr
->mad_agent_priv
->qp_info
;
1154 mad_send_wr
->mad_list
.mad_queue
= &qp_info
->send_queue
;
1155 mad_send_wr
->mad_list
.cqe
.done
= ib_mad_send_done
;
1156 mad_send_wr
->send_wr
.wr
.wr_cqe
= &mad_send_wr
->mad_list
.cqe
;
1158 mad_agent
= mad_send_wr
->send_buf
.mad_agent
;
1159 sge
= mad_send_wr
->sg_list
;
1160 sge
[0].addr
= ib_dma_map_single(mad_agent
->device
,
1161 mad_send_wr
->send_buf
.mad
,
1164 if (unlikely(ib_dma_mapping_error(mad_agent
->device
, sge
[0].addr
)))
1167 mad_send_wr
->header_mapping
= sge
[0].addr
;
1169 sge
[1].addr
= ib_dma_map_single(mad_agent
->device
,
1170 ib_get_payload(mad_send_wr
),
1173 if (unlikely(ib_dma_mapping_error(mad_agent
->device
, sge
[1].addr
))) {
1174 ib_dma_unmap_single(mad_agent
->device
,
1175 mad_send_wr
->header_mapping
,
1176 sge
[0].length
, DMA_TO_DEVICE
);
1179 mad_send_wr
->payload_mapping
= sge
[1].addr
;
1181 spin_lock_irqsave(&qp_info
->send_queue
.lock
, flags
);
1182 if (qp_info
->send_queue
.count
< qp_info
->send_queue
.max_active
) {
1183 ret
= ib_post_send(mad_agent
->qp
, &mad_send_wr
->send_wr
.wr
,
1185 list
= &qp_info
->send_queue
.list
;
1188 list
= &qp_info
->overflow_list
;
1192 qp_info
->send_queue
.count
++;
1193 list_add_tail(&mad_send_wr
->mad_list
.list
, list
);
1195 spin_unlock_irqrestore(&qp_info
->send_queue
.lock
, flags
);
1197 ib_dma_unmap_single(mad_agent
->device
,
1198 mad_send_wr
->header_mapping
,
1199 sge
[0].length
, DMA_TO_DEVICE
);
1200 ib_dma_unmap_single(mad_agent
->device
,
1201 mad_send_wr
->payload_mapping
,
1202 sge
[1].length
, DMA_TO_DEVICE
);
1208 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
1209 * with the registered client
1211 int ib_post_send_mad(struct ib_mad_send_buf
*send_buf
,
1212 struct ib_mad_send_buf
**bad_send_buf
)
1214 struct ib_mad_agent_private
*mad_agent_priv
;
1215 struct ib_mad_send_buf
*next_send_buf
;
1216 struct ib_mad_send_wr_private
*mad_send_wr
;
1217 unsigned long flags
;
1220 /* Walk list of send WRs and post each on send list */
1221 for (; send_buf
; send_buf
= next_send_buf
) {
1223 mad_send_wr
= container_of(send_buf
,
1224 struct ib_mad_send_wr_private
,
1226 mad_agent_priv
= mad_send_wr
->mad_agent_priv
;
1228 if (!send_buf
->mad_agent
->send_handler
||
1229 (send_buf
->timeout_ms
&&
1230 !send_buf
->mad_agent
->recv_handler
)) {
1235 if (!ib_is_mad_class_rmpp(((struct ib_mad_hdr
*) send_buf
->mad
)->mgmt_class
)) {
1236 if (mad_agent_priv
->agent
.rmpp_version
) {
1243 * Save pointer to next work request to post in case the
1244 * current one completes, and the user modifies the work
1245 * request associated with the completion
1247 next_send_buf
= send_buf
->next
;
1248 mad_send_wr
->send_wr
.ah
= send_buf
->ah
;
1250 if (((struct ib_mad_hdr
*) send_buf
->mad
)->mgmt_class
==
1251 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
) {
1252 ret
= handle_outgoing_dr_smp(mad_agent_priv
,
1254 if (ret
< 0) /* error */
1256 else if (ret
== 1) /* locally consumed */
1260 mad_send_wr
->tid
= ((struct ib_mad_hdr
*) send_buf
->mad
)->tid
;
1261 /* Timeout will be updated after send completes */
1262 mad_send_wr
->timeout
= msecs_to_jiffies(send_buf
->timeout_ms
);
1263 mad_send_wr
->max_retries
= send_buf
->retries
;
1264 mad_send_wr
->retries_left
= send_buf
->retries
;
1265 send_buf
->retries
= 0;
1266 /* Reference for work request to QP + response */
1267 mad_send_wr
->refcount
= 1 + (mad_send_wr
->timeout
> 0);
1268 mad_send_wr
->status
= IB_WC_SUCCESS
;
1270 /* Reference MAD agent until send completes */
1271 atomic_inc(&mad_agent_priv
->refcount
);
1272 spin_lock_irqsave(&mad_agent_priv
->lock
, flags
);
1273 list_add_tail(&mad_send_wr
->agent_list
,
1274 &mad_agent_priv
->send_list
);
1275 spin_unlock_irqrestore(&mad_agent_priv
->lock
, flags
);
1277 if (ib_mad_kernel_rmpp_agent(&mad_agent_priv
->agent
)) {
1278 ret
= ib_send_rmpp_mad(mad_send_wr
);
1279 if (ret
>= 0 && ret
!= IB_RMPP_RESULT_CONSUMED
)
1280 ret
= ib_send_mad(mad_send_wr
);
1282 ret
= ib_send_mad(mad_send_wr
);
1284 /* Fail send request */
1285 spin_lock_irqsave(&mad_agent_priv
->lock
, flags
);
1286 list_del(&mad_send_wr
->agent_list
);
1287 spin_unlock_irqrestore(&mad_agent_priv
->lock
, flags
);
1288 atomic_dec(&mad_agent_priv
->refcount
);
1295 *bad_send_buf
= send_buf
;
1298 EXPORT_SYMBOL(ib_post_send_mad
);
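
/*
 * Illustrative sketch (not built): posting a buffer built with
 * ib_create_send_mad() and releasing it from the send handler.  On a
 * successful post the core owns the buffer until the send completes (or
 * times out); on a failed post, and again in the completion handler, the
 * client owns it and must call ib_free_send_mad().  The function names are
 * assumptions for the example.
 */
#if 0
static void example_send_handler(struct ib_mad_agent *agent,
                                 struct ib_mad_send_wc *mad_send_wc)
{
        if (mad_send_wc->status != IB_WC_SUCCESS)
                pr_debug("MAD send failed, status %d\n", mad_send_wc->status);

        ib_free_send_mad(mad_send_wc->send_buf);
}

static int example_post(struct ib_mad_send_buf *send_buf)
{
        int ret = ib_post_send_mad(send_buf, NULL);

        if (ret)
                /* Never handed to the QP; free it ourselves. */
                ib_free_send_mad(send_buf);
        return ret;
}
#endif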
1301 * ib_free_recv_mad - Returns data buffers used to receive
1302 * a MAD to the access layer
1304 void ib_free_recv_mad(struct ib_mad_recv_wc
*mad_recv_wc
)
1306 struct ib_mad_recv_buf
*mad_recv_buf
, *temp_recv_buf
;
1307 struct ib_mad_private_header
*mad_priv_hdr
;
1308 struct ib_mad_private
*priv
;
1309 struct list_head free_list
;
1311 INIT_LIST_HEAD(&free_list
);
1312 list_splice_init(&mad_recv_wc
->rmpp_list
, &free_list
);
1314 list_for_each_entry_safe(mad_recv_buf
, temp_recv_buf
,
1316 mad_recv_wc
= container_of(mad_recv_buf
, struct ib_mad_recv_wc
,
1318 mad_priv_hdr
= container_of(mad_recv_wc
,
1319 struct ib_mad_private_header
,
1321 priv
= container_of(mad_priv_hdr
, struct ib_mad_private
,
1326 EXPORT_SYMBOL(ib_free_recv_mad
);
1328 struct ib_mad_agent
*ib_redirect_mad_qp(struct ib_qp
*qp
,
1330 ib_mad_send_handler send_handler
,
1331 ib_mad_recv_handler recv_handler
,
1334 return ERR_PTR(-EINVAL
); /* XXX: for now */
1336 EXPORT_SYMBOL(ib_redirect_mad_qp
);
1338 int ib_process_mad_wc(struct ib_mad_agent
*mad_agent
,
1341 dev_err(&mad_agent
->device
->dev
,
1342 "ib_process_mad_wc() not implemented yet\n");
1345 EXPORT_SYMBOL(ib_process_mad_wc
);
1347 static int method_in_use(struct ib_mad_mgmt_method_table
**method
,
1348 struct ib_mad_reg_req
*mad_reg_req
)
1352 for_each_set_bit(i
, mad_reg_req
->method_mask
, IB_MGMT_MAX_METHODS
) {
1353 if ((*method
)->agent
[i
]) {
1354 pr_err("Method %d already in use\n", i
);
1361 static int allocate_method_table(struct ib_mad_mgmt_method_table
**method
)
1363 /* Allocate management method table */
1364 *method
= kzalloc(sizeof **method
, GFP_ATOMIC
);
1366 pr_err("No memory for ib_mad_mgmt_method_table\n");
1374 * Check to see if there are any methods still in use
1376 static int check_method_table(struct ib_mad_mgmt_method_table
*method
)
1380 for (i
= 0; i
< IB_MGMT_MAX_METHODS
; i
++)
1381 if (method
->agent
[i
])
1387 * Check to see if there are any method tables for this class still in use
1389 static int check_class_table(struct ib_mad_mgmt_class_table
*class)
1393 for (i
= 0; i
< MAX_MGMT_CLASS
; i
++)
1394 if (class->method_table
[i
])
1399 static int check_vendor_class(struct ib_mad_mgmt_vendor_class
*vendor_class
)
1403 for (i
= 0; i
< MAX_MGMT_OUI
; i
++)
1404 if (vendor_class
->method_table
[i
])
1409 static int find_vendor_oui(struct ib_mad_mgmt_vendor_class
*vendor_class
,
1414 for (i
= 0; i
< MAX_MGMT_OUI
; i
++)
1415 /* Is there matching OUI for this vendor class ? */
1416 if (!memcmp(vendor_class
->oui
[i
], oui
, 3))
1422 static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table
*vendor
)
1426 for (i
= 0; i
< MAX_MGMT_VENDOR_RANGE2
; i
++)
1427 if (vendor
->vendor_class
[i
])
1433 static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table
*method
,
1434 struct ib_mad_agent_private
*agent
)
1438 /* Remove any methods for this mad agent */
1439 for (i
= 0; i
< IB_MGMT_MAX_METHODS
; i
++) {
1440 if (method
->agent
[i
] == agent
) {
1441 method
->agent
[i
] = NULL
;
1446 static int add_nonoui_reg_req(struct ib_mad_reg_req
*mad_reg_req
,
1447 struct ib_mad_agent_private
*agent_priv
,
1450 struct ib_mad_port_private
*port_priv
;
1451 struct ib_mad_mgmt_class_table
**class;
1452 struct ib_mad_mgmt_method_table
**method
;
1455 port_priv
= agent_priv
->qp_info
->port_priv
;
1456 class = &port_priv
->version
[mad_reg_req
->mgmt_class_version
].class;
1458 /* Allocate management class table for "new" class version */
1459 *class = kzalloc(sizeof **class, GFP_ATOMIC
);
1461 dev_err(&agent_priv
->agent
.device
->dev
,
1462 "No memory for ib_mad_mgmt_class_table\n");
1467 /* Allocate method table for this management class */
1468 method
= &(*class)->method_table
[mgmt_class
];
1469 if ((ret
= allocate_method_table(method
)))
1472 method
= &(*class)->method_table
[mgmt_class
];
1474 /* Allocate method table for this management class */
1475 if ((ret
= allocate_method_table(method
)))
1480 /* Now, make sure methods are not already in use */
1481 if (method_in_use(method
, mad_reg_req
))
1484 /* Finally, add in methods being registered */
1485 for_each_set_bit(i
, mad_reg_req
->method_mask
, IB_MGMT_MAX_METHODS
)
1486 (*method
)->agent
[i
] = agent_priv
;
1491 /* Remove any methods for this mad agent */
1492 remove_methods_mad_agent(*method
, agent_priv
);
1493 /* Now, check to see if there are any methods in use */
1494 if (!check_method_table(*method
)) {
1495 /* If not, release management method table */
1508 static int add_oui_reg_req(struct ib_mad_reg_req
*mad_reg_req
,
1509 struct ib_mad_agent_private
*agent_priv
)
1511 struct ib_mad_port_private
*port_priv
;
1512 struct ib_mad_mgmt_vendor_class_table
**vendor_table
;
1513 struct ib_mad_mgmt_vendor_class_table
*vendor
= NULL
;
1514 struct ib_mad_mgmt_vendor_class
*vendor_class
= NULL
;
1515 struct ib_mad_mgmt_method_table
**method
;
1516 int i
, ret
= -ENOMEM
;
1519 /* "New" vendor (with OUI) class */
1520 vclass
= vendor_class_index(mad_reg_req
->mgmt_class
);
1521 port_priv
= agent_priv
->qp_info
->port_priv
;
1522 vendor_table
= &port_priv
->version
[
1523 mad_reg_req
->mgmt_class_version
].vendor
;
1524 if (!*vendor_table
) {
1525 /* Allocate mgmt vendor class table for "new" class version */
1526 vendor
= kzalloc(sizeof *vendor
, GFP_ATOMIC
);
1528 dev_err(&agent_priv
->agent
.device
->dev
,
1529 "No memory for ib_mad_mgmt_vendor_class_table\n");
1533 *vendor_table
= vendor
;
1535 if (!(*vendor_table
)->vendor_class
[vclass
]) {
1536 /* Allocate table for this management vendor class */
1537 vendor_class
= kzalloc(sizeof *vendor_class
, GFP_ATOMIC
);
1538 if (!vendor_class
) {
1539 dev_err(&agent_priv
->agent
.device
->dev
,
1540 "No memory for ib_mad_mgmt_vendor_class\n");
1544 (*vendor_table
)->vendor_class
[vclass
] = vendor_class
;
1546 for (i
= 0; i
< MAX_MGMT_OUI
; i
++) {
1547 /* Is there matching OUI for this vendor class ? */
1548 if (!memcmp((*vendor_table
)->vendor_class
[vclass
]->oui
[i
],
1549 mad_reg_req
->oui
, 3)) {
1550 method
= &(*vendor_table
)->vendor_class
[
1551 vclass
]->method_table
[i
];
1556 for (i
= 0; i
< MAX_MGMT_OUI
; i
++) {
1557 /* OUI slot available ? */
1558 if (!is_vendor_oui((*vendor_table
)->vendor_class
[
1560 method
= &(*vendor_table
)->vendor_class
[
1561 vclass
]->method_table
[i
];
1563 /* Allocate method table for this OUI */
1564 if ((ret
= allocate_method_table(method
)))
1566 memcpy((*vendor_table
)->vendor_class
[vclass
]->oui
[i
],
1567 mad_reg_req
->oui
, 3);
1571 dev_err(&agent_priv
->agent
.device
->dev
, "All OUI slots in use\n");
1575 /* Now, make sure methods are not already in use */
1576 if (method_in_use(method
, mad_reg_req
))
1579 /* Finally, add in methods being registered */
1580 for_each_set_bit(i
, mad_reg_req
->method_mask
, IB_MGMT_MAX_METHODS
)
1581 (*method
)->agent
[i
] = agent_priv
;
1586 /* Remove any methods for this mad agent */
1587 remove_methods_mad_agent(*method
, agent_priv
);
1588 /* Now, check to see if there are any methods in use */
1589 if (!check_method_table(*method
)) {
1590 /* If not, release management method table */
1597 (*vendor_table
)->vendor_class
[vclass
] = NULL
;
1598 kfree(vendor_class
);
1602 *vendor_table
= NULL
;
1609 static void remove_mad_reg_req(struct ib_mad_agent_private
*agent_priv
)
1611 struct ib_mad_port_private
*port_priv
;
1612 struct ib_mad_mgmt_class_table
*class;
1613 struct ib_mad_mgmt_method_table
*method
;
1614 struct ib_mad_mgmt_vendor_class_table
*vendor
;
1615 struct ib_mad_mgmt_vendor_class
*vendor_class
;
1620 * Was MAD registration request supplied
1621 * with original registration ?
1623 if (!agent_priv
->reg_req
) {
1627 port_priv
= agent_priv
->qp_info
->port_priv
;
1628 mgmt_class
= convert_mgmt_class(agent_priv
->reg_req
->mgmt_class
);
1629 class = port_priv
->version
[
1630 agent_priv
->reg_req
->mgmt_class_version
].class;
1634 method
= class->method_table
[mgmt_class
];
1636 /* Remove any methods for this mad agent */
1637 remove_methods_mad_agent(method
, agent_priv
);
1638 /* Now, check to see if there are any methods still in use */
1639 if (!check_method_table(method
)) {
1640 /* If not, release management method table */
1642 class->method_table
[mgmt_class
] = NULL
;
1643 /* Any management classes left ? */
1644 if (!check_class_table(class)) {
1645 /* If not, release management class table */
1648 agent_priv
->reg_req
->
1649 mgmt_class_version
].class = NULL
;
1655 if (!is_vendor_class(mgmt_class
))
1658 /* normalize mgmt_class to vendor range 2 */
1659 mgmt_class
= vendor_class_index(agent_priv
->reg_req
->mgmt_class
);
1660 vendor
= port_priv
->version
[
1661 agent_priv
->reg_req
->mgmt_class_version
].vendor
;
1666 vendor_class
= vendor
->vendor_class
[mgmt_class
];
1668 index
= find_vendor_oui(vendor_class
, agent_priv
->reg_req
->oui
);
1671 method
= vendor_class
->method_table
[index
];
1673 /* Remove any methods for this mad agent */
1674 remove_methods_mad_agent(method
, agent_priv
);
1676 * Now, check to see if there are
1677 * any methods still in use
1679 if (!check_method_table(method
)) {
1680 /* If not, release management method table */
1682 vendor_class
->method_table
[index
] = NULL
;
1683 memset(vendor_class
->oui
[index
], 0, 3);
1684 /* Any OUIs left ? */
1685 if (!check_vendor_class(vendor_class
)) {
1686 /* If not, release vendor class table */
1687 kfree(vendor_class
);
1688 vendor
->vendor_class
[mgmt_class
] = NULL
;
1689 /* Any other vendor classes left ? */
1690 if (!check_vendor_table(vendor
)) {
1693 agent_priv
->reg_req
->
1694 mgmt_class_version
].
1706 static struct ib_mad_agent_private
*
1707 find_mad_agent(struct ib_mad_port_private
*port_priv
,
1708 const struct ib_mad_hdr
*mad_hdr
)
1710 struct ib_mad_agent_private
*mad_agent
= NULL
;
1711 unsigned long flags
;
1713 spin_lock_irqsave(&port_priv
->reg_lock
, flags
);
1714 if (ib_response_mad(mad_hdr
)) {
1716 struct ib_mad_agent_private
*entry
;
1719 * Routing is based on high 32 bits of transaction ID
1722 hi_tid
= be64_to_cpu(mad_hdr
->tid
) >> 32;
1723 list_for_each_entry(entry
, &port_priv
->agent_list
, agent_list
) {
1724 if (entry
->agent
.hi_tid
== hi_tid
) {
1730 struct ib_mad_mgmt_class_table
*class;
1731 struct ib_mad_mgmt_method_table
*method
;
1732 struct ib_mad_mgmt_vendor_class_table
*vendor
;
1733 struct ib_mad_mgmt_vendor_class
*vendor_class
;
1734 const struct ib_vendor_mad
*vendor_mad
;
1738 * Routing is based on version, class, and method
1739 * For "newer" vendor MADs, also based on OUI
1741 if (mad_hdr
->class_version
>= MAX_MGMT_VERSION
)
1743 if (!is_vendor_class(mad_hdr
->mgmt_class
)) {
1744 class = port_priv
->version
[
1745 mad_hdr
->class_version
].class;
1748 if (convert_mgmt_class(mad_hdr
->mgmt_class
) >=
1749 IB_MGMT_MAX_METHODS
)
1751 method
= class->method_table
[convert_mgmt_class(
1752 mad_hdr
->mgmt_class
)];
1754 mad_agent
= method
->agent
[mad_hdr
->method
&
1755 ~IB_MGMT_METHOD_RESP
];
1757 vendor
= port_priv
->version
[
1758 mad_hdr
->class_version
].vendor
;
1761 vendor_class
= vendor
->vendor_class
[vendor_class_index(
1762 mad_hdr
->mgmt_class
)];
1765 /* Find matching OUI */
1766 vendor_mad
= (const struct ib_vendor_mad
*)mad_hdr
;
1767 index
= find_vendor_oui(vendor_class
, vendor_mad
->oui
);
1770 method
= vendor_class
->method_table
[index
];
1772 mad_agent
= method
->agent
[mad_hdr
->method
&
1773 ~IB_MGMT_METHOD_RESP
];
1779 if (mad_agent
->agent
.recv_handler
)
1780 atomic_inc(&mad_agent
->refcount
);
1782 dev_notice(&port_priv
->device
->dev
,
1783 "No receive handler for client %p on port %d\n",
1784 &mad_agent
->agent
, port_priv
->port_num
);
1789 spin_unlock_irqrestore(&port_priv
->reg_lock
, flags
);
1794 static int validate_mad(const struct ib_mad_hdr
*mad_hdr
,
1795 const struct ib_mad_qp_info
*qp_info
,
1799 u32 qp_num
= qp_info
->qp
->qp_num
;
1801 /* Make sure MAD base version is understood */
1802 if (mad_hdr
->base_version
!= IB_MGMT_BASE_VERSION
&&
1803 (!opa
|| mad_hdr
->base_version
!= OPA_MGMT_BASE_VERSION
)) {
1804 pr_err("MAD received with unsupported base version %d %s\n",
1805 mad_hdr
->base_version
, opa
? "(opa)" : "");
1809 /* Filter SMI packets sent to other than QP0 */
1810 if ((mad_hdr
->mgmt_class
== IB_MGMT_CLASS_SUBN_LID_ROUTED
) ||
1811 (mad_hdr
->mgmt_class
== IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
)) {
1815 /* CM attributes other than ClassPortInfo only use Send method */
1816 if ((mad_hdr
->mgmt_class
== IB_MGMT_CLASS_CM
) &&
1817 (mad_hdr
->attr_id
!= IB_MGMT_CLASSPORTINFO_ATTR_ID
) &&
1818 (mad_hdr
->method
!= IB_MGMT_METHOD_SEND
))
1820 /* Filter GSI packets sent to QP0 */
1829 static int is_rmpp_data_mad(const struct ib_mad_agent_private
*mad_agent_priv
,
1830 const struct ib_mad_hdr
*mad_hdr
)
1832 struct ib_rmpp_mad
*rmpp_mad
;
1834 rmpp_mad
= (struct ib_rmpp_mad
*)mad_hdr
;
1835 return !mad_agent_priv
->agent
.rmpp_version
||
1836 !ib_mad_kernel_rmpp_agent(&mad_agent_priv
->agent
) ||
1837 !(ib_get_rmpp_flags(&rmpp_mad
->rmpp_hdr
) &
1838 IB_MGMT_RMPP_FLAG_ACTIVE
) ||
1839 (rmpp_mad
->rmpp_hdr
.rmpp_type
== IB_MGMT_RMPP_TYPE_DATA
);
1842 static inline int rcv_has_same_class(const struct ib_mad_send_wr_private
*wr
,
1843 const struct ib_mad_recv_wc
*rwc
)
1845 return ((struct ib_mad_hdr
*)(wr
->send_buf
.mad
))->mgmt_class
==
1846 rwc
->recv_buf
.mad
->mad_hdr
.mgmt_class
;
1849 static inline int rcv_has_same_gid(const struct ib_mad_agent_private
*mad_agent_priv
,
1850 const struct ib_mad_send_wr_private
*wr
,
1851 const struct ib_mad_recv_wc
*rwc
)
1853 struct ib_ah_attr attr
;
1854 u8 send_resp
, rcv_resp
;
1856 struct ib_device
*device
= mad_agent_priv
->agent
.device
;
1857 u8 port_num
= mad_agent_priv
->agent
.port_num
;
1860 send_resp
= ib_response_mad((struct ib_mad_hdr
*)wr
->send_buf
.mad
);
1861 rcv_resp
= ib_response_mad(&rwc
->recv_buf
.mad
->mad_hdr
);
1863 if (send_resp
== rcv_resp
)
1864 /* both requests, or both responses. GIDs different */
1867 if (ib_query_ah(wr
->send_buf
.ah
, &attr
))
1868 /* Assume not equal, to avoid false positives. */
1871 if (!!(attr
.ah_flags
& IB_AH_GRH
) !=
1872 !!(rwc
->wc
->wc_flags
& IB_WC_GRH
))
1873 /* one has GID, other does not. Assume different */
1876 if (!send_resp
&& rcv_resp
) {
1877 /* is request/response. */
1878 if (!(attr
.ah_flags
& IB_AH_GRH
)) {
1879 if (ib_get_cached_lmc(device
, port_num
, &lmc
))
1881 return (!lmc
|| !((attr
.src_path_bits
^
1882 rwc
->wc
->dlid_path_bits
) &
1885 if (ib_get_cached_gid(device
, port_num
,
1886 attr
.grh
.sgid_index
, &sgid
, NULL
))
1888 return !memcmp(sgid
.raw
, rwc
->recv_buf
.grh
->dgid
.raw
,
1893 if (!(attr
.ah_flags
& IB_AH_GRH
))
1894 return attr
.dlid
== rwc
->wc
->slid
;
1896 return !memcmp(attr
.grh
.dgid
.raw
, rwc
->recv_buf
.grh
->sgid
.raw
,
static inline int is_direct(u8 class)
{
        return (class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE);
}
1905 struct ib_mad_send_wr_private
*
1906 ib_find_send_mad(const struct ib_mad_agent_private
*mad_agent_priv
,
1907 const struct ib_mad_recv_wc
*wc
)
1909 struct ib_mad_send_wr_private
*wr
;
1910 const struct ib_mad_hdr
*mad_hdr
;
1912 mad_hdr
= &wc
->recv_buf
.mad
->mad_hdr
;
1914 list_for_each_entry(wr
, &mad_agent_priv
->wait_list
, agent_list
) {
1915 if ((wr
->tid
== mad_hdr
->tid
) &&
1916 rcv_has_same_class(wr
, wc
) &&
1918 * Don't check GID for direct routed MADs.
1919 * These might have permissive LIDs.
1921 (is_direct(mad_hdr
->mgmt_class
) ||
1922 rcv_has_same_gid(mad_agent_priv
, wr
, wc
)))
1923 return (wr
->status
== IB_WC_SUCCESS
) ? wr
: NULL
;
1927 * It's possible to receive the response before we've
1928 * been notified that the send has completed
1930 list_for_each_entry(wr
, &mad_agent_priv
->send_list
, agent_list
) {
1931 if (is_rmpp_data_mad(mad_agent_priv
, wr
->send_buf
.mad
) &&
1932 wr
->tid
== mad_hdr
->tid
&&
1934 rcv_has_same_class(wr
, wc
) &&
1936 * Don't check GID for direct routed MADs.
1937 * These might have permissive LIDs.
1939 (is_direct(mad_hdr
->mgmt_class
) ||
1940 rcv_has_same_gid(mad_agent_priv
, wr
, wc
)))
1941 /* Verify request has not been canceled */
1942 return (wr
->status
== IB_WC_SUCCESS
) ? wr
: NULL
;
void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr)
{
        mad_send_wr->timeout = 0;
        if (mad_send_wr->refcount == 1)
                list_move_tail(&mad_send_wr->agent_list,
                               &mad_send_wr->mad_agent_priv->done_list);
}
1955 static void ib_mad_complete_recv(struct ib_mad_agent_private
*mad_agent_priv
,
1956 struct ib_mad_recv_wc
*mad_recv_wc
)
1958 struct ib_mad_send_wr_private
*mad_send_wr
;
1959 struct ib_mad_send_wc mad_send_wc
;
1960 unsigned long flags
;
1962 INIT_LIST_HEAD(&mad_recv_wc
->rmpp_list
);
1963 list_add(&mad_recv_wc
->recv_buf
.list
, &mad_recv_wc
->rmpp_list
);
1964 if (ib_mad_kernel_rmpp_agent(&mad_agent_priv
->agent
)) {
1965 mad_recv_wc
= ib_process_rmpp_recv_wc(mad_agent_priv
,
1968 deref_mad_agent(mad_agent_priv
);
1973 /* Complete corresponding request */
1974 if (ib_response_mad(&mad_recv_wc
->recv_buf
.mad
->mad_hdr
)) {
1975 spin_lock_irqsave(&mad_agent_priv
->lock
, flags
);
1976 mad_send_wr
= ib_find_send_mad(mad_agent_priv
, mad_recv_wc
);
1978 spin_unlock_irqrestore(&mad_agent_priv
->lock
, flags
);
1979 if (!ib_mad_kernel_rmpp_agent(&mad_agent_priv
->agent
)
1980 && ib_is_mad_class_rmpp(mad_recv_wc
->recv_buf
.mad
->mad_hdr
.mgmt_class
)
1981 && (ib_get_rmpp_flags(&((struct ib_rmpp_mad
*)mad_recv_wc
->recv_buf
.mad
)->rmpp_hdr
)
1982 & IB_MGMT_RMPP_FLAG_ACTIVE
)) {
1983 /* user rmpp is in effect
1984 * and this is an active RMPP MAD
1986 mad_agent_priv
->agent
.recv_handler(
1987 &mad_agent_priv
->agent
, NULL
,
1989 atomic_dec(&mad_agent_priv
->refcount
);
1991 /* not user rmpp, revert to normal behavior and
1993 ib_free_recv_mad(mad_recv_wc
);
1994 deref_mad_agent(mad_agent_priv
);
1998 ib_mark_mad_done(mad_send_wr
);
1999 spin_unlock_irqrestore(&mad_agent_priv
->lock
, flags
);
2001 /* Defined behavior is to complete response before request */
2002 mad_agent_priv
->agent
.recv_handler(
2003 &mad_agent_priv
->agent
,
2004 &mad_send_wr
->send_buf
,
2006 atomic_dec(&mad_agent_priv
->refcount
);
2008 mad_send_wc
.status
= IB_WC_SUCCESS
;
2009 mad_send_wc
.vendor_err
= 0;
2010 mad_send_wc
.send_buf
= &mad_send_wr
->send_buf
;
2011 ib_mad_complete_send_wr(mad_send_wr
, &mad_send_wc
);
2014 mad_agent_priv
->agent
.recv_handler(&mad_agent_priv
->agent
, NULL
,
2016 deref_mad_agent(mad_agent_priv
);
2020 static enum smi_action
handle_ib_smi(const struct ib_mad_port_private
*port_priv
,
2021 const struct ib_mad_qp_info
*qp_info
,
2022 const struct ib_wc
*wc
,
2024 struct ib_mad_private
*recv
,
2025 struct ib_mad_private
*response
)
2027 enum smi_forward_action retsmi
;
2028 struct ib_smp
*smp
= (struct ib_smp
*)recv
->mad
;
2030 if (smi_handle_dr_smp_recv(smp
,
2031 rdma_cap_ib_switch(port_priv
->device
),
2033 port_priv
->device
->phys_port_cnt
) ==
2035 return IB_SMI_DISCARD
;
2037 retsmi
= smi_check_forward_dr_smp(smp
);
2038 if (retsmi
== IB_SMI_LOCAL
)
2039 return IB_SMI_HANDLE
;
2041 if (retsmi
== IB_SMI_SEND
) { /* don't forward */
2042 if (smi_handle_dr_smp_send(smp
,
2043 rdma_cap_ib_switch(port_priv
->device
),
2044 port_num
) == IB_SMI_DISCARD
)
2045 return IB_SMI_DISCARD
;
2047 if (smi_check_local_smp(smp
, port_priv
->device
) == IB_SMI_DISCARD
)
2048 return IB_SMI_DISCARD
;
2049 } else if (rdma_cap_ib_switch(port_priv
->device
)) {
2050 /* forward case for switches */
2051 memcpy(response
, recv
, mad_priv_size(response
));
2052 response
->header
.recv_wc
.wc
= &response
->header
.wc
;
2053 response
->header
.recv_wc
.recv_buf
.mad
= (struct ib_mad
*)response
->mad
;
2054 response
->header
.recv_wc
.recv_buf
.grh
= &response
->grh
;
2056 agent_send_response((const struct ib_mad_hdr
*)response
->mad
,
2059 smi_get_fwd_port(smp
),
2060 qp_info
->qp
->qp_num
,
2064 return IB_SMI_DISCARD
;
2066 return IB_SMI_HANDLE
;
static bool generate_unmatched_resp(const struct ib_mad_private *recv,
				    struct ib_mad_private *response,
				    size_t *resp_len, bool opa)
{
	const struct ib_mad_hdr *recv_hdr = (const struct ib_mad_hdr *)recv->mad;
	struct ib_mad_hdr *resp_hdr = (struct ib_mad_hdr *)response->mad;

	if (recv_hdr->method == IB_MGMT_METHOD_GET ||
	    recv_hdr->method == IB_MGMT_METHOD_SET) {
		memcpy(response, recv, mad_priv_size(response));
		response->header.recv_wc.wc = &response->header.wc;
		response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad;
		response->header.recv_wc.recv_buf.grh = &response->grh;
		resp_hdr->method = IB_MGMT_METHOD_GET_RESP;
		resp_hdr->status = cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB);
		if (recv_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
			resp_hdr->status |= IB_SMP_DIRECTION;

		if (opa && recv_hdr->base_version == OPA_MGMT_BASE_VERSION) {
			if (recv_hdr->mgmt_class ==
			    IB_MGMT_CLASS_SUBN_LID_ROUTED ||
			    recv_hdr->mgmt_class ==
			    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
				*resp_len = opa_get_smp_header_size(
							(struct opa_smp *)recv->mad);
			else
				*resp_len = sizeof(struct ib_mad_hdr);
		}

		return true;
	} else {
		return false;
	}
}
static enum smi_action
handle_opa_smi(struct ib_mad_port_private *port_priv,
	       struct ib_mad_qp_info *qp_info,
	       struct ib_wc *wc,
	       int port_num,
	       struct ib_mad_private *recv,
	       struct ib_mad_private *response)
{
	enum smi_forward_action retsmi;
	struct opa_smp *smp = (struct opa_smp *)recv->mad;

	if (opa_smi_handle_dr_smp_recv(smp,
				   rdma_cap_ib_switch(port_priv->device),
				   port_num,
				   port_priv->device->phys_port_cnt) ==
				   IB_SMI_DISCARD)
		return IB_SMI_DISCARD;

	retsmi = opa_smi_check_forward_dr_smp(smp);
	if (retsmi == IB_SMI_LOCAL)
		return IB_SMI_HANDLE;

	if (retsmi == IB_SMI_SEND) { /* don't forward */
		if (opa_smi_handle_dr_smp_send(smp,
					   rdma_cap_ib_switch(port_priv->device),
					   port_num) == IB_SMI_DISCARD)
			return IB_SMI_DISCARD;

		if (opa_smi_check_local_smp(smp, port_priv->device) ==
		    IB_SMI_DISCARD)
			return IB_SMI_DISCARD;

	} else if (rdma_cap_ib_switch(port_priv->device)) {
		/* forward case for switches */
		memcpy(response, recv, mad_priv_size(response));
		response->header.recv_wc.wc = &response->header.wc;
		response->header.recv_wc.recv_buf.opa_mad =
				(struct opa_mad *)response->mad;
		response->header.recv_wc.recv_buf.grh = &response->grh;

		agent_send_response((const struct ib_mad_hdr *)response->mad,
				    &response->grh, wc,
				    port_priv->device,
				    opa_smi_get_fwd_port(smp),
				    qp_info->qp->qp_num,
				    recv->header.wc.byte_len,
				    true);

		return IB_SMI_DISCARD;
	}

	return IB_SMI_HANDLE;
}
static enum smi_action
handle_smi(struct ib_mad_port_private *port_priv,
	   struct ib_mad_qp_info *qp_info,
	   struct ib_wc *wc,
	   int port_num,
	   struct ib_mad_private *recv,
	   struct ib_mad_private *response,
	   bool opa)
{
	struct ib_mad_hdr *mad_hdr = (struct ib_mad_hdr *)recv->mad;

	if (opa && mad_hdr->base_version == OPA_MGMT_BASE_VERSION &&
	    mad_hdr->class_version == OPA_SMI_CLASS_VERSION)
		return handle_opa_smi(port_priv, qp_info, wc, port_num, recv,
				      response);

	return handle_ib_smi(port_priv, qp_info, wc, port_num, recv, response);
}
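/*
 * Receive completion path: unmap the buffer, validate the MAD, give the
 * device driver first refusal via process_mad(), and only then hand the
 * MAD to a matching registered agent. Whichever buffer is left over
 * (response or original receive) is reposted to keep the receive queue
 * full.
 */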
static void ib_mad_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_mad_port_private *port_priv = cq->cq_context;
	struct ib_mad_list_head *mad_list =
		container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
	struct ib_mad_qp_info *qp_info;
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *recv, *response = NULL;
	struct ib_mad_agent_private *mad_agent;
	int port_num;
	int ret = IB_MAD_RESULT_SUCCESS;
	size_t mad_size;
	u16 resp_mad_pkey_index = 0;
	bool opa;

	if (list_empty_careful(&port_priv->port_list))
		return;

	if (wc->status != IB_WC_SUCCESS) {
		/*
		 * Receive errors indicate that the QP has entered the error
		 * state - error handling/shutdown code will cleanup
		 */
		return;
	}

	qp_info = mad_list->mad_queue->qp_info;
	dequeue_mad(mad_list);

	opa = rdma_cap_opa_mad(qp_info->port_priv->device,
			       qp_info->port_priv->port_num);

	mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
				    mad_list);
	recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
	ib_dma_unmap_single(port_priv->device,
			    recv->header.mapping,
			    mad_priv_dma_size(recv),
			    DMA_FROM_DEVICE);

	/* Setup MAD receive work completion from "normal" work completion */
	recv->header.wc = *wc;
	recv->header.recv_wc.wc = &recv->header.wc;

	if (opa && ((struct ib_mad_hdr *)(recv->mad))->base_version == OPA_MGMT_BASE_VERSION) {
		recv->header.recv_wc.mad_len = wc->byte_len - sizeof(struct ib_grh);
		recv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad);
	} else {
		recv->header.recv_wc.mad_len = sizeof(struct ib_mad);
		recv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad);
	}

	recv->header.recv_wc.recv_buf.mad = (struct ib_mad *)recv->mad;
	recv->header.recv_wc.recv_buf.grh = &recv->grh;

	if (atomic_read(&qp_info->snoop_count))
		snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS);

	/* Validate MAD */
	if (!validate_mad((const struct ib_mad_hdr *)recv->mad, qp_info, opa))
		goto out;

	mad_size = recv->mad_size;
	response = alloc_mad_private(mad_size, GFP_KERNEL);
	if (!response) {
		dev_err(&port_priv->device->dev,
			"%s: no memory for response buffer\n", __func__);
		goto out;
	}

	if (rdma_cap_ib_switch(port_priv->device))
		port_num = wc->port_num;
	else
		port_num = port_priv->port_num;

	if (((struct ib_mad_hdr *)recv->mad)->mgmt_class ==
	    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
		if (handle_smi(port_priv, qp_info, wc, port_num, recv,
			       response, opa)
		    == IB_SMI_DISCARD)
			goto out;
	}

	/* Give driver "right of first refusal" on incoming MAD */
	if (port_priv->device->process_mad) {
		ret = port_priv->device->process_mad(port_priv->device, 0,
						     port_priv->port_num,
						     wc, &recv->grh,
						     (const struct ib_mad_hdr *)recv->mad,
						     recv->mad_size,
						     (struct ib_mad_hdr *)response->mad,
						     &mad_size, &resp_mad_pkey_index);

		if (opa)
			wc->pkey_index = resp_mad_pkey_index;

		if (ret & IB_MAD_RESULT_SUCCESS) {
			if (ret & IB_MAD_RESULT_CONSUMED)
				goto out;
			if (ret & IB_MAD_RESULT_REPLY) {
				agent_send_response((const struct ib_mad_hdr *)response->mad,
						    &recv->grh, wc,
						    port_priv->device,
						    port_num,
						    qp_info->qp->qp_num,
						    mad_size, opa);
				goto out;
			}
		}
	}

	mad_agent = find_mad_agent(port_priv, (const struct ib_mad_hdr *)recv->mad);
	if (mad_agent) {
		ib_mad_complete_recv(mad_agent, &recv->header.recv_wc);
		/*
		 * recv is freed up in error cases in ib_mad_complete_recv
		 * or via recv_handler in ib_mad_complete_recv()
		 */
		recv = NULL;
	} else if ((ret & IB_MAD_RESULT_SUCCESS) &&
		   generate_unmatched_resp(recv, response, &mad_size, opa)) {
		agent_send_response((const struct ib_mad_hdr *)response->mad, &recv->grh, wc,
				    port_priv->device, port_num,
				    qp_info->qp->qp_num, mad_size, opa);
	}

out:
	/* Post another receive request for this QP */
	if (response) {
		ib_mad_post_receive_mads(qp_info, response);
		kfree(recv);
	} else
		ib_mad_post_receive_mads(qp_info, recv);
}
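/*
 * Timeout management: sends awaiting a response sit on the agent's
 * wait_list, ordered by absolute timeout (in jiffies). A single delayed
 * work item per agent is (re)armed for the earliest deadline.
 */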
static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long delay;

	if (list_empty(&mad_agent_priv->wait_list)) {
		cancel_delayed_work(&mad_agent_priv->timed_work);
	} else {
		mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
					 struct ib_mad_send_wr_private,
					 agent_list);

		if (time_after(mad_agent_priv->timeout,
			       mad_send_wr->timeout)) {
			mad_agent_priv->timeout = mad_send_wr->timeout;
			delay = mad_send_wr->timeout - jiffies;
			if ((long)delay <= 0)
				delay = 1;
			mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
					 &mad_agent_priv->timed_work, delay);
		}
	}
}

static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *temp_mad_send_wr;
	struct list_head *list_item;
	unsigned long delay;

	mad_agent_priv = mad_send_wr->mad_agent_priv;
	list_del(&mad_send_wr->agent_list);

	delay = mad_send_wr->timeout;
	mad_send_wr->timeout += jiffies;

	if (delay) {
		list_for_each_prev(list_item, &mad_agent_priv->wait_list) {
			temp_mad_send_wr = list_entry(list_item,
						struct ib_mad_send_wr_private,
						agent_list);
			if (time_after(mad_send_wr->timeout,
				       temp_mad_send_wr->timeout))
				break;
		}
	} else
		list_item = &mad_agent_priv->wait_list;
	list_add(&mad_send_wr->agent_list, list_item);

	/* Reschedule a work item if we have a shorter timeout */
	if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list)
		mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
				 &mad_agent_priv->timed_work, delay);
}

void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
			  int timeout_ms)
{
	mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
	wait_for_response(mad_send_wr);
}
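/*
 * A send work request holds one reference for the send completion and,
 * when a response is expected (timeout != 0), a second reference that is
 * dropped when the response arrives or the request times out. The
 * completion handler below therefore only notifies the client once the
 * last reference is gone.
 */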
/*
 * Process a send work completion
 */
void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
			     struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_mad_agent_private *mad_agent_priv;
	unsigned long flags;
	int ret;

	mad_agent_priv = mad_send_wr->mad_agent_priv;
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
		ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc);
		if (ret == IB_RMPP_RESULT_CONSUMED)
			goto done;
	} else
		ret = IB_RMPP_RESULT_UNHANDLED;

	if (mad_send_wc->status != IB_WC_SUCCESS &&
	    mad_send_wr->status == IB_WC_SUCCESS) {
		mad_send_wr->status = mad_send_wc->status;
		mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
	}

	if (--mad_send_wr->refcount > 0) {
		if (mad_send_wr->refcount == 1 && mad_send_wr->timeout &&
		    mad_send_wr->status == IB_WC_SUCCESS) {
			wait_for_response(mad_send_wr);
		}
		goto done;
	}

	/* Remove send from MAD agent and notify client of completion */
	list_del(&mad_send_wr->agent_list);
	adjust_timeout(mad_agent_priv);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

	if (mad_send_wr->status != IB_WC_SUCCESS)
		mad_send_wc->status = mad_send_wr->status;
	if (ret == IB_RMPP_RESULT_INTERNAL)
		ib_rmpp_send_handler(mad_send_wc);
	else
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   mad_send_wc);

	/* Release reference on agent taken when sending */
	deref_mad_agent(mad_agent_priv);
	return;
done:
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}
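/*
 * Send completion path: when the hardware send queue was full at post
 * time, extra requests were parked on qp_info->overflow_list. Each send
 * completion below frees a slot and moves one parked request onto the
 * real send queue, posting it to the QP.
 */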
static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_mad_port_private *port_priv = cq->cq_context;
	struct ib_mad_list_head *mad_list =
		container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
	struct ib_mad_send_wr_private *mad_send_wr, *queued_send_wr;
	struct ib_mad_qp_info *qp_info;
	struct ib_mad_queue *send_queue;
	struct ib_send_wr *bad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	unsigned long flags;
	int ret;

	if (list_empty_careful(&port_priv->port_list))
		return;

	if (wc->status != IB_WC_SUCCESS) {
		if (!ib_mad_send_error(port_priv, wc))
			return;
	}

	mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
				   mad_list);
	send_queue = mad_list->mad_queue;
	qp_info = send_queue->qp_info;

retry:
	ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
			    mad_send_wr->header_mapping,
			    mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
	ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
			    mad_send_wr->payload_mapping,
			    mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
	queued_send_wr = NULL;
	spin_lock_irqsave(&send_queue->lock, flags);
	list_del(&mad_list->list);

	/* Move queued send to the send queue */
	if (send_queue->count-- > send_queue->max_active) {
		mad_list = container_of(qp_info->overflow_list.next,
					struct ib_mad_list_head, list);
		queued_send_wr = container_of(mad_list,
					struct ib_mad_send_wr_private,
					mad_list);
		list_move_tail(&mad_list->list, &send_queue->list);
	}
	spin_unlock_irqrestore(&send_queue->lock, flags);

	mad_send_wc.send_buf = &mad_send_wr->send_buf;
	mad_send_wc.status = wc->status;
	mad_send_wc.vendor_err = wc->vendor_err;
	if (atomic_read(&qp_info->snoop_count))
		snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc,
			   IB_MAD_SNOOP_SEND_COMPLETIONS);
	ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);

	if (queued_send_wr) {
		ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr.wr,
				   &bad_send_wr);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"ib_post_send failed: %d\n", ret);
			mad_send_wr = queued_send_wr;
			wc->status = IB_WC_LOC_QP_OP_ERR;
			goto retry;
		}
	}
}
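/*
 * Send error recovery: a completion error moves the QP to the SQE state
 * and flushes the remaining sends. ib_mad_send_error() below either
 * reposts a flushed request (flush errors) or transitions the QP back to
 * RTS and marks everything still queued for retry.
 */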
static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_list_head *mad_list;
	unsigned long flags;

	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
	list_for_each_entry(mad_list, &qp_info->send_queue.list, list) {
		mad_send_wr = container_of(mad_list,
					   struct ib_mad_send_wr_private,
					   mad_list);
		mad_send_wr->retry = 1;
	}
	spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
}

static bool ib_mad_send_error(struct ib_mad_port_private *port_priv,
			      struct ib_wc *wc)
{
	struct ib_mad_list_head *mad_list =
		container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
	struct ib_mad_qp_info *qp_info = mad_list->mad_queue->qp_info;
	struct ib_mad_send_wr_private *mad_send_wr;
	int ret;

	/*
	 * Send errors will transition the QP to SQE - move
	 * QP to RTS and repost flushed work requests
	 */
	mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
				   mad_list);
	if (wc->status == IB_WC_WR_FLUSH_ERR) {
		if (mad_send_wr->retry) {
			/* Repost send */
			struct ib_send_wr *bad_send_wr;

			mad_send_wr->retry = 0;
			ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr.wr,
					   &bad_send_wr);
			if (!ret)
				return false;
		}
	} else {
		struct ib_qp_attr *attr;

		/* Transition QP to RTS and fail offending send */
		attr = kmalloc(sizeof *attr, GFP_KERNEL);
		if (attr) {
			attr->qp_state = IB_QPS_RTS;
			attr->cur_qp_state = IB_QPS_SQE;
			ret = ib_modify_qp(qp_info->qp, attr,
					   IB_QP_STATE | IB_QP_CUR_STATE);
			kfree(attr);
			if (ret)
				dev_err(&port_priv->device->dev,
					"%s - ib_modify_qp to RTS: %d\n",
					__func__, ret);
			else
				mark_sends_for_retry(qp_info);
		}
	}

	return true;
}
static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
{
	unsigned long flags;
	struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	struct list_head cancel_list;

	INIT_LIST_HEAD(&cancel_list);

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
				 &mad_agent_priv->send_list, agent_list) {
		if (mad_send_wr->status == IB_WC_SUCCESS) {
			mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
			mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
		}
	}

	/* Empty wait list to prevent receives from finding a request */
	list_splice_init(&mad_agent_priv->wait_list, &cancel_list);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

	/* Report all cancelled requests */
	mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
	mad_send_wc.vendor_err = 0;

	list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
				 &cancel_list, agent_list) {
		mad_send_wc.send_buf = &mad_send_wr->send_buf;
		list_del(&mad_send_wr->agent_list);
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);
		atomic_dec(&mad_agent_priv->refcount);
	}
}
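/*
 * ib_modify_mad() with a zero timeout is how ib_cancel_mad() aborts an
 * outstanding request: the request is flagged with IB_WC_WR_FLUSH_ERR and
 * the client's send handler eventually reports that status. A non-zero
 * timeout simply reschedules the response deadline.
 */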
static struct ib_mad_send_wr_private *
find_send_wr(struct ib_mad_agent_private *mad_agent_priv,
	     struct ib_mad_send_buf *send_buf)
{
	struct ib_mad_send_wr_private *mad_send_wr;

	list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
			    agent_list) {
		if (&mad_send_wr->send_buf == send_buf)
			return mad_send_wr;
	}

	list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
			    agent_list) {
		if (is_rmpp_data_mad(mad_agent_priv,
				     mad_send_wr->send_buf.mad) &&
		    &mad_send_wr->send_buf == send_buf)
			return mad_send_wr;
	}
	return NULL;
}

int ib_modify_mad(struct ib_mad_agent *mad_agent,
		  struct ib_mad_send_buf *send_buf, u32 timeout_ms)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long flags;
	int active;

	mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
				      agent);
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	mad_send_wr = find_send_wr(mad_agent_priv, send_buf);
	if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) {
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
		return -EINVAL;
	}

	active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1);
	if (!timeout_ms) {
		mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
		mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
	}

	mad_send_wr->send_buf.timeout_ms = timeout_ms;
	if (active)
		mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
	else
		ib_reset_mad_timeout(mad_send_wr, timeout_ms);

	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
	return 0;
}
EXPORT_SYMBOL(ib_modify_mad);

void ib_cancel_mad(struct ib_mad_agent *mad_agent,
		   struct ib_mad_send_buf *send_buf)
{
	ib_modify_mad(mad_agent, send_buf, 0);
}
EXPORT_SYMBOL(ib_cancel_mad);
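/*
 * MADs addressed to the local port never reach the wire; they are queued
 * on the agent's local_list and completed from this work handler, which
 * builds a synthetic work completion and invokes the receive handler of
 * the agent registered for that class before completing the send.
 */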
static void local_completions(struct work_struct *work)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_local_private *local;
	struct ib_mad_agent_private *recv_mad_agent;
	unsigned long flags;
	int free_mad;
	struct ib_wc wc;
	struct ib_mad_send_wc mad_send_wc;
	bool opa;

	mad_agent_priv =
		container_of(work, struct ib_mad_agent_private, local_work);

	opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
			       mad_agent_priv->qp_info->port_priv->port_num);

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	while (!list_empty(&mad_agent_priv->local_list)) {
		local = list_entry(mad_agent_priv->local_list.next,
				   struct ib_mad_local_private,
				   completion_list);
		list_del(&local->completion_list);
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
		free_mad = 0;
		if (local->mad_priv) {
			u8 base_version;

			recv_mad_agent = local->recv_mad_agent;
			if (!recv_mad_agent) {
				dev_err(&mad_agent_priv->agent.device->dev,
					"No receive MAD agent for local completion\n");
				free_mad = 1;
				goto local_send_completion;
			}

			/*
			 * Defined behavior is to complete response
			 * before request
			 */
			build_smp_wc(recv_mad_agent->agent.qp,
				     local->mad_send_wr->send_wr.wr.wr_cqe,
				     be16_to_cpu(IB_LID_PERMISSIVE),
				     local->mad_send_wr->send_wr.pkey_index,
				     recv_mad_agent->agent.port_num, &wc);

			local->mad_priv->header.recv_wc.wc = &wc;

			base_version = ((struct ib_mad_hdr *)(local->mad_priv->mad))->base_version;
			if (opa && base_version == OPA_MGMT_BASE_VERSION) {
				local->mad_priv->header.recv_wc.mad_len = local->return_wc_byte_len;
				local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad);
			} else {
				local->mad_priv->header.recv_wc.mad_len = sizeof(struct ib_mad);
				local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad);
			}

			INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list);
			list_add(&local->mad_priv->header.recv_wc.recv_buf.list,
				 &local->mad_priv->header.recv_wc.rmpp_list);
			local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
			local->mad_priv->header.recv_wc.recv_buf.mad =
						(struct ib_mad *)local->mad_priv->mad;
			if (atomic_read(&recv_mad_agent->qp_info->snoop_count))
				snoop_recv(recv_mad_agent->qp_info,
					   &local->mad_priv->header.recv_wc,
					   IB_MAD_SNOOP_RECVS);
			recv_mad_agent->agent.recv_handler(
						&recv_mad_agent->agent,
						&local->mad_send_wr->send_buf,
						&local->mad_priv->header.recv_wc);
			spin_lock_irqsave(&recv_mad_agent->lock, flags);
			atomic_dec(&recv_mad_agent->refcount);
			spin_unlock_irqrestore(&recv_mad_agent->lock, flags);
		}

local_send_completion:
		/* Complete send */
		mad_send_wc.status = IB_WC_SUCCESS;
		mad_send_wc.vendor_err = 0;
		mad_send_wc.send_buf = &local->mad_send_wr->send_buf;
		if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
			snoop_send(mad_agent_priv->qp_info,
				   &local->mad_send_wr->send_buf,
				   &mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS);
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);

		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		atomic_dec(&mad_agent_priv->refcount);
		if (free_mad)
			kfree(local->mad_priv);
		kfree(local);
	}
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}
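/*
 * retry_send() decrements retries_left, bumps send_buf.retries and
 * reposts the request; timeout_sends() walks the wait_list from the
 * agent's delayed work item and reports IB_WC_RESP_TIMEOUT_ERR for
 * requests whose retries are exhausted.
 */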
static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
{
	int ret;

	if (!mad_send_wr->retries_left)
		return -ETIMEDOUT;

	mad_send_wr->retries_left--;
	mad_send_wr->send_buf.retries++;

	mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);

	if (ib_mad_kernel_rmpp_agent(&mad_send_wr->mad_agent_priv->agent)) {
		ret = ib_retry_rmpp(mad_send_wr);
		switch (ret) {
		case IB_RMPP_RESULT_UNHANDLED:
			ret = ib_send_mad(mad_send_wr);
			break;
		case IB_RMPP_RESULT_CONSUMED:
			ret = 0;
			break;
		default:
			ret = -ECOMM;
			break;
		}
	} else
		ret = ib_send_mad(mad_send_wr);

	if (!ret) {
		mad_send_wr->refcount++;
		list_add_tail(&mad_send_wr->agent_list,
			      &mad_send_wr->mad_agent_priv->send_list);
	}
	return ret;
}
static void timeout_sends(struct work_struct *work)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	unsigned long flags, delay;

	mad_agent_priv = container_of(work, struct ib_mad_agent_private,
				      timed_work.work);
	mad_send_wc.vendor_err = 0;

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	while (!list_empty(&mad_agent_priv->wait_list)) {
		mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
					 struct ib_mad_send_wr_private,
					 agent_list);

		if (time_after(mad_send_wr->timeout, jiffies)) {
			delay = mad_send_wr->timeout - jiffies;
			if ((long)delay <= 0)
				delay = 1;
			queue_delayed_work(mad_agent_priv->qp_info->
					   port_priv->wq,
					   &mad_agent_priv->timed_work, delay);
			break;
		}

		list_del(&mad_send_wr->agent_list);
		if (mad_send_wr->status == IB_WC_SUCCESS &&
		    !retry_send(mad_send_wr))
			continue;

		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

		if (mad_send_wr->status == IB_WC_SUCCESS)
			mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
		else
			mad_send_wc.status = mad_send_wr->status;
		mad_send_wc.send_buf = &mad_send_wr->send_buf;
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);

		atomic_dec(&mad_agent_priv->refcount);
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
	}
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}
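/*
 * The receive queue is kept topped up: each buffer is DMA-mapped, linked
 * onto recv_queue->list and posted, and the loop below continues until
 * the queue reaches max_active or posting fails.
 */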
/*
 * Allocate receive MADs and post receive WRs for them
 */
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
				    struct ib_mad_private *mad)
{
	unsigned long flags;
	int post, ret;
	struct ib_mad_private *mad_priv;
	struct ib_sge sg_list;
	struct ib_recv_wr recv_wr, *bad_recv_wr;
	struct ib_mad_queue *recv_queue = &qp_info->recv_queue;

	/* Initialize common scatter list fields */
	sg_list.lkey = qp_info->port_priv->pd->local_dma_lkey;

	/* Initialize common receive WR fields */
	recv_wr.next = NULL;
	recv_wr.sg_list = &sg_list;
	recv_wr.num_sge = 1;

	do {
		/* Allocate and map receive buffer */
		if (mad) {
			mad_priv = mad;
			mad = NULL;
		} else {
			mad_priv = alloc_mad_private(port_mad_size(qp_info->port_priv),
						     GFP_ATOMIC);
			if (!mad_priv) {
				dev_err(&qp_info->port_priv->device->dev,
					"No memory for receive buffer\n");
				ret = -ENOMEM;
				break;
			}
		}
		sg_list.length = mad_priv_dma_size(mad_priv);
		sg_list.addr = ib_dma_map_single(qp_info->port_priv->device,
						 &mad_priv->grh,
						 mad_priv_dma_size(mad_priv),
						 DMA_FROM_DEVICE);
		if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device,
						  sg_list.addr))) {
			ret = -ENOMEM;
			break;
		}
		mad_priv->header.mapping = sg_list.addr;
		mad_priv->header.mad_list.mad_queue = recv_queue;
		mad_priv->header.mad_list.cqe.done = ib_mad_recv_done;
		recv_wr.wr_cqe = &mad_priv->header.mad_list.cqe;

		/* Post receive WR */
		spin_lock_irqsave(&recv_queue->lock, flags);
		post = (++recv_queue->count < recv_queue->max_active);
		list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list);
		spin_unlock_irqrestore(&recv_queue->lock, flags);
		ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr);
		if (ret) {
			spin_lock_irqsave(&recv_queue->lock, flags);
			list_del(&mad_priv->header.mad_list.list);
			recv_queue->count--;
			spin_unlock_irqrestore(&recv_queue->lock, flags);
			ib_dma_unmap_single(qp_info->port_priv->device,
					    mad_priv->header.mapping,
					    mad_priv_dma_size(mad_priv),
					    DMA_FROM_DEVICE);
			kfree(mad_priv);
			dev_err(&qp_info->port_priv->device->dev,
				"ib_post_recv failed: %d\n", ret);
			break;
		}
	} while (post);

	return ret;
}
/*
 * Return all the posted receive MADs
 */
static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
{
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *recv;
	struct ib_mad_list_head *mad_list;

	if (!qp_info->qp)
		return;

	while (!list_empty(&qp_info->recv_queue.list)) {

		mad_list = list_entry(qp_info->recv_queue.list.next,
				      struct ib_mad_list_head, list);
		mad_priv_hdr = container_of(mad_list,
					    struct ib_mad_private_header,
					    mad_list);
		recv = container_of(mad_priv_hdr, struct ib_mad_private,
				    header);

		/* Remove from posted receive MAD list */
		list_del(&mad_list->list);

		ib_dma_unmap_single(qp_info->port_priv->device,
				    recv->header.mapping,
				    mad_priv_dma_size(recv),
				    DMA_FROM_DEVICE);
		kfree(recv);
	}

	qp_info->recv_queue.count = 0;
}
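/*
 * Bringing up the special QPs only requires the RESET->INIT->RTR->RTS
 * transitions; no address or path information is needed for QP0/QP1.
 * Once the QPs reach RTS, completion notification is requested and the
 * receive queues are populated.
 */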
static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
{
	int ret, i;
	struct ib_qp_attr *attr;
	struct ib_qp *qp;
	u16 pkey_index;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr) {
		dev_err(&port_priv->device->dev,
			"Couldn't kmalloc ib_qp_attr\n");
		return -ENOMEM;
	}

	ret = ib_find_pkey(port_priv->device, port_priv->port_num,
			   IB_DEFAULT_PKEY_FULL, &pkey_index);
	if (ret)
		pkey_index = 0;

	for (i = 0; i < IB_MAD_QPS_CORE; i++) {
		qp = port_priv->qp_info[i].qp;
		if (!qp)
			continue;

		/*
		 * PKey index for QP1 is irrelevant but
		 * one is needed for the Reset to Init transition
		 */
		attr->qp_state = IB_QPS_INIT;
		attr->pkey_index = pkey_index;
		attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE |
					     IB_QP_PKEY_INDEX | IB_QP_QKEY);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"Couldn't change QP%d state to INIT: %d\n",
				i, ret);
			goto out;
		}

		attr->qp_state = IB_QPS_RTR;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"Couldn't change QP%d state to RTR: %d\n",
				i, ret);
			goto out;
		}

		attr->qp_state = IB_QPS_RTS;
		attr->sq_psn = IB_MAD_SEND_Q_PSN;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"Couldn't change QP%d state to RTS: %d\n",
				i, ret);
			goto out;
		}
	}

	ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
	if (ret) {
		dev_err(&port_priv->device->dev,
			"Failed to request completion notification: %d\n",
			ret);
		goto out;
	}

	for (i = 0; i < IB_MAD_QPS_CORE; i++) {
		if (!port_priv->qp_info[i].qp)
			continue;

		ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"Couldn't post receive WRs\n");
			goto out;
		}
	}
out:
	kfree(attr);
	return ret;
}
static void qp_event_handler(struct ib_event *event, void *qp_context)
{
	struct ib_mad_qp_info *qp_info = qp_context;

	/* It's worse than that! He's dead, Jim! */
	dev_err(&qp_info->port_priv->device->dev,
		"Fatal error (%d) on MAD QP (%d)\n",
		event->event, qp_info->qp->qp_num);
}

static void init_mad_queue(struct ib_mad_qp_info *qp_info,
			   struct ib_mad_queue *mad_queue)
{
	mad_queue->qp_info = qp_info;
	mad_queue->count = 0;
	spin_lock_init(&mad_queue->lock);
	INIT_LIST_HEAD(&mad_queue->list);
}

static void init_mad_qp(struct ib_mad_port_private *port_priv,
			struct ib_mad_qp_info *qp_info)
{
	qp_info->port_priv = port_priv;
	init_mad_queue(qp_info, &qp_info->send_queue);
	init_mad_queue(qp_info, &qp_info->recv_queue);
	INIT_LIST_HEAD(&qp_info->overflow_list);
	spin_lock_init(&qp_info->snoop_lock);
	qp_info->snoop_table = NULL;
	qp_info->snoop_table_size = 0;
	atomic_set(&qp_info->snoop_count, 0);
}
static int create_mad_qp(struct ib_mad_qp_info *qp_info,
			 enum ib_qp_type qp_type)
{
	struct ib_qp_init_attr qp_init_attr;
	int ret;

	memset(&qp_init_attr, 0, sizeof qp_init_attr);
	qp_init_attr.send_cq = qp_info->port_priv->cq;
	qp_init_attr.recv_cq = qp_info->port_priv->cq;
	qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
	qp_init_attr.cap.max_send_wr = mad_sendq_size;
	qp_init_attr.cap.max_recv_wr = mad_recvq_size;
	qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG;
	qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG;
	qp_init_attr.qp_type = qp_type;
	qp_init_attr.port_num = qp_info->port_priv->port_num;
	qp_init_attr.qp_context = qp_info;
	qp_init_attr.event_handler = qp_event_handler;
	qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
	if (IS_ERR(qp_info->qp)) {
		dev_err(&qp_info->port_priv->device->dev,
			"Couldn't create ib_mad QP%d\n",
			get_spl_qp_index(qp_type));
		ret = PTR_ERR(qp_info->qp);
		goto error;
	}
	/* Use minimum queue sizes unless the CQ is resized */
	qp_info->send_queue.max_active = mad_sendq_size;
	qp_info->recv_queue.max_active = mad_recvq_size;
	return 0;

error:
	return ret;
}

static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
{
	if (!qp_info->qp)
		return;

	ib_destroy_qp(qp_info->qp);
	kfree(qp_info->snoop_table);
}
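/*
 * Per-port setup: one CQ (sized for both QPs), one PD, QP0 (SMI, only on
 * ports with SMI capability) and QP1 (GSI), plus an ordered workqueue
 * used for timeout and local-completion processing.
 */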
/*
 * Open the port
 * Create the QP, PD, MR, and CQ if needed
 */
static int ib_mad_port_open(struct ib_device *device,
			    int port_num)
{
	int ret, cq_size;
	struct ib_mad_port_private *port_priv;
	unsigned long flags;
	char name[sizeof "ib_mad123"];
	int has_smi;

	if (WARN_ON(rdma_max_mad_size(device, port_num) < IB_MGMT_MAD_SIZE))
		return -EFAULT;

	if (WARN_ON(rdma_cap_opa_mad(device, port_num) &&
		    rdma_max_mad_size(device, port_num) < OPA_MGMT_MAD_SIZE))
		return -EFAULT;

	/* Create new device info */
	port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
	if (!port_priv) {
		dev_err(&device->dev, "No memory for ib_mad_port_private\n");
		return -ENOMEM;
	}

	port_priv->device = device;
	port_priv->port_num = port_num;
	spin_lock_init(&port_priv->reg_lock);
	INIT_LIST_HEAD(&port_priv->agent_list);
	init_mad_qp(port_priv, &port_priv->qp_info[0]);
	init_mad_qp(port_priv, &port_priv->qp_info[1]);

	cq_size = mad_sendq_size + mad_recvq_size;
	has_smi = rdma_cap_ib_smi(device, port_num);
	if (has_smi)
		cq_size *= 2;

	port_priv->cq = ib_alloc_cq(port_priv->device, port_priv, cq_size, 0,
			IB_POLL_WORKQUEUE);
	if (IS_ERR(port_priv->cq)) {
		dev_err(&device->dev, "Couldn't create ib_mad CQ\n");
		ret = PTR_ERR(port_priv->cq);
		goto error3;
	}

	port_priv->pd = ib_alloc_pd(device, 0);
	if (IS_ERR(port_priv->pd)) {
		dev_err(&device->dev, "Couldn't create ib_mad PD\n");
		ret = PTR_ERR(port_priv->pd);
		goto error4;
	}

	if (has_smi) {
		ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
		if (ret)
			goto error6;
	}
	ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
	if (ret)
		goto error7;

	snprintf(name, sizeof name, "ib_mad%d", port_num);
	port_priv->wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
	if (!port_priv->wq) {
		ret = -ENOMEM;
		goto error8;
	}

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	list_add_tail(&port_priv->port_list, &ib_mad_port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	ret = ib_mad_port_start(port_priv);
	if (ret) {
		dev_err(&device->dev, "Couldn't start port\n");
		goto error9;
	}

	return 0;

error9:
	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	list_del_init(&port_priv->port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	destroy_workqueue(port_priv->wq);
error8:
	destroy_mad_qp(&port_priv->qp_info[1]);
error7:
	destroy_mad_qp(&port_priv->qp_info[0]);
error6:
	ib_dealloc_pd(port_priv->pd);
error4:
	ib_free_cq(port_priv->cq);
	cleanup_recv_queue(&port_priv->qp_info[1]);
	cleanup_recv_queue(&port_priv->qp_info[0]);
error3:
	kfree(port_priv);

	return ret;
}
/*
 * Close the port
 * If there are no classes using the port, free the port
 * resources (CQ, MR, PD, QP) and remove the port's info structure
 */
static int ib_mad_port_close(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *port_priv;
	unsigned long flags;

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	port_priv = __ib_get_mad_port(device, port_num);
	if (port_priv == NULL) {
		spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
		dev_err(&device->dev, "Port %d not found\n", port_num);
		return -ENODEV;
	}
	list_del_init(&port_priv->port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	destroy_workqueue(port_priv->wq);
	destroy_mad_qp(&port_priv->qp_info[1]);
	destroy_mad_qp(&port_priv->qp_info[0]);
	ib_dealloc_pd(port_priv->pd);
	ib_free_cq(port_priv->cq);
	cleanup_recv_queue(&port_priv->qp_info[1]);
	cleanup_recv_queue(&port_priv->qp_info[0]);
	/* XXX: Handle deallocation of MAD registration tables */

	kfree(port_priv);

	return 0;
}
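/*
 * ib_client hooks: ib_mad_init_device() opens every MAD-capable port of a
 * newly registered device (rolling back on failure), and
 * ib_mad_remove_device() tears the ports down again when the device goes
 * away.
 */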
static void ib_mad_init_device(struct ib_device *device)
{
	int start, i;

	start = rdma_start_port(device);

	for (i = start; i <= rdma_end_port(device); i++) {
		if (!rdma_cap_ib_mad(device, i))
			continue;

		if (ib_mad_port_open(device, i)) {
			dev_err(&device->dev, "Couldn't open port %d\n", i);
			goto error;
		}
		if (ib_agent_port_open(device, i)) {
			dev_err(&device->dev,
				"Couldn't open port %d for agents\n", i);
			goto error_agent;
		}
	}
	return;

error_agent:
	if (ib_mad_port_close(device, i))
		dev_err(&device->dev, "Couldn't close port %d\n", i);

error:
	while (--i >= start) {
		if (!rdma_cap_ib_mad(device, i))
			continue;

		if (ib_agent_port_close(device, i))
			dev_err(&device->dev,
				"Couldn't close port %d for agents\n", i);
		if (ib_mad_port_close(device, i))
			dev_err(&device->dev, "Couldn't close port %d\n", i);
	}
}

static void ib_mad_remove_device(struct ib_device *device, void *client_data)
{
	int i;

	for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) {
		if (!rdma_cap_ib_mad(device, i))
			continue;

		if (ib_agent_port_close(device, i))
			dev_err(&device->dev,
				"Couldn't close port %d for agents\n", i);
		if (ib_mad_port_close(device, i))
			dev_err(&device->dev, "Couldn't close port %d\n", i);
	}
}
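/*
 * Module plumbing: the send/receive queue depths given via the module
 * parameters are clamped to [IB_MAD_QP_MIN_SIZE, IB_MAD_QP_MAX_SIZE]
 * before the client is registered.
 */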
static struct ib_client mad_client = {
	.name   = "mad",
	.add = ib_mad_init_device,
	.remove = ib_mad_remove_device
};

int ib_mad_init(void)
{
	mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE);
	mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE);

	mad_sendq_size = min(mad_sendq_size, IB_MAD_QP_MAX_SIZE);
	mad_sendq_size = max(mad_sendq_size, IB_MAD_QP_MIN_SIZE);

	INIT_LIST_HEAD(&ib_mad_port_list);

	if (ib_register_client(&mad_client)) {
		pr_err("Couldn't register ib_mad client\n");
		return -EINVAL;
	}

	return 0;
}

void ib_mad_cleanup(void)
{
	ib_unregister_client(&mad_client);
}