/*
 * Copyright (c) 2004-2007 Voltaire, Inc.  All rights reserved.
 * Copyright (c) 2005 Intel Corporation.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2009 HNR Consulting. All rights reserved.
 * Copyright (c) 2014 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <rdma/ib_cache.h>

#include "mad_priv.h"
#include "mad_rmpp.h"
#include "smi.h"
#include "opa_smi.h"
#include "agent.h"

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("kernel IB MAD API");
MODULE_AUTHOR("Hal Rosenstock");
MODULE_AUTHOR("Sean Hefty");
static int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
static int mad_recvq_size = IB_MAD_QP_RECV_SIZE;

module_param_named(send_queue_size, mad_sendq_size, int, 0444);
MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests");
module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");
static struct list_head ib_mad_port_list;
static u32 ib_mad_client_id = 0;

/* Port list lock */
static DEFINE_SPINLOCK(ib_mad_port_list_lock);
/* Forward declarations */
static int method_in_use(struct ib_mad_mgmt_method_table **method,
			 struct ib_mad_reg_req *mad_reg_req);
static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
static struct ib_mad_agent_private *find_mad_agent(
					struct ib_mad_port_private *port_priv,
					const struct ib_mad_hdr *mad);
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
				    struct ib_mad_private *mad);
static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
static void timeout_sends(struct work_struct *work);
static void local_completions(struct work_struct *work);
static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			      struct ib_mad_agent_private *agent_priv,
			      u8 mgmt_class);
static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			   struct ib_mad_agent_private *agent_priv);
/*
 * Returns a ib_mad_port_private structure or NULL for a device/port
 * Assumes ib_mad_port_list_lock is being held
 */
static inline struct ib_mad_port_private *
__ib_get_mad_port(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *entry;

	list_for_each_entry(entry, &ib_mad_port_list, port_list) {
		if (entry->device == device && entry->port_num == port_num)
			return entry;
	}
	return NULL;
}
/*
 * Wrapper function to return a ib_mad_port_private structure or NULL
 * for a device/port
 */
static inline struct ib_mad_port_private *
ib_get_mad_port(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *entry;
	unsigned long flags;

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	entry = __ib_get_mad_port(device, port_num);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	return entry;
}
static inline u8 convert_mgmt_class(u8 mgmt_class)
{
	/* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0 */
	return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ?
		0 : mgmt_class;
}

static int get_spl_qp_index(enum ib_qp_type qp_type)
{
	switch (qp_type) {
	case IB_QPT_SMI:
		return 0;
	case IB_QPT_GSI:
		return 1;
	default:
		return -1;
	}
}

static int vendor_class_index(u8 mgmt_class)
{
	return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START;
}

static int is_vendor_class(u8 mgmt_class)
{
	if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) ||
	    (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END))
		return 0;
	return 1;
}

static int is_vendor_oui(char *oui)
{
	if (oui[0] || oui[1] || oui[2])
		return 1;
	return 0;
}
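
/*
 * get_spl_qp_index() maps the special QP types to their slot in
 * port_priv->qp_info[]: IB_QPT_SMI -> 0 (QP0) and IB_QPT_GSI -> 1 (QP1),
 * with -1 for anything else; callers below treat -1 as an invalid QP type.
 */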
static int is_vendor_method_in_use(
		struct ib_mad_mgmt_vendor_class *vendor_class,
		struct ib_mad_reg_req *mad_reg_req)
{
	struct ib_mad_mgmt_method_table *method;
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++) {
		if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) {
			method = vendor_class->method_table[i];
			if (method) {
				if (method_in_use(&method, mad_reg_req))
					return 1;
				else
					break;
			}
		}
	}
	return 0;
}
int ib_response_mad(const struct ib_mad_hdr *hdr)
{
	return ((hdr->method & IB_MGMT_METHOD_RESP) ||
		(hdr->method == IB_MGMT_METHOD_TRAP_REPRESS) ||
		((hdr->mgmt_class == IB_MGMT_CLASS_BM) &&
		 (hdr->attr_mod & IB_BM_ATTR_MOD_RESP)));
}
EXPORT_SYMBOL(ib_response_mad);
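
/*
 * A MAD is treated as a response if its R bit (IB_MGMT_METHOD_RESP) is set,
 * if it is a TrapRepress, or if it is a BM class MAD whose attribute
 * modifier carries the response bit.  This predicate is what steers an
 * incoming MAD either to the agent that sent the matching request or to a
 * registered receiver (see find_mad_agent() below).
 */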
/*
 * ib_register_mad_agent - Register to send/receive MADs
 */
struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
					   u8 port_num,
					   enum ib_qp_type qp_type,
					   struct ib_mad_reg_req *mad_reg_req,
					   u8 rmpp_version,
					   ib_mad_send_handler send_handler,
					   ib_mad_recv_handler recv_handler,
					   void *context,
					   u32 registration_flags)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent *ret = ERR_PTR(-EINVAL);
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_reg_req *reg_req = NULL;
	struct ib_mad_mgmt_class_table *class;
	struct ib_mad_mgmt_vendor_class_table *vendor;
	struct ib_mad_mgmt_vendor_class *vendor_class;
	struct ib_mad_mgmt_method_table *method;
	int ret2, qpn;
	unsigned long flags;
	u8 mgmt_class, vclass;

	/* Validate parameters */
	qpn = get_spl_qp_index(qp_type);
	if (qpn == -1) {
		dev_notice(&device->dev,
			   "ib_register_mad_agent: invalid QP Type %d\n",
			   qp_type);
		goto error1;
	}

	if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION) {
		dev_notice(&device->dev,
			   "ib_register_mad_agent: invalid RMPP Version %u\n",
			   rmpp_version);
		goto error1;
	}

	/* Validate MAD registration request if supplied */
	if (mad_reg_req) {
		if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION) {
			dev_notice(&device->dev,
				   "ib_register_mad_agent: invalid Class Version %u\n",
				   mad_reg_req->mgmt_class_version);
			goto error1;
		}
		if (!recv_handler) {
			dev_notice(&device->dev,
				   "ib_register_mad_agent: no recv_handler\n");
			goto error1;
		}
		if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
			/*
			 * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
			 * one in this range currently allowed
			 */
			if (mad_reg_req->mgmt_class !=
			    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: Invalid Mgmt Class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		} else if (mad_reg_req->mgmt_class == 0) {
			/*
			 * Class 0 is reserved in IBA and is used for
			 * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
			 */
			dev_notice(&device->dev,
				   "ib_register_mad_agent: Invalid Mgmt Class 0\n");
			goto error1;
		} else if (is_vendor_class(mad_reg_req->mgmt_class)) {
			/*
			 * If class is in "new" vendor range,
			 * ensure supplied OUI is not zero
			 */
			if (!is_vendor_oui(mad_reg_req->oui)) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: No OUI specified for class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		}
		/* Make sure class supplied is consistent with RMPP */
		if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) {
			if (rmpp_version) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: RMPP version for non-RMPP class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		}

		/* Make sure class supplied is consistent with QP type */
		if (qp_type == IB_QPT_SMI) {
			if ((mad_reg_req->mgmt_class !=
					IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
			    (mad_reg_req->mgmt_class !=
					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: Invalid SM QP type: class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		} else {
			if ((mad_reg_req->mgmt_class ==
					IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
			    (mad_reg_req->mgmt_class ==
					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: Invalid GS QP type: class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		}
	} else {
		/* No registration request supplied */
		if (!send_handler)
			goto error1;
		if (registration_flags & IB_MAD_USER_RMPP)
			goto error1;
	}

	/* Validate device and port */
	port_priv = ib_get_mad_port(device, port_num);
	if (!port_priv) {
		dev_notice(&device->dev, "ib_register_mad_agent: Invalid port\n");
		ret = ERR_PTR(-ENODEV);
		goto error1;
	}

	/* Verify the QP requested is supported.  For example, Ethernet devices
	 * will not have QP0 */
	if (!port_priv->qp_info[qpn].qp) {
		dev_notice(&device->dev,
			   "ib_register_mad_agent: QP %d not supported\n", qpn);
		ret = ERR_PTR(-EPROTONOSUPPORT);
		goto error1;
	}

	/* Allocate structures */
	mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
	if (!mad_agent_priv) {
		ret = ERR_PTR(-ENOMEM);
		goto error1;
	}

	if (mad_reg_req) {
		reg_req = kmemdup(mad_reg_req, sizeof *reg_req, GFP_KERNEL);
		if (!reg_req) {
			ret = ERR_PTR(-ENOMEM);
			goto error3;
		}
	}

	/* Now, fill in the various structures */
	mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
	mad_agent_priv->reg_req = reg_req;
	mad_agent_priv->agent.rmpp_version = rmpp_version;
	mad_agent_priv->agent.device = device;
	mad_agent_priv->agent.recv_handler = recv_handler;
	mad_agent_priv->agent.send_handler = send_handler;
	mad_agent_priv->agent.context = context;
	mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
	mad_agent_priv->agent.port_num = port_num;
	mad_agent_priv->agent.flags = registration_flags;
	spin_lock_init(&mad_agent_priv->lock);
	INIT_LIST_HEAD(&mad_agent_priv->send_list);
	INIT_LIST_HEAD(&mad_agent_priv->wait_list);
	INIT_LIST_HEAD(&mad_agent_priv->done_list);
	INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
	INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
	INIT_LIST_HEAD(&mad_agent_priv->local_list);
	INIT_WORK(&mad_agent_priv->local_work, local_completions);
	atomic_set(&mad_agent_priv->refcount, 1);
	init_completion(&mad_agent_priv->comp);

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;

	/*
	 * Make sure MAD registration (if supplied)
	 * is non overlapping with any existing ones
	 */
	if (mad_reg_req) {
		mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
		if (!is_vendor_class(mgmt_class)) {
			class = port_priv->version[mad_reg_req->
						   mgmt_class_version].class;
			if (class) {
				method = class->method_table[mgmt_class];
				if (method) {
					if (method_in_use(&method,
							   mad_reg_req))
						goto error4;
				}
			}
			ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
						  mgmt_class);
		} else {
			/* "New" vendor class range */
			vendor = port_priv->version[mad_reg_req->
						    mgmt_class_version].vendor;
			if (vendor) {
				vclass = vendor_class_index(mgmt_class);
				vendor_class = vendor->vendor_class[vclass];
				if (vendor_class) {
					if (is_vendor_method_in_use(
							vendor_class,
							mad_reg_req))
						goto error4;
				}
			}
			ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
		}
		if (ret2) {
			ret = ERR_PTR(ret2);
			goto error4;
		}
	}

	/* Add mad agent into port's agent list */
	list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	return &mad_agent_priv->agent;

error4:
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);
	kfree(reg_req);
error3:
	kfree(mad_agent_priv);
error1:
	return ret;
}
EXPORT_SYMBOL(ib_register_mad_agent);
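
/*
 * Usage sketch (illustrative only, not part of this file): a client that
 * wants to receive, say, Performance Management Get MADs on QP1 of port 1
 * would typically register along these lines:
 *
 *	struct ib_mad_reg_req req = {
 *		.mgmt_class = IB_MGMT_CLASS_PERF_MGMT,
 *		.mgmt_class_version = 1,
 *	};
 *	set_bit(IB_MGMT_METHOD_GET, req.method_mask);
 *	agent = ib_register_mad_agent(device, 1, IB_QPT_GSI, &req, 0,
 *				      my_send_handler, my_recv_handler,
 *				      my_context, 0);
 *
 * my_send_handler, my_recv_handler and my_context are hypothetical client
 * callbacks/state; the registration is released with
 * ib_unregister_mad_agent().
 */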
static inline int is_snooping_sends(int mad_snoop_flags)
{
	return (mad_snoop_flags &
		(/*IB_MAD_SNOOP_POSTED_SENDS |
		 IB_MAD_SNOOP_RMPP_SENDS |*/
		 IB_MAD_SNOOP_SEND_COMPLETIONS /*|
		 IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/));
}

static inline int is_snooping_recvs(int mad_snoop_flags)
{
	return (mad_snoop_flags &
		(IB_MAD_SNOOP_RECVS /*|
		 IB_MAD_SNOOP_RMPP_RECVS*/));
}

static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
				struct ib_mad_snoop_private *mad_snoop_priv)
{
	struct ib_mad_snoop_private **new_snoop_table;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	/* Check for empty slot in array. */
	for (i = 0; i < qp_info->snoop_table_size; i++)
		if (!qp_info->snoop_table[i])
			break;

	if (i == qp_info->snoop_table_size) {
		/* Grow table. */
		new_snoop_table = krealloc(qp_info->snoop_table,
					   sizeof mad_snoop_priv *
					   (qp_info->snoop_table_size + 1),
					   GFP_ATOMIC);
		if (!new_snoop_table) {
			i = -ENOMEM;
			goto out;
		}

		qp_info->snoop_table = new_snoop_table;
		qp_info->snoop_table_size++;
	}
	qp_info->snoop_table[i] = mad_snoop_priv;
	atomic_inc(&qp_info->snoop_count);
out:
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
	return i;
}

struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
					   u8 port_num,
					   enum ib_qp_type qp_type,
					   int mad_snoop_flags,
					   ib_mad_snoop_handler snoop_handler,
					   ib_mad_recv_handler recv_handler,
					   void *context)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent *ret;
	struct ib_mad_snoop_private *mad_snoop_priv;
	int qpn;

	/* Validate parameters */
	if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
	    (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
		ret = ERR_PTR(-EINVAL);
		goto error1;
	}
	qpn = get_spl_qp_index(qp_type);
	if (qpn == -1) {
		ret = ERR_PTR(-EINVAL);
		goto error1;
	}
	port_priv = ib_get_mad_port(device, port_num);
	if (!port_priv) {
		ret = ERR_PTR(-ENODEV);
		goto error1;
	}
	/* Allocate structures */
	mad_snoop_priv = kzalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
	if (!mad_snoop_priv) {
		ret = ERR_PTR(-ENOMEM);
		goto error1;
	}

	/* Now, fill in the various structures */
	mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
	mad_snoop_priv->agent.device = device;
	mad_snoop_priv->agent.recv_handler = recv_handler;
	mad_snoop_priv->agent.snoop_handler = snoop_handler;
	mad_snoop_priv->agent.context = context;
	mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
	mad_snoop_priv->agent.port_num = port_num;
	mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
	init_completion(&mad_snoop_priv->comp);
	mad_snoop_priv->snoop_index = register_snoop_agent(
						&port_priv->qp_info[qpn],
						mad_snoop_priv);
	if (mad_snoop_priv->snoop_index < 0) {
		ret = ERR_PTR(mad_snoop_priv->snoop_index);
		goto error2;
	}

	atomic_set(&mad_snoop_priv->refcount, 1);
	return &mad_snoop_priv->agent;

error2:
	kfree(mad_snoop_priv);
error1:
	return ret;
}
EXPORT_SYMBOL(ib_register_mad_snoop);
static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
	if (atomic_dec_and_test(&mad_agent_priv->refcount))
		complete(&mad_agent_priv->comp);
}

static inline void deref_snoop_agent(struct ib_mad_snoop_private *mad_snoop_priv)
{
	if (atomic_dec_and_test(&mad_snoop_priv->refcount))
		complete(&mad_snoop_priv->comp);
}

static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
	struct ib_mad_port_private *port_priv;
	unsigned long flags;

	/* Note that we could still be handling received MADs */

	/*
	 * Canceling all sends results in dropping received response
	 * MADs, preventing us from queuing additional work
	 */
	cancel_mads(mad_agent_priv);
	port_priv = mad_agent_priv->qp_info->port_priv;
	cancel_delayed_work(&mad_agent_priv->timed_work);

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	remove_mad_reg_req(mad_agent_priv);
	list_del(&mad_agent_priv->agent_list);
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	flush_workqueue(port_priv->wq);
	ib_cancel_rmpp_recvs(mad_agent_priv);

	deref_mad_agent(mad_agent_priv);
	wait_for_completion(&mad_agent_priv->comp);

	kfree(mad_agent_priv->reg_req);
	kfree(mad_agent_priv);
}
static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
{
	struct ib_mad_qp_info *qp_info;
	unsigned long flags;

	qp_info = mad_snoop_priv->qp_info;
	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
	atomic_dec(&qp_info->snoop_count);
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);

	deref_snoop_agent(mad_snoop_priv);
	wait_for_completion(&mad_snoop_priv->comp);

	kfree(mad_snoop_priv);
}
/*
 * ib_unregister_mad_agent - Unregisters a client from using MAD services
 */
int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_snoop_private *mad_snoop_priv;

	/* If the TID is zero, the agent can only snoop. */
	if (mad_agent->hi_tid) {
		mad_agent_priv = container_of(mad_agent,
					      struct ib_mad_agent_private,
					      agent);
		unregister_mad_agent(mad_agent_priv);
	} else {
		mad_snoop_priv = container_of(mad_agent,
					      struct ib_mad_snoop_private,
					      agent);
		unregister_mad_snoop(mad_snoop_priv);
	}
	return 0;
}
EXPORT_SYMBOL(ib_unregister_mad_agent);
static void dequeue_mad(struct ib_mad_list_head *mad_list)
{
	struct ib_mad_queue *mad_queue;
	unsigned long flags;

	BUG_ON(!mad_list->mad_queue);
	mad_queue = mad_list->mad_queue;
	spin_lock_irqsave(&mad_queue->lock, flags);
	list_del(&mad_list->list);
	mad_queue->count--;
	spin_unlock_irqrestore(&mad_queue->lock, flags);
}
static void snoop_send(struct ib_mad_qp_info *qp_info,
		       struct ib_mad_send_buf *send_buf,
		       struct ib_mad_send_wc *mad_send_wc,
		       int mad_snoop_flags)
{
	struct ib_mad_snoop_private *mad_snoop_priv;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	for (i = 0; i < qp_info->snoop_table_size; i++) {
		mad_snoop_priv = qp_info->snoop_table[i];
		if (!mad_snoop_priv ||
		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
			continue;

		atomic_inc(&mad_snoop_priv->refcount);
		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
		mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
						    send_buf, mad_send_wc);
		deref_snoop_agent(mad_snoop_priv);
		spin_lock_irqsave(&qp_info->snoop_lock, flags);
	}
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}
static void snoop_recv(struct ib_mad_qp_info *qp_info,
		       struct ib_mad_recv_wc *mad_recv_wc,
		       int mad_snoop_flags)
{
	struct ib_mad_snoop_private *mad_snoop_priv;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	for (i = 0; i < qp_info->snoop_table_size; i++) {
		mad_snoop_priv = qp_info->snoop_table[i];
		if (!mad_snoop_priv ||
		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
			continue;

		atomic_inc(&mad_snoop_priv->refcount);
		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
		mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent,
						   mad_recv_wc);
		deref_snoop_agent(mad_snoop_priv);
		spin_lock_irqsave(&qp_info->snoop_lock, flags);
	}
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}
static void build_smp_wc(struct ib_qp *qp,
			 u64 wr_id, u16 slid, u16 pkey_index, u8 port_num,
			 struct ib_wc *wc)
{
	memset(wc, 0, sizeof *wc);
	wc->wr_id = wr_id;
	wc->status = IB_WC_SUCCESS;
	wc->opcode = IB_WC_RECV;
	wc->pkey_index = pkey_index;
	wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
	wc->src_qp = IB_QP0;
	wc->qp = qp;
	wc->slid = slid;
	wc->sl = 0;
	wc->dlid_path_bits = 0;
	wc->port_num = port_num;
}
static size_t mad_priv_size(const struct ib_mad_private *mp)
{
	return sizeof(struct ib_mad_private) + mp->mad_size;
}

static struct ib_mad_private *alloc_mad_private(size_t mad_size, gfp_t flags)
{
	size_t size = sizeof(struct ib_mad_private) + mad_size;
	struct ib_mad_private *ret = kzalloc(size, flags);

	if (ret)
		ret->mad_size = mad_size;

	return ret;
}

static size_t port_mad_size(const struct ib_mad_port_private *port_priv)
{
	return rdma_max_mad_size(port_priv->device, port_priv->port_num);
}

static size_t mad_priv_dma_size(const struct ib_mad_private *mp)
{
	return sizeof(struct ib_grh) + mp->mad_size;
}
/*
 * Return 0 if SMP is to be sent
 * Return 1 if SMP was consumed locally (whether or not solicited)
 * Return < 0 if error
 */
static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
				  struct ib_mad_send_wr_private *mad_send_wr)
{
	int ret = 0;
	struct ib_smp *smp = mad_send_wr->send_buf.mad;
	struct opa_smp *opa_smp = (struct opa_smp *)smp;
	unsigned long flags;
	struct ib_mad_local_private *local;
	struct ib_mad_private *mad_priv;
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent_private *recv_mad_agent = NULL;
	struct ib_device *device = mad_agent_priv->agent.device;
	u8 port_num;
	struct ib_wc mad_wc;
	struct ib_ud_wr *send_wr = &mad_send_wr->send_wr;
	size_t mad_size = port_mad_size(mad_agent_priv->qp_info->port_priv);
	u16 out_mad_pkey_index = 0;
	u16 drslid;
	bool opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
				    mad_agent_priv->qp_info->port_priv->port_num);

	if (rdma_cap_ib_switch(device) &&
	    smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		port_num = send_wr->port_num;
	else
		port_num = mad_agent_priv->agent.port_num;

	/*
	 * Directed route handling starts if the initial LID routed part of
	 * a request or the ending LID routed part of a response is empty.
	 * If we are at the start of the LID routed part, don't update the
	 * hop_ptr or hop_cnt.  See section 14.2.2, Vol 1 IB spec.
	 */
	if (opa && smp->class_version == OPA_SMP_CLASS_VERSION) {
		u32 opa_drslid;

		if ((opa_get_smp_direction(opa_smp)
		     ? opa_smp->route.dr.dr_dlid : opa_smp->route.dr.dr_slid) ==
		     OPA_LID_PERMISSIVE &&
		     opa_smi_handle_dr_smp_send(opa_smp,
						rdma_cap_ib_switch(device),
						port_num) == IB_SMI_DISCARD) {
			ret = -EINVAL;
			dev_err(&device->dev, "OPA Invalid directed route\n");
			goto out;
		}
		opa_drslid = be32_to_cpu(opa_smp->route.dr.dr_slid);
		if (opa_drslid != be32_to_cpu(OPA_LID_PERMISSIVE) &&
		    opa_drslid & 0xffff0000) {
			ret = -EINVAL;
			dev_err(&device->dev, "OPA Invalid dr_slid 0x%x\n",
			       opa_drslid);
			goto out;
		}
		drslid = (u16)(opa_drslid & 0x0000ffff);

		/* Check to post send on QP or process locally */
		if (opa_smi_check_local_smp(opa_smp, device) == IB_SMI_DISCARD &&
		    opa_smi_check_local_returning_smp(opa_smp, device) == IB_SMI_DISCARD)
			goto out;
	} else {
		if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
		     IB_LID_PERMISSIVE &&
		     smi_handle_dr_smp_send(smp, rdma_cap_ib_switch(device), port_num) ==
		     IB_SMI_DISCARD) {
			ret = -EINVAL;
			dev_err(&device->dev, "Invalid directed route\n");
			goto out;
		}
		drslid = be16_to_cpu(smp->dr_slid);

		/* Check to post send on QP or process locally */
		if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD &&
		    smi_check_local_returning_smp(smp, device) == IB_SMI_DISCARD)
			goto out;
	}

	local = kmalloc(sizeof *local, GFP_ATOMIC);
	if (!local) {
		ret = -ENOMEM;
		dev_err(&device->dev, "No memory for ib_mad_local_private\n");
		goto out;
	}
	local->mad_priv = NULL;
	local->recv_mad_agent = NULL;
	mad_priv = alloc_mad_private(mad_size, GFP_ATOMIC);
	if (!mad_priv) {
		ret = -ENOMEM;
		dev_err(&device->dev, "No memory for local response MAD\n");
		kfree(local);
		goto out;
	}

	build_smp_wc(mad_agent_priv->agent.qp,
		     send_wr->wr.wr_id, drslid,
		     send_wr->pkey_index,
		     send_wr->port_num, &mad_wc);

	if (opa && smp->base_version == OPA_MGMT_BASE_VERSION) {
		mad_wc.byte_len = mad_send_wr->send_buf.hdr_len
					+ mad_send_wr->send_buf.data_len
					+ sizeof(struct ib_grh);
	}

	/* No GRH for DR SMP */
	ret = device->process_mad(device, 0, port_num, &mad_wc, NULL,
				  (const struct ib_mad_hdr *)smp, mad_size,
				  (struct ib_mad_hdr *)mad_priv->mad,
				  &mad_size, &out_mad_pkey_index);
	switch (ret)
	{
	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
		if (ib_response_mad((const struct ib_mad_hdr *)mad_priv->mad) &&
		    mad_agent_priv->agent.recv_handler) {
			local->mad_priv = mad_priv;
			local->recv_mad_agent = mad_agent_priv;
			/*
			 * Reference MAD agent until receive
			 * side of local completion handled
			 */
			atomic_inc(&mad_agent_priv->refcount);
		} else
			kfree(mad_priv);
		break;
	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
		kfree(mad_priv);
		break;
	case IB_MAD_RESULT_SUCCESS:
		/* Treat like an incoming receive MAD */
		port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
					    mad_agent_priv->agent.port_num);
		if (port_priv) {
			memcpy(mad_priv->mad, smp, mad_priv->mad_size);
			recv_mad_agent = find_mad_agent(port_priv,
						(const struct ib_mad_hdr *)mad_priv->mad);
		}
		if (!port_priv || !recv_mad_agent) {
			/*
			 * No receiving agent so drop packet and
			 * generate send completion.
			 */
			kfree(mad_priv);
			break;
		}
		local->mad_priv = mad_priv;
		local->recv_mad_agent = recv_mad_agent;
		break;
	default:
		kfree(mad_priv);
		kfree(local);
		ret = -EINVAL;
		goto out;
	}

	local->mad_send_wr = mad_send_wr;
	if (opa) {
		local->mad_send_wr->send_wr.pkey_index = out_mad_pkey_index;
		local->return_wc_byte_len = mad_size;
	}
	/* Reference MAD agent until send side of local completion handled */
	atomic_inc(&mad_agent_priv->refcount);
	/* Queue local completion to local list */
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
	queue_work(mad_agent_priv->qp_info->port_priv->wq,
		   &mad_agent_priv->local_work);
	ret = 1;
out:
	return ret;
}
static int get_pad_size(int hdr_len, int data_len, size_t mad_size)
{
	int seg_size, pad;

	seg_size = mad_size - hdr_len;
	if (data_len && seg_size) {
		pad = seg_size - data_len % seg_size;
		return pad == seg_size ? 0 : pad;
	} else
		return 0;
}
static void free_send_rmpp_list(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_rmpp_segment *s, *t;

	list_for_each_entry_safe(s, t, &mad_send_wr->rmpp_list, list) {
		list_del(&s->list);
		kfree(s);
	}
}
static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
				size_t mad_size, gfp_t gfp_mask)
{
	struct ib_mad_send_buf *send_buf = &send_wr->send_buf;
	struct ib_rmpp_mad *rmpp_mad = send_buf->mad;
	struct ib_rmpp_segment *seg = NULL;
	int left, seg_size, pad;

	send_buf->seg_size = mad_size - send_buf->hdr_len;
	send_buf->seg_rmpp_size = mad_size - IB_MGMT_RMPP_HDR;
	seg_size = send_buf->seg_size;
	pad = send_wr->pad;

	/* Allocate data segments. */
	for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
		seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask);
		if (!seg) {
			dev_err(&send_buf->mad_agent->device->dev,
				"alloc_send_rmpp_segs: RMPP mem alloc failed for len %zd, gfp %#x\n",
				sizeof (*seg) + seg_size, gfp_mask);
			free_send_rmpp_list(send_wr);
			return -ENOMEM;
		}
		seg->num = ++send_buf->seg_count;
		list_add_tail(&seg->list, &send_wr->rmpp_list);
	}

	/* Zero any padding */
	if (pad)
		memset(seg->data + seg_size - pad, 0, pad);

	rmpp_mad->rmpp_hdr.rmpp_version = send_wr->mad_agent_priv->
					  agent.rmpp_version;
	rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA;
	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);

	send_wr->cur_seg = container_of(send_wr->rmpp_list.next,
					struct ib_rmpp_segment, list);
	send_wr->last_ack_seg = send_wr->cur_seg;
	return 0;
}
int ib_mad_kernel_rmpp_agent(const struct ib_mad_agent *agent)
{
	return agent->rmpp_version && !(agent->flags & IB_MAD_USER_RMPP);
}
EXPORT_SYMBOL(ib_mad_kernel_rmpp_agent);
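
/*
 * "Kernel RMPP agent" here means an agent that registered with a non-zero
 * rmpp_version and without IB_MAD_USER_RMPP: RMPP segmentation and
 * reassembly are then done by this layer (mad_rmpp.c) instead of by the
 * registering client, which affects both the send and receive paths below.
 */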
struct ib_mad_send_buf *ib_create_send_mad(struct ib_mad_agent *mad_agent,
					   u32 remote_qpn, u16 pkey_index,
					   int rmpp_active,
					   int hdr_len, int data_len,
					   gfp_t gfp_mask,
					   u8 base_version)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	int pad, message_size, ret, size;
	void *buf;
	size_t mad_size;
	bool opa;

	mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
				      agent);

	opa = rdma_cap_opa_mad(mad_agent->device, mad_agent->port_num);

	if (opa && base_version == OPA_MGMT_BASE_VERSION)
		mad_size = sizeof(struct opa_mad);
	else
		mad_size = sizeof(struct ib_mad);

	pad = get_pad_size(hdr_len, data_len, mad_size);
	message_size = hdr_len + data_len + pad;

	if (ib_mad_kernel_rmpp_agent(mad_agent)) {
		if (!rmpp_active && message_size > mad_size)
			return ERR_PTR(-EINVAL);
	} else
		if (rmpp_active || message_size > mad_size)
			return ERR_PTR(-EINVAL);

	size = rmpp_active ? hdr_len : mad_size;
	buf = kzalloc(sizeof *mad_send_wr + size, gfp_mask);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	mad_send_wr = buf + size;
	INIT_LIST_HEAD(&mad_send_wr->rmpp_list);
	mad_send_wr->send_buf.mad = buf;
	mad_send_wr->send_buf.hdr_len = hdr_len;
	mad_send_wr->send_buf.data_len = data_len;
	mad_send_wr->pad = pad;

	mad_send_wr->mad_agent_priv = mad_agent_priv;
	mad_send_wr->sg_list[0].length = hdr_len;
	mad_send_wr->sg_list[0].lkey = mad_agent->qp->pd->local_dma_lkey;

	/* OPA MADs don't have to be the full 2048 bytes */
	if (opa && base_version == OPA_MGMT_BASE_VERSION &&
	    data_len < mad_size - hdr_len)
		mad_send_wr->sg_list[1].length = data_len;
	else
		mad_send_wr->sg_list[1].length = mad_size - hdr_len;

	mad_send_wr->sg_list[1].lkey = mad_agent->qp->pd->local_dma_lkey;

	mad_send_wr->send_wr.wr.wr_id = (unsigned long) mad_send_wr;
	mad_send_wr->send_wr.wr.sg_list = mad_send_wr->sg_list;
	mad_send_wr->send_wr.wr.num_sge = 2;
	mad_send_wr->send_wr.wr.opcode = IB_WR_SEND;
	mad_send_wr->send_wr.wr.send_flags = IB_SEND_SIGNALED;
	mad_send_wr->send_wr.remote_qpn = remote_qpn;
	mad_send_wr->send_wr.remote_qkey = IB_QP_SET_QKEY;
	mad_send_wr->send_wr.pkey_index = pkey_index;

	if (rmpp_active) {
		ret = alloc_send_rmpp_list(mad_send_wr, mad_size, gfp_mask);
		if (ret) {
			kfree(buf);
			return ERR_PTR(ret);
		}
	}

	mad_send_wr->send_buf.mad_agent = mad_agent;
	atomic_inc(&mad_agent_priv->refcount);
	return &mad_send_wr->send_buf;
}
EXPORT_SYMBOL(ib_create_send_mad);
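
/*
 * Buffer layout used above: a single allocation holds the MAD itself
 * followed by the private send WR state, i.e.
 *
 *	buf:		[ MAD header (+ data for non-RMPP sends) ]
 *	buf + size:	[ struct ib_mad_send_wr_private ]
 *
 * For RMPP sends only the header lives in buf; the data payload is carried
 * by the ib_rmpp_segment entries built by alloc_send_rmpp_list(), which is
 * why sg_list[1] points at ib_get_payload() at post time.
 */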
int ib_get_mad_data_offset(u8 mgmt_class)
{
	if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
		return IB_MGMT_SA_HDR;
	else if ((mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
		 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
		 (mgmt_class == IB_MGMT_CLASS_BIS))
		return IB_MGMT_DEVICE_HDR;
	else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
		 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
		return IB_MGMT_VENDOR_HDR;
	else
		return IB_MGMT_MAD_HDR;
}
EXPORT_SYMBOL(ib_get_mad_data_offset);

int ib_is_mad_class_rmpp(u8 mgmt_class)
{
	if ((mgmt_class == IB_MGMT_CLASS_SUBN_ADM) ||
	    (mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
	    (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
	    (mgmt_class == IB_MGMT_CLASS_BIS) ||
	    ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
	     (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)))
		return 1;
	return 0;
}
EXPORT_SYMBOL(ib_is_mad_class_rmpp);
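
/*
 * The offsets returned above are the per-class header sizes from
 * <rdma/ib_mad.h> (assumed values, listed here only for reference):
 * IB_MGMT_MAD_HDR = 24, IB_MGMT_SA_HDR = 56, IB_MGMT_DEVICE_HDR = 64,
 * IB_MGMT_VENDOR_HDR = 40.  Everything after that offset within the MAD is
 * class payload, which is what the RMPP segmentation code operates on.
 */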
void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct list_head *list;

	mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
				   send_buf);
	list = &mad_send_wr->cur_seg->list;

	if (mad_send_wr->cur_seg->num < seg_num) {
		list_for_each_entry(mad_send_wr->cur_seg, list, list)
			if (mad_send_wr->cur_seg->num == seg_num)
				break;
	} else if (mad_send_wr->cur_seg->num > seg_num) {
		list_for_each_entry_reverse(mad_send_wr->cur_seg, list, list)
			if (mad_send_wr->cur_seg->num == seg_num)
				break;
	}
	return mad_send_wr->cur_seg->data;
}
EXPORT_SYMBOL(ib_get_rmpp_segment);
static inline void *ib_get_payload(struct ib_mad_send_wr_private *mad_send_wr)
{
	if (mad_send_wr->send_buf.seg_count)
		return ib_get_rmpp_segment(&mad_send_wr->send_buf,
					   mad_send_wr->seg_num);
	else
		return mad_send_wr->send_buf.mad +
		       mad_send_wr->send_buf.hdr_len;
}
void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;

	mad_agent_priv = container_of(send_buf->mad_agent,
				      struct ib_mad_agent_private, agent);
	mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
				   send_buf);

	free_send_rmpp_list(mad_send_wr);
	kfree(send_buf->mad);
	deref_mad_agent(mad_agent_priv);
}
EXPORT_SYMBOL(ib_free_send_mad);
int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_mad_qp_info *qp_info;
	struct list_head *list;
	struct ib_send_wr *bad_send_wr;
	struct ib_mad_agent *mad_agent;
	struct ib_sge *sge;
	unsigned long flags;
	int ret;

	/* Set WR ID to find mad_send_wr upon completion */
	qp_info = mad_send_wr->mad_agent_priv->qp_info;
	mad_send_wr->send_wr.wr.wr_id = (unsigned long)&mad_send_wr->mad_list;
	mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;

	mad_agent = mad_send_wr->send_buf.mad_agent;
	sge = mad_send_wr->sg_list;
	sge[0].addr = ib_dma_map_single(mad_agent->device,
					mad_send_wr->send_buf.mad,
					sge[0].length,
					DMA_TO_DEVICE);
	if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[0].addr)))
		return -ENOMEM;

	mad_send_wr->header_mapping = sge[0].addr;

	sge[1].addr = ib_dma_map_single(mad_agent->device,
					ib_get_payload(mad_send_wr),
					sge[1].length,
					DMA_TO_DEVICE);
	if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[1].addr))) {
		ib_dma_unmap_single(mad_agent->device,
				    mad_send_wr->header_mapping,
				    sge[0].length, DMA_TO_DEVICE);
		return -ENOMEM;
	}
	mad_send_wr->payload_mapping = sge[1].addr;

	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
	if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
		ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr.wr,
				   &bad_send_wr);
		list = &qp_info->send_queue.list;
	} else {
		ret = 0;
		list = &qp_info->overflow_list;
	}

	if (!ret) {
		qp_info->send_queue.count++;
		list_add_tail(&mad_send_wr->mad_list.list, list);
	}
	spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
	if (ret) {
		ib_dma_unmap_single(mad_agent->device,
				    mad_send_wr->header_mapping,
				    sge[0].length, DMA_TO_DEVICE);
		ib_dma_unmap_single(mad_agent->device,
				    mad_send_wr->payload_mapping,
				    sge[1].length, DMA_TO_DEVICE);
	}
	return ret;
}
/*
 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
 *  with the registered client
 */
int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
		     struct ib_mad_send_buf **bad_send_buf)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_buf *next_send_buf;
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long flags;
	int ret = -EINVAL;

	/* Walk list of send WRs and post each on send list */
	for (; send_buf; send_buf = next_send_buf) {

		mad_send_wr = container_of(send_buf,
					   struct ib_mad_send_wr_private,
					   send_buf);
		mad_agent_priv = mad_send_wr->mad_agent_priv;

		if (!send_buf->mad_agent->send_handler ||
		    (send_buf->timeout_ms &&
		     !send_buf->mad_agent->recv_handler)) {
			ret = -EINVAL;
			goto error;
		}

		if (!ib_is_mad_class_rmpp(((struct ib_mad_hdr *) send_buf->mad)->mgmt_class)) {
			if (mad_agent_priv->agent.rmpp_version) {
				ret = -EINVAL;
				goto error;
			}
		}

		/*
		 * Save pointer to next work request to post in case the
		 * current one completes, and the user modifies the work
		 * request associated with the completion
		 */
		next_send_buf = send_buf->next;
		mad_send_wr->send_wr.ah = send_buf->ah;

		if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class ==
		    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
			ret = handle_outgoing_dr_smp(mad_agent_priv,
						     mad_send_wr);
			if (ret < 0)		/* error */
				goto error;
			else if (ret == 1)	/* locally consumed */
				continue;
		}

		mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid;
		/* Timeout will be updated after send completes */
		mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms);
		mad_send_wr->max_retries = send_buf->retries;
		mad_send_wr->retries_left = send_buf->retries;
		send_buf->retries = 0;
		/* Reference for work request to QP + response */
		mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);
		mad_send_wr->status = IB_WC_SUCCESS;

		/* Reference MAD agent until send completes */
		atomic_inc(&mad_agent_priv->refcount);
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		list_add_tail(&mad_send_wr->agent_list,
			      &mad_agent_priv->send_list);
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

		if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
			ret = ib_send_rmpp_mad(mad_send_wr);
			if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED)
				ret = ib_send_mad(mad_send_wr);
		} else
			ret = ib_send_mad(mad_send_wr);
		if (ret < 0) {
			/* Fail send request */
			spin_lock_irqsave(&mad_agent_priv->lock, flags);
			list_del(&mad_send_wr->agent_list);
			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
			atomic_dec(&mad_agent_priv->refcount);
			goto error;
		}
	}
	return 0;
error:
	if (bad_send_buf)
		*bad_send_buf = send_buf;
	return ret;
}
EXPORT_SYMBOL(ib_post_send_mad);
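
/*
 * Typical send path (illustrative sketch, not part of this file): after
 * registering an agent, a client builds and posts a MAD roughly as
 *
 *	msg = ib_create_send_mad(agent, remote_qpn, pkey_index, 0,
 *				 IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
 *				 GFP_KERNEL, IB_MGMT_BASE_VERSION);
 *	if (!IS_ERR(msg)) {
 *		msg->ah = ah;		/* address handle built by the client */
 *		msg->timeout_ms = 100;
 *		msg->retries = 3;
 *		/* fill in msg->mad here */
 *		if (ib_post_send_mad(msg, NULL))
 *			ib_free_send_mad(msg);
 *	}
 *
 * ah, remote_qpn and pkey_index are assumed to come from the caller; on a
 * successful post the buffer is released in the client's send_handler.
 */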
/*
 * ib_free_recv_mad - Returns data buffers used to receive
 *  a MAD to the access layer
 */
void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_recv_buf *mad_recv_buf, *temp_recv_buf;
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *priv;
	struct list_head free_list;

	INIT_LIST_HEAD(&free_list);
	list_splice_init(&mad_recv_wc->rmpp_list, &free_list);

	list_for_each_entry_safe(mad_recv_buf, temp_recv_buf,
					&free_list, list) {
		mad_recv_wc = container_of(mad_recv_buf, struct ib_mad_recv_wc,
					   recv_buf);
		mad_priv_hdr = container_of(mad_recv_wc,
					    struct ib_mad_private_header,
					    recv_wc);
		priv = container_of(mad_priv_hdr, struct ib_mad_private,
				    header);
		kfree(priv);
	}
}
EXPORT_SYMBOL(ib_free_recv_mad);
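
/*
 * Receive-side ownership note: the mad_recv_wc handed to a client's
 * recv_handler still refers to the MAD layer's ib_mad_private buffers; the
 * client is expected to call ib_free_recv_mad() on it when done (typically
 * from the recv_handler itself), which walks the rmpp_list above and frees
 * every buffer of the reassembled MAD.
 */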
struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
					u8 rmpp_version,
					ib_mad_send_handler send_handler,
					ib_mad_recv_handler recv_handler,
					void *context)
{
	return ERR_PTR(-EINVAL);	/* XXX: for now */
}
EXPORT_SYMBOL(ib_redirect_mad_qp);

int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
		      struct ib_wc *wc)
{
	dev_err(&mad_agent->device->dev,
		"ib_process_mad_wc() not implemented yet\n");
	return 0;
}
EXPORT_SYMBOL(ib_process_mad_wc);
static int method_in_use(struct ib_mad_mgmt_method_table **method,
			 struct ib_mad_reg_req *mad_reg_req)
{
	int i;

	for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) {
		if ((*method)->agent[i]) {
			pr_err("Method %d already in use\n", i);
			return -EINVAL;
		}
	}
	return 0;
}

static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
{
	/* Allocate management method table */
	*method = kzalloc(sizeof **method, GFP_ATOMIC);
	if (!*method) {
		pr_err("No memory for ib_mad_mgmt_method_table\n");
		return -ENOMEM;
	}

	return 0;
}
/*
 * Check to see if there are any methods still in use
 */
static int check_method_table(struct ib_mad_mgmt_method_table *method)
{
	int i;

	for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
		if (method->agent[i])
			return 1;
	return 0;
}

/*
 * Check to see if there are any method tables for this class still in use
 */
static int check_class_table(struct ib_mad_mgmt_class_table *class)
{
	int i;

	for (i = 0; i < MAX_MGMT_CLASS; i++)
		if (class->method_table[i])
			return 1;
	return 0;
}

static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class)
{
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++)
		if (vendor_class->method_table[i])
			return 1;
	return 0;
}

static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class,
			   const char *oui)
{
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++)
		/* Is there matching OUI for this vendor class ? */
		if (!memcmp(vendor_class->oui[i], oui, 3))
			return i;

	return -1;
}

static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor)
{
	int i;

	for (i = 0; i < MAX_MGMT_VENDOR_RANGE2; i++)
		if (vendor->vendor_class[i])
			return 1;

	return 0;
}

static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method,
				     struct ib_mad_agent_private *agent)
{
	int i;

	/* Remove any methods for this mad agent */
	for (i = 0; i < IB_MGMT_MAX_METHODS; i++) {
		if (method->agent[i] == agent) {
			method->agent[i] = NULL;
		}
	}
}
static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			      struct ib_mad_agent_private *agent_priv,
			      u8 mgmt_class)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_mgmt_class_table **class;
	struct ib_mad_mgmt_method_table **method;
	int i, ret;

	port_priv = agent_priv->qp_info->port_priv;
	class = &port_priv->version[mad_reg_req->mgmt_class_version].class;
	if (!*class) {
		/* Allocate management class table for "new" class version */
		*class = kzalloc(sizeof **class, GFP_ATOMIC);
		if (!*class) {
			dev_err(&agent_priv->agent.device->dev,
				"No memory for ib_mad_mgmt_class_table\n");
			ret = -ENOMEM;
			goto error1;
		}

		/* Allocate method table for this management class */
		method = &(*class)->method_table[mgmt_class];
		if ((ret = allocate_method_table(method)))
			goto error2;
	} else {
		method = &(*class)->method_table[mgmt_class];
		if (!*method) {
			/* Allocate method table for this management class */
			if ((ret = allocate_method_table(method)))
				goto error1;
		}
	}

	/* Now, make sure methods are not already in use */
	if (method_in_use(method, mad_reg_req))
		goto error3;

	/* Finally, add in methods being registered */
	for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
		(*method)->agent[i] = agent_priv;

	return 0;

error3:
	/* Remove any methods for this mad agent */
	remove_methods_mad_agent(*method, agent_priv);
	/* Now, check to see if there are any methods in use */
	if (!check_method_table(*method)) {
		/* If not, release management method table */
		kfree(*method);
		*method = NULL;
	}
	ret = -EINVAL;
	goto error1;
error2:
	kfree(*class);
	*class = NULL;
error1:
	return ret;
}
static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			   struct ib_mad_agent_private *agent_priv)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_mgmt_vendor_class_table **vendor_table;
	struct ib_mad_mgmt_vendor_class_table *vendor = NULL;
	struct ib_mad_mgmt_vendor_class *vendor_class = NULL;
	struct ib_mad_mgmt_method_table **method;
	int i, ret = -ENOMEM;
	u8 vclass;

	/* "New" vendor (with OUI) class */
	vclass = vendor_class_index(mad_reg_req->mgmt_class);
	port_priv = agent_priv->qp_info->port_priv;
	vendor_table = &port_priv->version[
				mad_reg_req->mgmt_class_version].vendor;
	if (!*vendor_table) {
		/* Allocate mgmt vendor class table for "new" class version */
		vendor = kzalloc(sizeof *vendor, GFP_ATOMIC);
		if (!vendor) {
			dev_err(&agent_priv->agent.device->dev,
				"No memory for ib_mad_mgmt_vendor_class_table\n");
			goto error1;
		}

		*vendor_table = vendor;
	}
	if (!(*vendor_table)->vendor_class[vclass]) {
		/* Allocate table for this management vendor class */
		vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC);
		if (!vendor_class) {
			dev_err(&agent_priv->agent.device->dev,
				"No memory for ib_mad_mgmt_vendor_class\n");
			goto error2;
		}

		(*vendor_table)->vendor_class[vclass] = vendor_class;
	}
	for (i = 0; i < MAX_MGMT_OUI; i++) {
		/* Is there matching OUI for this vendor class ? */
		if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i],
			    mad_reg_req->oui, 3)) {
			method = &(*vendor_table)->vendor_class[
						vclass]->method_table[i];
			goto check_in_use;
		}
	}
	for (i = 0; i < MAX_MGMT_OUI; i++) {
		/* OUI slot available ? */
		if (!is_vendor_oui((*vendor_table)->vendor_class[
				vclass]->oui[i])) {
			method = &(*vendor_table)->vendor_class[
				vclass]->method_table[i];
			/* Allocate method table for this OUI */
			if ((ret = allocate_method_table(method)))
				goto error3;
			memcpy((*vendor_table)->vendor_class[vclass]->oui[i],
			       mad_reg_req->oui, 3);
			goto check_in_use;
		}
	}
	dev_err(&agent_priv->agent.device->dev, "All OUI slots in use\n");
	goto error3;

check_in_use:
	/* Now, make sure methods are not already in use */
	if (method_in_use(method, mad_reg_req))
		goto error4;

	/* Finally, add in methods being registered */
	for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
		(*method)->agent[i] = agent_priv;

	return 0;

error4:
	/* Remove any methods for this mad agent */
	remove_methods_mad_agent(*method, agent_priv);
	/* Now, check to see if there are any methods in use */
	if (!check_method_table(*method)) {
		/* If not, release management method table */
		kfree(*method);
		*method = NULL;
	}
	ret = -EINVAL;
error3:
	if (vendor_class) {
		(*vendor_table)->vendor_class[vclass] = NULL;
		kfree(vendor_class);
	}
error2:
	if (vendor) {
		*vendor_table = NULL;
		kfree(vendor);
	}
error1:
	return ret;
}
static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_mgmt_class_table *class;
	struct ib_mad_mgmt_method_table *method;
	struct ib_mad_mgmt_vendor_class_table *vendor;
	struct ib_mad_mgmt_vendor_class *vendor_class;
	int index;
	u8 mgmt_class;

	/*
	 * Was MAD registration request supplied
	 * with original registration ?
	 */
	if (!agent_priv->reg_req)
		goto out;

	port_priv = agent_priv->qp_info->port_priv;
	mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class);
	class = port_priv->version[
			agent_priv->reg_req->mgmt_class_version].class;
	if (!class)
		goto vendor_check;

	method = class->method_table[mgmt_class];
	if (method) {
		/* Remove any methods for this mad agent */
		remove_methods_mad_agent(method, agent_priv);
		/* Now, check to see if there are any methods still in use */
		if (!check_method_table(method)) {
			/* If not, release management method table */
			kfree(method);
			class->method_table[mgmt_class] = NULL;
			/* Any management classes left ? */
			if (!check_class_table(class)) {
				/* If not, release management class table */
				kfree(class);
				port_priv->version[
					agent_priv->reg_req->
					mgmt_class_version].class = NULL;
			}
		}
	}

vendor_check:
	if (!is_vendor_class(mgmt_class))
		goto out;

	/* normalize mgmt_class to vendor range 2 */
	mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class);
	vendor = port_priv->version[
			agent_priv->reg_req->mgmt_class_version].vendor;

	if (!vendor)
		goto out;

	vendor_class = vendor->vendor_class[mgmt_class];
	if (vendor_class) {
		index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui);
		if (index < 0)
			goto out;
		method = vendor_class->method_table[index];
		if (method) {
			/* Remove any methods for this mad agent */
			remove_methods_mad_agent(method, agent_priv);
			/*
			 * Now, check to see if there are
			 * any methods still in use
			 */
			if (!check_method_table(method)) {
				/* If not, release management method table */
				kfree(method);
				vendor_class->method_table[index] = NULL;
				memset(vendor_class->oui[index], 0, 3);
				/* Any OUIs left ? */
				if (!check_vendor_class(vendor_class)) {
					/* If not, release vendor class table */
					kfree(vendor_class);
					vendor->vendor_class[mgmt_class] = NULL;
					/* Any other vendor classes left ? */
					if (!check_vendor_table(vendor)) {
						kfree(vendor);
						port_priv->version[
							agent_priv->reg_req->
							mgmt_class_version].
							vendor = NULL;
					}
				}
			}
		}
	}

out:
	return;
}
static struct ib_mad_agent_private *
find_mad_agent(struct ib_mad_port_private *port_priv,
	       const struct ib_mad_hdr *mad_hdr)
{
	struct ib_mad_agent_private *mad_agent = NULL;
	unsigned long flags;

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	if (ib_response_mad(mad_hdr)) {
		u32 hi_tid;
		struct ib_mad_agent_private *entry;

		/*
		 * Routing is based on high 32 bits of transaction ID
		 * of MAD.
		 */
		hi_tid = be64_to_cpu(mad_hdr->tid) >> 32;
		list_for_each_entry(entry, &port_priv->agent_list, agent_list) {
			if (entry->agent.hi_tid == hi_tid) {
				mad_agent = entry;
				break;
			}
		}
	} else {
		struct ib_mad_mgmt_class_table *class;
		struct ib_mad_mgmt_method_table *method;
		struct ib_mad_mgmt_vendor_class_table *vendor;
		struct ib_mad_mgmt_vendor_class *vendor_class;
		const struct ib_vendor_mad *vendor_mad;
		int index;

		/*
		 * Routing is based on version, class, and method
		 * For "newer" vendor MADs, also based on OUI
		 */
		if (mad_hdr->class_version >= MAX_MGMT_VERSION)
			goto out;
		if (!is_vendor_class(mad_hdr->mgmt_class)) {
			class = port_priv->version[
					mad_hdr->class_version].class;
			if (!class)
				goto out;
			if (convert_mgmt_class(mad_hdr->mgmt_class) >=
			    IB_MGMT_MAX_METHODS)
				goto out;
			method = class->method_table[convert_mgmt_class(
							mad_hdr->mgmt_class)];
			if (method)
				mad_agent = method->agent[mad_hdr->method &
							  ~IB_MGMT_METHOD_RESP];
		} else {
			vendor = port_priv->version[
					mad_hdr->class_version].vendor;
			if (!vendor)
				goto out;
			vendor_class = vendor->vendor_class[vendor_class_index(
						mad_hdr->mgmt_class)];
			if (!vendor_class)
				goto out;
			/* Find matching OUI */
			vendor_mad = (const struct ib_vendor_mad *)mad_hdr;
			index = find_vendor_oui(vendor_class, vendor_mad->oui);
			if (index == -1)
				goto out;
			method = vendor_class->method_table[index];
			if (method)
				mad_agent = method->agent[mad_hdr->method &
							  ~IB_MGMT_METHOD_RESP];
		}
	}

	if (mad_agent) {
		if (mad_agent->agent.recv_handler)
			atomic_inc(&mad_agent->refcount);
		else {
			dev_notice(&port_priv->device->dev,
				   "No receive handler for client %p on port %d\n",
				   &mad_agent->agent, port_priv->port_num);
			mad_agent = NULL;
		}
	}
out:
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	return mad_agent;
}
static int validate_mad(const struct ib_mad_hdr *mad_hdr,
			const struct ib_mad_qp_info *qp_info,
			bool opa)
{
	int valid = 0;
	u32 qp_num = qp_info->qp->qp_num;

	/* Make sure MAD base version is understood */
	if (mad_hdr->base_version != IB_MGMT_BASE_VERSION &&
	    (!opa || mad_hdr->base_version != OPA_MGMT_BASE_VERSION)) {
		pr_err("MAD received with unsupported base version %d %s\n",
		       mad_hdr->base_version, opa ? "(opa)" : "");
		goto out;
	}

	/* Filter SMI packets sent to other than QP0 */
	if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
	    (mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
		if (qp_num == 0)
			valid = 1;
	} else {
		/* CM attributes other than ClassPortInfo only use Send method */
		if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_CM) &&
		    (mad_hdr->attr_id != IB_MGMT_CLASSPORTINFO_ATTR_ID) &&
		    (mad_hdr->method != IB_MGMT_METHOD_SEND))
			goto out;
		/* Filter GSI packets sent to QP0 */
		if (qp_num != 0)
			valid = 1;
	}

out:
	return valid;
}
static int is_rmpp_data_mad(const struct ib_mad_agent_private *mad_agent_priv,
			    const struct ib_mad_hdr *mad_hdr)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *)mad_hdr;
	return !mad_agent_priv->agent.rmpp_version ||
		!ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent) ||
		!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
				    IB_MGMT_RMPP_FLAG_ACTIVE) ||
		(rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA);
}
static inline int rcv_has_same_class(const struct ib_mad_send_wr_private *wr,
				     const struct ib_mad_recv_wc *rwc)
{
	return ((struct ib_mad_hdr *)(wr->send_buf.mad))->mgmt_class ==
		rwc->recv_buf.mad->mad_hdr.mgmt_class;
}
static inline int rcv_has_same_gid(const struct ib_mad_agent_private *mad_agent_priv,
				   const struct ib_mad_send_wr_private *wr,
				   const struct ib_mad_recv_wc *rwc)
{
	struct ib_ah_attr attr;
	u8 send_resp, rcv_resp;
	union ib_gid sgid;
	struct ib_device *device = mad_agent_priv->agent.device;
	u8 port_num = mad_agent_priv->agent.port_num;
	u8 lmc;

	send_resp = ib_response_mad((struct ib_mad_hdr *)wr->send_buf.mad);
	rcv_resp = ib_response_mad(&rwc->recv_buf.mad->mad_hdr);

	if (send_resp == rcv_resp)
		/* both requests, or both responses. GIDs different */
		return 0;

	if (ib_query_ah(wr->send_buf.ah, &attr))
		/* Assume not equal, to avoid false positives. */
		return 0;

	if (!!(attr.ah_flags & IB_AH_GRH) !=
	    !!(rwc->wc->wc_flags & IB_WC_GRH))
		/* one has GID, other does not.  Assume different */
		return 0;

	if (!send_resp && rcv_resp) {
		/* is request/response. */
		if (!(attr.ah_flags & IB_AH_GRH)) {
			if (ib_get_cached_lmc(device, port_num, &lmc))
				return 0;
			return (!lmc || !((attr.src_path_bits ^
					   rwc->wc->dlid_path_bits) &
					  ((1 << lmc) - 1)));
		} else {
			if (ib_get_cached_gid(device, port_num,
					      attr.grh.sgid_index, &sgid, NULL))
				return 0;
			return !memcmp(sgid.raw, rwc->recv_buf.grh->dgid.raw,
				       16);
		}
	}

	if (!(attr.ah_flags & IB_AH_GRH))
		return attr.dlid == rwc->wc->slid;
	else
		return !memcmp(attr.grh.dgid.raw, rwc->recv_buf.grh->sgid.raw,
			       16);
}

static inline int is_direct(u8 class)
{
	return (class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE);
}
struct ib_mad_send_wr_private*
ib_find_send_mad(const struct ib_mad_agent_private *mad_agent_priv,
		 const struct ib_mad_recv_wc *wc)
{
	struct ib_mad_send_wr_private *wr;
	const struct ib_mad_hdr *mad_hdr;

	mad_hdr = &wc->recv_buf.mad->mad_hdr;

	list_for_each_entry(wr, &mad_agent_priv->wait_list, agent_list) {
		if ((wr->tid == mad_hdr->tid) &&
		    rcv_has_same_class(wr, wc) &&
		    /*
		     * Don't check GID for direct routed MADs.
		     * These might have permissive LIDs.
		     */
		    (is_direct(mad_hdr->mgmt_class) ||
		     rcv_has_same_gid(mad_agent_priv, wr, wc)))
			return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
	}

	/*
	 * It's possible to receive the response before we've
	 * been notified that the send has completed
	 */
	list_for_each_entry(wr, &mad_agent_priv->send_list, agent_list) {
		if (is_rmpp_data_mad(mad_agent_priv, wr->send_buf.mad) &&
		    wr->tid == mad_hdr->tid &&
		    wr->timeout &&
		    rcv_has_same_class(wr, wc) &&
		    /*
		     * Don't check GID for direct routed MADs.
		     * These might have permissive LIDs.
		     */
		    (is_direct(mad_hdr->mgmt_class) ||
		     rcv_has_same_gid(mad_agent_priv, wr, wc)))
			/* Verify request has not been canceled */
			return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
	}
	return NULL;
}
void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr)
{
	mad_send_wr->timeout = 0;
	if (mad_send_wr->refcount == 1)
		list_move_tail(&mad_send_wr->agent_list,
			      &mad_send_wr->mad_agent_priv->done_list);
}
static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
				 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	unsigned long flags;

	INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
	list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
	if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
		mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
						      mad_recv_wc);
		if (!mad_recv_wc) {
			deref_mad_agent(mad_agent_priv);
			return;
		}
	}

	/* Complete corresponding request */
	if (ib_response_mad(&mad_recv_wc->recv_buf.mad->mad_hdr)) {
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
		if (!mad_send_wr) {
			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
			if (!ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)
			   && ib_is_mad_class_rmpp(mad_recv_wc->recv_buf.mad->mad_hdr.mgmt_class)
			   && (ib_get_rmpp_flags(&((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr)
					& IB_MGMT_RMPP_FLAG_ACTIVE)) {
				/* user rmpp is in effect
				 * and this is an active RMPP MAD
				 */
				mad_recv_wc->wc->wr_id = 0;
				mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
								   mad_recv_wc);
				atomic_dec(&mad_agent_priv->refcount);
			} else {
				/* not user rmpp, revert to normal behavior and
				 * drop the mad */
				ib_free_recv_mad(mad_recv_wc);
				deref_mad_agent(mad_agent_priv);
				return;
			}
		} else {
			ib_mark_mad_done(mad_send_wr);
			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

			/* Defined behavior is to complete response before request */
			mad_recv_wc->wc->wr_id = (unsigned long) &mad_send_wr->send_buf;
			mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
							   mad_recv_wc);
			atomic_dec(&mad_agent_priv->refcount);

			mad_send_wc.status = IB_WC_SUCCESS;
			mad_send_wc.vendor_err = 0;
			mad_send_wc.send_buf = &mad_send_wr->send_buf;
			ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
		}
	} else {
		mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
						   mad_recv_wc);
		deref_mad_agent(mad_agent_priv);
	}
}
static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv,
				     const struct ib_mad_qp_info *qp_info,
				     const struct ib_wc *wc,
				     int port_num,
				     struct ib_mad_private *recv,
				     struct ib_mad_private *response)
{
	enum smi_forward_action retsmi;
	struct ib_smp *smp = (struct ib_smp *)recv->mad;

	if (smi_handle_dr_smp_recv(smp,
				   rdma_cap_ib_switch(port_priv->device),
				   port_num,
				   port_priv->device->phys_port_cnt) ==
				   IB_SMI_DISCARD)
		return IB_SMI_DISCARD;

	retsmi = smi_check_forward_dr_smp(smp);
	if (retsmi == IB_SMI_LOCAL)
		return IB_SMI_HANDLE;

	if (retsmi == IB_SMI_SEND) { /* don't forward */
		if (smi_handle_dr_smp_send(smp,
					   rdma_cap_ib_switch(port_priv->device),
					   port_num) == IB_SMI_DISCARD)
			return IB_SMI_DISCARD;

		if (smi_check_local_smp(smp, port_priv->device) == IB_SMI_DISCARD)
			return IB_SMI_DISCARD;
	} else if (rdma_cap_ib_switch(port_priv->device)) {
		/* forward case for switches */
		memcpy(response, recv, mad_priv_size(response));
		response->header.recv_wc.wc = &response->header.wc;
		response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad;
		response->header.recv_wc.recv_buf.grh = &response->grh;

		agent_send_response((const struct ib_mad_hdr *)response->mad,
				    &response->grh, wc,
				    port_priv->device,
				    smi_get_fwd_port(smp),
				    qp_info->qp->qp_num,
				    response->mad_size,
				    false);

		return IB_SMI_DISCARD;
	}
	return IB_SMI_HANDLE;
}
static bool generate_unmatched_resp(const struct ib_mad_private *recv,
				    struct ib_mad_private *response,
				    size_t *resp_len, bool opa)
{
	const struct ib_mad_hdr *recv_hdr = (const struct ib_mad_hdr *)recv->mad;
	struct ib_mad_hdr *resp_hdr = (struct ib_mad_hdr *)response->mad;

	if (recv_hdr->method == IB_MGMT_METHOD_GET ||
	    recv_hdr->method == IB_MGMT_METHOD_SET) {
		memcpy(response, recv, mad_priv_size(response));
		response->header.recv_wc.wc = &response->header.wc;
		response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad;
		response->header.recv_wc.recv_buf.grh = &response->grh;
		resp_hdr->method = IB_MGMT_METHOD_GET_RESP;
		resp_hdr->status = cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB);
		if (recv_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
			resp_hdr->status |= IB_SMP_DIRECTION;

		if (opa && recv_hdr->base_version == OPA_MGMT_BASE_VERSION) {
			if (recv_hdr->mgmt_class ==
			    IB_MGMT_CLASS_SUBN_LID_ROUTED ||
			    recv_hdr->mgmt_class ==
			    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
				*resp_len = opa_get_smp_header_size(
							(struct opa_smp *)recv->mad);
			else
				*resp_len = sizeof(struct ib_mad_hdr);
		}

		return true;
	} else {
		return false;
	}
}
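/*
 * OPA counterpart of handle_ib_smi(): the same directed-route SMP
 * receive processing, but using the OPA SMP format and header sizes.
 */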
static enum smi_action
handle_opa_smi(struct ib_mad_port_private *port_priv,
	       struct ib_mad_qp_info *qp_info,
	       struct ib_wc *wc,
	       int port_num,
	       struct ib_mad_private *recv,
	       struct ib_mad_private *response)
{
	enum smi_forward_action retsmi;
	struct opa_smp *smp = (struct opa_smp *)recv->mad;

	if (opa_smi_handle_dr_smp_recv(smp,
				   rdma_cap_ib_switch(port_priv->device),
				   port_num,
				   port_priv->device->phys_port_cnt) ==
				   IB_SMI_DISCARD)
		return IB_SMI_DISCARD;

	retsmi = opa_smi_check_forward_dr_smp(smp);
	if (retsmi == IB_SMI_LOCAL)
		return IB_SMI_HANDLE;

	if (retsmi == IB_SMI_SEND) { /* don't forward */
		if (opa_smi_handle_dr_smp_send(smp,
					   rdma_cap_ib_switch(port_priv->device),
					   port_num) == IB_SMI_DISCARD)
			return IB_SMI_DISCARD;

		if (opa_smi_check_local_smp(smp, port_priv->device) ==
		    IB_SMI_DISCARD)
			return IB_SMI_DISCARD;

	} else if (rdma_cap_ib_switch(port_priv->device)) {
		/* forward case for switches */
		memcpy(response, recv, mad_priv_size(response));
		response->header.recv_wc.wc = &response->header.wc;
		response->header.recv_wc.recv_buf.opa_mad =
				(struct opa_mad *)response->mad;
		response->header.recv_wc.recv_buf.grh = &response->grh;

		agent_send_response((const struct ib_mad_hdr *)response->mad,
				    &response->grh, wc,
				    port_priv->device,
				    opa_smi_get_fwd_port(smp),
				    qp_info->qp->qp_num,
				    recv->header.wc.byte_len,
				    true);

		return IB_SMI_DISCARD;
	}

	return IB_SMI_HANDLE;
}
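/*
 * Dispatch an incoming SMP to the OPA or IB directed-route handler
 * based on the MAD base and class version.
 */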
static enum smi_action
handle_smi(struct ib_mad_port_private *port_priv,
	   struct ib_mad_qp_info *qp_info,
	   struct ib_wc *wc,
	   int port_num,
	   struct ib_mad_private *recv,
	   struct ib_mad_private *response,
	   bool opa)
{
	struct ib_mad_hdr *mad_hdr = (struct ib_mad_hdr *)recv->mad;

	if (opa && mad_hdr->base_version == OPA_MGMT_BASE_VERSION &&
	    mad_hdr->class_version == OPA_SMI_CLASS_VERSION)
		return handle_opa_smi(port_priv, qp_info, wc, port_num, recv,
				      response);

	return handle_ib_smi(port_priv, qp_info, wc, port_num, recv, response);
}
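/*
 * Completion handler for a receive work request: unmap the buffer,
 * validate the MAD, run SMI processing for directed-route SMPs, give
 * the driver's process_mad() first refusal, and finally hand the MAD
 * to the matching agent (or generate an unmatched response). A new
 * receive buffer is always reposted on the QP.
 */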
static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
				     struct ib_wc *wc)
{
	struct ib_mad_qp_info *qp_info;
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *recv, *response = NULL;
	struct ib_mad_list_head *mad_list;
	struct ib_mad_agent_private *mad_agent;
	int port_num;
	int ret = IB_MAD_RESULT_SUCCESS;
	size_t mad_size;
	u16 resp_mad_pkey_index = 0;
	bool opa;

	mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
	qp_info = mad_list->mad_queue->qp_info;
	dequeue_mad(mad_list);

	opa = rdma_cap_opa_mad(qp_info->port_priv->device,
			       qp_info->port_priv->port_num);

	mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
				    mad_list);
	recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
	ib_dma_unmap_single(port_priv->device,
			    recv->header.mapping,
			    mad_priv_dma_size(recv),
			    DMA_FROM_DEVICE);

	/* Setup MAD receive work completion from "normal" work completion */
	recv->header.wc = *wc;
	recv->header.recv_wc.wc = &recv->header.wc;

	if (opa && ((struct ib_mad_hdr *)(recv->mad))->base_version == OPA_MGMT_BASE_VERSION) {
		recv->header.recv_wc.mad_len = wc->byte_len - sizeof(struct ib_grh);
		recv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad);
	} else {
		recv->header.recv_wc.mad_len = sizeof(struct ib_mad);
		recv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad);
	}

	recv->header.recv_wc.recv_buf.mad = (struct ib_mad *)recv->mad;
	recv->header.recv_wc.recv_buf.grh = &recv->grh;

	if (atomic_read(&qp_info->snoop_count))
		snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS);

	/* Validate MAD */
	if (!validate_mad((const struct ib_mad_hdr *)recv->mad, qp_info, opa))
		goto out;

	mad_size = recv->mad_size;
	response = alloc_mad_private(mad_size, GFP_KERNEL);
	if (!response) {
		dev_err(&port_priv->device->dev,
			"ib_mad_recv_done_handler no memory for response buffer\n");
		goto out;
	}

	if (rdma_cap_ib_switch(port_priv->device))
		port_num = wc->port_num;
	else
		port_num = port_priv->port_num;

	if (((struct ib_mad_hdr *)recv->mad)->mgmt_class ==
	    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
		if (handle_smi(port_priv, qp_info, wc, port_num, recv,
			       response, opa)
		    == IB_SMI_DISCARD)
			goto out;
	}

	/* Give driver "right of first refusal" on incoming MAD */
	if (port_priv->device->process_mad) {
		ret = port_priv->device->process_mad(port_priv->device, 0,
						     port_priv->port_num,
						     wc, &recv->grh,
						     (const struct ib_mad_hdr *)recv->mad,
						     recv->mad_size,
						     (struct ib_mad_hdr *)response->mad,
						     &mad_size, &resp_mad_pkey_index);

		if (opa)
			wc->pkey_index = resp_mad_pkey_index;

		if (ret & IB_MAD_RESULT_SUCCESS) {
			if (ret & IB_MAD_RESULT_CONSUMED)
				goto out;
			if (ret & IB_MAD_RESULT_REPLY) {
				agent_send_response((const struct ib_mad_hdr *)response->mad,
						    &recv->grh, wc,
						    port_priv->device,
						    port_num,
						    qp_info->qp->qp_num,
						    mad_size, opa);
				goto out;
			}
		}
	}

	mad_agent = find_mad_agent(port_priv, (const struct ib_mad_hdr *)recv->mad);
	if (mad_agent) {
		ib_mad_complete_recv(mad_agent, &recv->header.recv_wc);
		/*
		 * recv is freed up in error cases in ib_mad_complete_recv
		 * or via recv_handler in ib_mad_complete_recv()
		 */
		recv = NULL;
	} else if ((ret & IB_MAD_RESULT_SUCCESS) &&
		   generate_unmatched_resp(recv, response, &mad_size, opa)) {
		agent_send_response((const struct ib_mad_hdr *)response->mad, &recv->grh, wc,
				    port_priv->device, port_num,
				    qp_info->qp->qp_num, mad_size, opa);
	}

out:
	/* Post another receive request for this QP */
	if (response) {
		ib_mad_post_receive_mads(qp_info, response);
		kfree(recv);
	} else
		ib_mad_post_receive_mads(qp_info, recv);
}
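/*
 * Recompute the delayed-work timer for an agent after its wait list
 * has changed; cancel it when no sends are waiting for a response.
 */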
static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long delay;

	if (list_empty(&mad_agent_priv->wait_list)) {
		cancel_delayed_work(&mad_agent_priv->timed_work);
	} else {
		mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
					 struct ib_mad_send_wr_private,
					 agent_list);

		if (time_after(mad_agent_priv->timeout,
			       mad_send_wr->timeout)) {
			mad_agent_priv->timeout = mad_send_wr->timeout;
			delay = mad_send_wr->timeout - jiffies;
			if ((long)delay <= 0)
				delay = 1;
			mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
					 &mad_agent_priv->timed_work, delay);
		}
	}
}
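/*
 * Move a sent MAD onto the agent's wait list, ordered by absolute
 * timeout, and reschedule the timeout worker if this send now expires
 * first.
 */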
static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *temp_mad_send_wr;
	struct list_head *list_item;
	unsigned long delay;

	mad_agent_priv = mad_send_wr->mad_agent_priv;
	list_del(&mad_send_wr->agent_list);

	delay = mad_send_wr->timeout;
	mad_send_wr->timeout += jiffies;

	if (delay) {
		list_for_each_prev(list_item, &mad_agent_priv->wait_list) {
			temp_mad_send_wr = list_entry(list_item,
						struct ib_mad_send_wr_private,
						agent_list);
			if (time_after(mad_send_wr->timeout,
				       temp_mad_send_wr->timeout))
				break;
		}
	} else
		list_item = &mad_agent_priv->wait_list;
	list_add(&mad_send_wr->agent_list, list_item);

	/* Reschedule a work item if we have a shorter timeout */
	if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list)
		mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
				 &mad_agent_priv->timed_work, delay);
}
void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
			  int timeout_ms)
{
	mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
	wait_for_response(mad_send_wr);
}
/*
 * Process a send work completion
 */
void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
			     struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_mad_agent_private *mad_agent_priv;
	unsigned long flags;
	int ret;

	mad_agent_priv = mad_send_wr->mad_agent_priv;
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
		ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc);
		if (ret == IB_RMPP_RESULT_CONSUMED)
			goto done;
	} else
		ret = IB_RMPP_RESULT_UNHANDLED;

	if (mad_send_wc->status != IB_WC_SUCCESS &&
	    mad_send_wr->status == IB_WC_SUCCESS) {
		mad_send_wr->status = mad_send_wc->status;
		mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
	}

	if (--mad_send_wr->refcount > 0) {
		if (mad_send_wr->refcount == 1 && mad_send_wr->timeout &&
		    mad_send_wr->status == IB_WC_SUCCESS) {
			wait_for_response(mad_send_wr);
		}
		goto done;
	}

	/* Remove send from MAD agent and notify client of completion */
	list_del(&mad_send_wr->agent_list);
	adjust_timeout(mad_agent_priv);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

	if (mad_send_wr->status != IB_WC_SUCCESS)
		mad_send_wc->status = mad_send_wr->status;
	if (ret == IB_RMPP_RESULT_INTERNAL)
		ib_rmpp_send_handler(mad_send_wc);
	else
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   mad_send_wc);

	/* Release reference on agent taken when sending */
	deref_mad_agent(mad_agent_priv);
	return;
done:
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}
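/*
 * Completion handler for a send work request: unmap the buffers,
 * promote a queued (overflow) send onto the hardware send queue, and
 * complete the work request back to the agent.
 */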
static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv,
				     struct ib_wc *wc)
{
	struct ib_mad_send_wr_private *mad_send_wr, *queued_send_wr;
	struct ib_mad_list_head *mad_list;
	struct ib_mad_qp_info *qp_info;
	struct ib_mad_queue *send_queue;
	struct ib_send_wr *bad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	unsigned long flags;
	int ret;

	mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
	mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
				   mad_list);
	send_queue = mad_list->mad_queue;
	qp_info = send_queue->qp_info;

retry:
	ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
			    mad_send_wr->header_mapping,
			    mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
	ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
			    mad_send_wr->payload_mapping,
			    mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
	queued_send_wr = NULL;
	spin_lock_irqsave(&send_queue->lock, flags);
	list_del(&mad_list->list);

	/* Move queued send to the send queue */
	if (send_queue->count-- > send_queue->max_active) {
		mad_list = container_of(qp_info->overflow_list.next,
					struct ib_mad_list_head, list);
		queued_send_wr = container_of(mad_list,
					struct ib_mad_send_wr_private,
					mad_list);
		list_move_tail(&mad_list->list, &send_queue->list);
	}
	spin_unlock_irqrestore(&send_queue->lock, flags);

	mad_send_wc.send_buf = &mad_send_wr->send_buf;
	mad_send_wc.status = wc->status;
	mad_send_wc.vendor_err = wc->vendor_err;
	if (atomic_read(&qp_info->snoop_count))
		snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc,
			   IB_MAD_SNOOP_SEND_COMPLETIONS);
	ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);

	if (queued_send_wr) {
		ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr.wr,
				   &bad_send_wr);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"ib_post_send failed: %d\n", ret);
			mad_send_wr = queued_send_wr;
			wc->status = IB_WC_LOC_QP_OP_ERR;
			goto retry;
		}
	}
}
static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_list_head *mad_list;
	unsigned long flags;

	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
	list_for_each_entry(mad_list, &qp_info->send_queue.list, list) {
		mad_send_wr = container_of(mad_list,
					   struct ib_mad_send_wr_private,
					   mad_list);
		mad_send_wr->retry = 1;
	}
	spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
}
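/*
 * Handle a failed work completion. Flushed sends are reposted when
 * marked for retry; other send errors move the QP from SQE back to RTS
 * before completing the offending send.
 */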
static void mad_error_handler(struct ib_mad_port_private *port_priv,
			      struct ib_wc *wc)
{
	struct ib_mad_list_head *mad_list;
	struct ib_mad_qp_info *qp_info;
	struct ib_mad_send_wr_private *mad_send_wr;
	int ret;

	/* Determine if failure was a send or receive */
	mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
	qp_info = mad_list->mad_queue->qp_info;
	if (mad_list->mad_queue == &qp_info->recv_queue)
		/*
		 * Receive errors indicate that the QP has entered the error
		 * state - error handling/shutdown code will cleanup
		 */
		return;

	/*
	 * Send errors will transition the QP to SQE - move
	 * QP to RTS and repost flushed work requests
	 */
	mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
				   mad_list);
	if (wc->status == IB_WC_WR_FLUSH_ERR) {
		if (mad_send_wr->retry) {
			/* Repost send */
			struct ib_send_wr *bad_send_wr;

			mad_send_wr->retry = 0;
			ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr.wr,
					   &bad_send_wr);
			if (ret)
				ib_mad_send_done_handler(port_priv, wc);
		} else
			ib_mad_send_done_handler(port_priv, wc);
	} else {
		struct ib_qp_attr *attr;

		/* Transition QP to RTS and fail offending send */
		attr = kmalloc(sizeof *attr, GFP_KERNEL);
		if (attr) {
			attr->qp_state = IB_QPS_RTS;
			attr->cur_qp_state = IB_QPS_SQE;
			ret = ib_modify_qp(qp_info->qp, attr,
					   IB_QP_STATE | IB_QP_CUR_STATE);
			kfree(attr);
			if (ret)
				dev_err(&port_priv->device->dev,
					"mad_error_handler - ib_modify_qp to RTS : %d\n",
					ret);
			else
				mark_sends_for_retry(qp_info);
		}
		ib_mad_send_done_handler(port_priv, wc);
	}
}
/*
 * IB MAD completion callback
 */
static void ib_mad_completion_handler(struct work_struct *work)
{
	struct ib_mad_port_private *port_priv;
	struct ib_wc wc;

	port_priv = container_of(work, struct ib_mad_port_private, work);
	ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);

	while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) {
		if (wc.status == IB_WC_SUCCESS) {
			switch (wc.opcode) {
			case IB_WC_SEND:
				ib_mad_send_done_handler(port_priv, &wc);
				break;
			case IB_WC_RECV:
				ib_mad_recv_done_handler(port_priv, &wc);
				break;
			default:
				BUG_ON(1);
				break;
			}
		} else
			mad_error_handler(port_priv, &wc);
	}
}
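/*
 * Flush every outstanding send for an agent that is being
 * unregistered and report each one to the client as
 * IB_WC_WR_FLUSH_ERR.
 */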
static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
{
	unsigned long flags;
	struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	struct list_head cancel_list;

	INIT_LIST_HEAD(&cancel_list);

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
				 &mad_agent_priv->send_list, agent_list) {
		if (mad_send_wr->status == IB_WC_SUCCESS) {
			mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
			mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
		}
	}

	/* Empty wait list to prevent receives from finding a request */
	list_splice_init(&mad_agent_priv->wait_list, &cancel_list);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

	/* Report all cancelled requests */
	mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
	mad_send_wc.vendor_err = 0;

	list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
				 &cancel_list, agent_list) {
		mad_send_wc.send_buf = &mad_send_wr->send_buf;
		list_del(&mad_send_wr->agent_list);
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);
		atomic_dec(&mad_agent_priv->refcount);
	}
}
static struct ib_mad_send_wr_private*
find_send_wr(struct ib_mad_agent_private *mad_agent_priv,
	     struct ib_mad_send_buf *send_buf)
{
	struct ib_mad_send_wr_private *mad_send_wr;

	list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
			    agent_list) {
		if (&mad_send_wr->send_buf == send_buf)
			return mad_send_wr;
	}

	list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
			    agent_list) {
		if (is_rmpp_data_mad(mad_agent_priv,
				     mad_send_wr->send_buf.mad) &&
		    &mad_send_wr->send_buf == send_buf)
			return mad_send_wr;
	}
	return NULL;
}
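/*
 * Update the timeout of an outstanding send, or cancel it when
 * timeout_ms is zero (see ib_cancel_mad() below).
 */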
int ib_modify_mad(struct ib_mad_agent *mad_agent,
		  struct ib_mad_send_buf *send_buf, u32 timeout_ms)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long flags;
	int active;

	mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
				      agent);
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	mad_send_wr = find_send_wr(mad_agent_priv, send_buf);
	if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) {
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
		return -EINVAL;
	}

	active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1);
	if (!timeout_ms) {
		mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
		mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
	}

	mad_send_wr->send_buf.timeout_ms = timeout_ms;
	if (active)
		mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
	else
		ib_reset_mad_timeout(mad_send_wr, timeout_ms);

	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
	return 0;
}
EXPORT_SYMBOL(ib_modify_mad);
void ib_cancel_mad(struct ib_mad_agent *mad_agent,
		   struct ib_mad_send_buf *send_buf)
{
	ib_modify_mad(mad_agent, send_buf, 0);
}
EXPORT_SYMBOL(ib_cancel_mad);
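/*
 * Complete MADs that were processed locally (loopback to an agent on
 * the same port): deliver the receive side first, then the send
 * completion, mirroring the behavior for MADs that went to the wire.
 */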
static void local_completions(struct work_struct *work)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_local_private *local;
	struct ib_mad_agent_private *recv_mad_agent;
	unsigned long flags;
	int free_mad;
	struct ib_wc wc;
	struct ib_mad_send_wc mad_send_wc;
	bool opa;

	mad_agent_priv =
		container_of(work, struct ib_mad_agent_private, local_work);

	opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
			       mad_agent_priv->qp_info->port_priv->port_num);

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	while (!list_empty(&mad_agent_priv->local_list)) {
		local = list_entry(mad_agent_priv->local_list.next,
				   struct ib_mad_local_private,
				   completion_list);
		list_del(&local->completion_list);
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
		free_mad = 0;
		if (local->mad_priv) {
			u8 base_version;

			recv_mad_agent = local->recv_mad_agent;
			if (!recv_mad_agent) {
				dev_err(&mad_agent_priv->agent.device->dev,
					"No receive MAD agent for local completion\n");
				free_mad = 1;
				goto local_send_completion;
			}

			/*
			 * Defined behavior is to complete response
			 * before request
			 */
			build_smp_wc(recv_mad_agent->agent.qp,
				     (unsigned long) local->mad_send_wr,
				     be16_to_cpu(IB_LID_PERMISSIVE),
				     local->mad_send_wr->send_wr.pkey_index,
				     recv_mad_agent->agent.port_num, &wc);

			local->mad_priv->header.recv_wc.wc = &wc;

			base_version = ((struct ib_mad_hdr *)(local->mad_priv->mad))->base_version;
			if (opa && base_version == OPA_MGMT_BASE_VERSION) {
				local->mad_priv->header.recv_wc.mad_len = local->return_wc_byte_len;
				local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad);
			} else {
				local->mad_priv->header.recv_wc.mad_len = sizeof(struct ib_mad);
				local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad);
			}

			INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list);
			list_add(&local->mad_priv->header.recv_wc.recv_buf.list,
				 &local->mad_priv->header.recv_wc.rmpp_list);
			local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
			local->mad_priv->header.recv_wc.recv_buf.mad =
						(struct ib_mad *)local->mad_priv->mad;
			if (atomic_read(&recv_mad_agent->qp_info->snoop_count))
				snoop_recv(recv_mad_agent->qp_info,
					  &local->mad_priv->header.recv_wc,
					   IB_MAD_SNOOP_RECVS);
			recv_mad_agent->agent.recv_handler(
						&recv_mad_agent->agent,
						&local->mad_priv->header.recv_wc);
			spin_lock_irqsave(&recv_mad_agent->lock, flags);
			atomic_dec(&recv_mad_agent->refcount);
			spin_unlock_irqrestore(&recv_mad_agent->lock, flags);
		}

local_send_completion:
		/* Complete send */
		mad_send_wc.status = IB_WC_SUCCESS;
		mad_send_wc.vendor_err = 0;
		mad_send_wc.send_buf = &local->mad_send_wr->send_buf;
		if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
			snoop_send(mad_agent_priv->qp_info,
				   &local->mad_send_wr->send_buf,
				   &mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS);
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);

		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		atomic_dec(&mad_agent_priv->refcount);
		if (free_mad)
			kfree(local->mad_priv);
		kfree(local);
	}
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}
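/*
 * Resend a timed-out MAD if it still has retries left; on success the
 * request is placed back on the agent's send list.
 */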
static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
{
	int ret;

	if (!mad_send_wr->retries_left)
		return -ETIMEDOUT;

	mad_send_wr->retries_left--;
	mad_send_wr->send_buf.retries++;

	mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);

	if (ib_mad_kernel_rmpp_agent(&mad_send_wr->mad_agent_priv->agent)) {
		ret = ib_retry_rmpp(mad_send_wr);
		switch (ret) {
		case IB_RMPP_RESULT_UNHANDLED:
			ret = ib_send_mad(mad_send_wr);
			break;
		case IB_RMPP_RESULT_CONSUMED:
			ret = 0;
			break;
		default:
			ret = -ECOMM;
			break;
		}
	} else
		ret = ib_send_mad(mad_send_wr);

	if (!ret) {
		mad_send_wr->refcount++;
		list_add_tail(&mad_send_wr->agent_list,
			      &mad_send_wr->mad_agent_priv->send_list);
	}
	return ret;
}
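/*
 * Delayed-work handler that expires sends whose response timeout has
 * passed, retrying them first and otherwise completing them with
 * IB_WC_RESP_TIMEOUT_ERR.
 */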
static void timeout_sends(struct work_struct *work)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	unsigned long flags, delay;

	mad_agent_priv = container_of(work, struct ib_mad_agent_private,
				      timed_work.work);
	mad_send_wc.vendor_err = 0;

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	while (!list_empty(&mad_agent_priv->wait_list)) {
		mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
					 struct ib_mad_send_wr_private,
					 agent_list);

		if (time_after(mad_send_wr->timeout, jiffies)) {
			delay = mad_send_wr->timeout - jiffies;
			if ((long)delay <= 0)
				delay = 1;
			queue_delayed_work(mad_agent_priv->qp_info->
					   port_priv->wq,
					   &mad_agent_priv->timed_work, delay);
			break;
		}

		list_del(&mad_send_wr->agent_list);
		if (mad_send_wr->status == IB_WC_SUCCESS &&
		    !retry_send(mad_send_wr))
			continue;

		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

		if (mad_send_wr->status == IB_WC_SUCCESS)
			mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
		else
			mad_send_wc.status = mad_send_wr->status;
		mad_send_wc.send_buf = &mad_send_wr->send_buf;
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);

		atomic_dec(&mad_agent_priv->refcount);
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
	}
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}
static void ib_mad_thread_completion_handler(struct ib_cq *cq, void *arg)
{
	struct ib_mad_port_private *port_priv = cq->cq_context;
	unsigned long flags;

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	if (!list_empty(&port_priv->port_list))
		queue_work(port_priv->wq, &port_priv->work);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
}
/*
 * Allocate receive MADs and post receive WRs for them
 */
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
				    struct ib_mad_private *mad)
{
	unsigned long flags;
	int post, ret;
	struct ib_mad_private *mad_priv;
	struct ib_sge sg_list;
	struct ib_recv_wr recv_wr, *bad_recv_wr;
	struct ib_mad_queue *recv_queue = &qp_info->recv_queue;

	/* Initialize common scatter list fields */
	sg_list.lkey = qp_info->port_priv->pd->local_dma_lkey;

	/* Initialize common receive WR fields */
	recv_wr.next = NULL;
	recv_wr.sg_list = &sg_list;
	recv_wr.num_sge = 1;

	do {
		/* Allocate and map receive buffer */
		if (mad) {
			mad_priv = mad;
			mad = NULL;
		} else {
			mad_priv = alloc_mad_private(port_mad_size(qp_info->port_priv),
						     GFP_ATOMIC);
			if (!mad_priv) {
				dev_err(&qp_info->port_priv->device->dev,
					"No memory for receive buffer\n");
				ret = -ENOMEM;
				break;
			}
		}
		sg_list.length = mad_priv_dma_size(mad_priv);
		sg_list.addr = ib_dma_map_single(qp_info->port_priv->device,
						 &mad_priv->grh,
						 mad_priv_dma_size(mad_priv),
						 DMA_FROM_DEVICE);
		if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device,
						  sg_list.addr))) {
			ret = -ENOMEM;
			break;
		}
		mad_priv->header.mapping = sg_list.addr;
		recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list;
		mad_priv->header.mad_list.mad_queue = recv_queue;

		/* Post receive WR */
		spin_lock_irqsave(&recv_queue->lock, flags);
		post = (++recv_queue->count < recv_queue->max_active);
		list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list);
		spin_unlock_irqrestore(&recv_queue->lock, flags);
		ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr);
		if (ret) {
			spin_lock_irqsave(&recv_queue->lock, flags);
			list_del(&mad_priv->header.mad_list.list);
			recv_queue->count--;
			spin_unlock_irqrestore(&recv_queue->lock, flags);
			ib_dma_unmap_single(qp_info->port_priv->device,
					    mad_priv->header.mapping,
					    mad_priv_dma_size(mad_priv),
					    DMA_FROM_DEVICE);
			kfree(mad_priv);
			dev_err(&qp_info->port_priv->device->dev,
				"ib_post_recv failed: %d\n", ret);
			break;
		}
	} while (post);

	return ret;
}
/*
 * Return all the posted receive MADs
 */
static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
{
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *recv;
	struct ib_mad_list_head *mad_list;

	if (!qp_info->qp)
		return;

	while (!list_empty(&qp_info->recv_queue.list)) {

		mad_list = list_entry(qp_info->recv_queue.list.next,
				      struct ib_mad_list_head, list);
		mad_priv_hdr = container_of(mad_list,
					    struct ib_mad_private_header,
					    mad_list);
		recv = container_of(mad_priv_hdr, struct ib_mad_private,
				    header);

		/* Remove from posted receive MAD list */
		list_del(&mad_list->list);

		ib_dma_unmap_single(qp_info->port_priv->device,
				    recv->header.mapping,
				    mad_priv_dma_size(recv),
				    DMA_FROM_DEVICE);
		kfree(recv);
	}

	qp_info->recv_queue.count = 0;
}
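/*
 * Bring the special QPs for a port to RTS and post the initial set of
 * receive work requests.
 */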
static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
{
	int ret, i;
	struct ib_qp_attr *attr;
	struct ib_qp *qp;
	u16 pkey_index;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr) {
		dev_err(&port_priv->device->dev,
			"Couldn't kmalloc ib_qp_attr\n");
		return -ENOMEM;
	}

	ret = ib_find_pkey(port_priv->device, port_priv->port_num,
			   IB_DEFAULT_PKEY_FULL, &pkey_index);
	if (ret)
		pkey_index = 0;

	for (i = 0; i < IB_MAD_QPS_CORE; i++) {
		qp = port_priv->qp_info[i].qp;
		if (!qp)
			continue;

		/*
		 * PKey index for QP1 is irrelevant but
		 * one is needed for the Reset to Init transition
		 */
		attr->qp_state = IB_QPS_INIT;
		attr->pkey_index = pkey_index;
		attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE |
					     IB_QP_PKEY_INDEX | IB_QP_QKEY);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"Couldn't change QP%d state to INIT: %d\n",
				i, ret);
			goto out;
		}

		attr->qp_state = IB_QPS_RTR;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"Couldn't change QP%d state to RTR: %d\n",
				i, ret);
			goto out;
		}

		attr->qp_state = IB_QPS_RTS;
		attr->sq_psn = IB_MAD_SEND_Q_PSN;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"Couldn't change QP%d state to RTS: %d\n",
				i, ret);
			goto out;
		}
	}

	ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
	if (ret) {
		dev_err(&port_priv->device->dev,
			"Failed to request completion notification: %d\n",
			ret);
		goto out;
	}

	for (i = 0; i < IB_MAD_QPS_CORE; i++) {
		if (!port_priv->qp_info[i].qp)
			continue;

		ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"Couldn't post receive WRs\n");
			goto out;
		}
	}
out:
	kfree(attr);
	return ret;
}
static void qp_event_handler(struct ib_event *event, void *qp_context)
{
	struct ib_mad_qp_info *qp_info = qp_context;

	/* It's worse than that! He's dead, Jim! */
	dev_err(&qp_info->port_priv->device->dev,
		"Fatal error (%d) on MAD QP (%d)\n",
		event->event, qp_info->qp->qp_num);
}
static void init_mad_queue(struct ib_mad_qp_info *qp_info,
			   struct ib_mad_queue *mad_queue)
{
	mad_queue->qp_info = qp_info;
	mad_queue->count = 0;
	spin_lock_init(&mad_queue->lock);
	INIT_LIST_HEAD(&mad_queue->list);
}
static void init_mad_qp(struct ib_mad_port_private *port_priv,
			struct ib_mad_qp_info *qp_info)
{
	qp_info->port_priv = port_priv;
	init_mad_queue(qp_info, &qp_info->send_queue);
	init_mad_queue(qp_info, &qp_info->recv_queue);
	INIT_LIST_HEAD(&qp_info->overflow_list);
	spin_lock_init(&qp_info->snoop_lock);
	qp_info->snoop_table = NULL;
	qp_info->snoop_table_size = 0;
	atomic_set(&qp_info->snoop_count, 0);
}
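/*
 * Create one of the special MAD QPs (SMI or GSI) for a port using the
 * module's configured queue sizes.
 */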
static int create_mad_qp(struct ib_mad_qp_info *qp_info,
			 enum ib_qp_type qp_type)
{
	struct ib_qp_init_attr qp_init_attr;
	int ret;

	memset(&qp_init_attr, 0, sizeof qp_init_attr);
	qp_init_attr.send_cq = qp_info->port_priv->cq;
	qp_init_attr.recv_cq = qp_info->port_priv->cq;
	qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
	qp_init_attr.cap.max_send_wr = mad_sendq_size;
	qp_init_attr.cap.max_recv_wr = mad_recvq_size;
	qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG;
	qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG;
	qp_init_attr.qp_type = qp_type;
	qp_init_attr.port_num = qp_info->port_priv->port_num;
	qp_init_attr.qp_context = qp_info;
	qp_init_attr.event_handler = qp_event_handler;
	qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
	if (IS_ERR(qp_info->qp)) {
		dev_err(&qp_info->port_priv->device->dev,
			"Couldn't create ib_mad QP%d\n",
			get_spl_qp_index(qp_type));
		ret = PTR_ERR(qp_info->qp);
		goto error;
	}
	/* Use minimum queue sizes unless the CQ is resized */
	qp_info->send_queue.max_active = mad_sendq_size;
	qp_info->recv_queue.max_active = mad_recvq_size;
	return 0;

error:
	return ret;
}
static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
{
	if (!qp_info->qp)
		return;

	ib_destroy_qp(qp_info->qp);
	kfree(qp_info->snoop_table);
}
/*
 * Create the QP, PD, MR, and CQ if needed
 */
static int ib_mad_port_open(struct ib_device *device,
			    int port_num)
{
	int ret, cq_size;
	struct ib_mad_port_private *port_priv;
	unsigned long flags;
	char name[sizeof "ib_mad123"];
	int has_smi;
	struct ib_cq_init_attr cq_attr = {};

	if (WARN_ON(rdma_max_mad_size(device, port_num) < IB_MGMT_MAD_SIZE))
		return -EFAULT;

	if (WARN_ON(rdma_cap_opa_mad(device, port_num) &&
		    rdma_max_mad_size(device, port_num) < OPA_MGMT_MAD_SIZE))
		return -EFAULT;

	/* Create new device info */
	port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
	if (!port_priv) {
		dev_err(&device->dev, "No memory for ib_mad_port_private\n");
		return -ENOMEM;
	}

	port_priv->device = device;
	port_priv->port_num = port_num;
	spin_lock_init(&port_priv->reg_lock);
	INIT_LIST_HEAD(&port_priv->agent_list);
	init_mad_qp(port_priv, &port_priv->qp_info[0]);
	init_mad_qp(port_priv, &port_priv->qp_info[1]);

	cq_size = mad_sendq_size + mad_recvq_size;
	has_smi = rdma_cap_ib_smi(device, port_num);
	if (has_smi)
		cq_size *= 2;

	cq_attr.cqe = cq_size;
	port_priv->cq = ib_create_cq(port_priv->device,
				     ib_mad_thread_completion_handler,
				     NULL, port_priv, &cq_attr);
	if (IS_ERR(port_priv->cq)) {
		dev_err(&device->dev, "Couldn't create ib_mad CQ\n");
		ret = PTR_ERR(port_priv->cq);
		goto error3;
	}

	port_priv->pd = ib_alloc_pd(device);
	if (IS_ERR(port_priv->pd)) {
		dev_err(&device->dev, "Couldn't create ib_mad PD\n");
		ret = PTR_ERR(port_priv->pd);
		goto error4;
	}

	if (has_smi) {
		ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
		if (ret)
			goto error6;
	}
	ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
	if (ret)
		goto error7;

	snprintf(name, sizeof name, "ib_mad%d", port_num);
	port_priv->wq = create_singlethread_workqueue(name);
	if (!port_priv->wq) {
		ret = -ENOMEM;
		goto error8;
	}
	INIT_WORK(&port_priv->work, ib_mad_completion_handler);

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	list_add_tail(&port_priv->port_list, &ib_mad_port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	ret = ib_mad_port_start(port_priv);
	if (ret) {
		dev_err(&device->dev, "Couldn't start port\n");
		goto error9;
	}

	return 0;

error9:
	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	list_del_init(&port_priv->port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	destroy_workqueue(port_priv->wq);
error8:
	destroy_mad_qp(&port_priv->qp_info[1]);
error7:
	destroy_mad_qp(&port_priv->qp_info[0]);
error6:
	ib_dealloc_pd(port_priv->pd);
error4:
	ib_destroy_cq(port_priv->cq);
	cleanup_recv_queue(&port_priv->qp_info[1]);
	cleanup_recv_queue(&port_priv->qp_info[0]);
error3:
	kfree(port_priv);

	return ret;
}
/*
 * If there are no classes using the port, free the port
 * resources (CQ, MR, PD, QP) and remove the port's info structure
 */
static int ib_mad_port_close(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *port_priv;
	unsigned long flags;

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	port_priv = __ib_get_mad_port(device, port_num);
	if (port_priv == NULL) {
		spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
		dev_err(&device->dev, "Port %d not found\n", port_num);
		return -ENODEV;
	}
	list_del_init(&port_priv->port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	destroy_workqueue(port_priv->wq);
	destroy_mad_qp(&port_priv->qp_info[1]);
	destroy_mad_qp(&port_priv->qp_info[0]);
	ib_dealloc_pd(port_priv->pd);
	ib_destroy_cq(port_priv->cq);
	cleanup_recv_queue(&port_priv->qp_info[1]);
	cleanup_recv_queue(&port_priv->qp_info[0]);
	/* XXX: Handle deallocation of MAD registration tables */

	kfree(port_priv);

	return 0;
}
static void ib_mad_init_device(struct ib_device *device)
{
	int start, i;

	start = rdma_start_port(device);

	for (i = start; i <= rdma_end_port(device); i++) {
		if (!rdma_cap_ib_mad(device, i))
			continue;

		if (ib_mad_port_open(device, i)) {
			dev_err(&device->dev, "Couldn't open port %d\n", i);
			goto error;
		}
		if (ib_agent_port_open(device, i)) {
			dev_err(&device->dev,
				"Couldn't open port %d for agents\n", i);
			goto error_agent;
		}
	}
	return;

error_agent:
	if (ib_mad_port_close(device, i))
		dev_err(&device->dev, "Couldn't close port %d\n", i);

error:
	while (--i >= start) {
		if (!rdma_cap_ib_mad(device, i))
			continue;

		if (ib_agent_port_close(device, i))
			dev_err(&device->dev,
				"Couldn't close port %d for agents\n", i);
		if (ib_mad_port_close(device, i))
			dev_err(&device->dev, "Couldn't close port %d\n", i);
	}
}
static void ib_mad_remove_device(struct ib_device *device, void *client_data)
{
	int i;

	for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) {
		if (!rdma_cap_ib_mad(device, i))
			continue;

		if (ib_agent_port_close(device, i))
			dev_err(&device->dev,
				"Couldn't close port %d for agents\n", i);
		if (ib_mad_port_close(device, i))
			dev_err(&device->dev, "Couldn't close port %d\n", i);
	}
}
static struct ib_client mad_client = {
	.name   = "mad",
	.add = ib_mad_init_device,
	.remove = ib_mad_remove_device
};
static int __init ib_mad_init_module(void)
{
	mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE);
	mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE);

	mad_sendq_size = min(mad_sendq_size, IB_MAD_QP_MAX_SIZE);
	mad_sendq_size = max(mad_sendq_size, IB_MAD_QP_MIN_SIZE);

	INIT_LIST_HEAD(&ib_mad_port_list);

	if (ib_register_client(&mad_client)) {
		pr_err("Couldn't register ib_mad client\n");
		return -EINVAL;
	}

	return 0;
}
static void __exit ib_mad_cleanup_module(void)
{
	ib_unregister_client(&mad_client);
}
module_init(ib_mad_init_module);
module_exit(ib_mad_cleanup_module);