/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: agent.c 1389 2004-12-27 22:56:47Z roland $
 */

#include <linux/dma-mapping.h>

#include <asm/bug.h>

#include <ib_smi.h>

#include "smi.h"
#include "agent_priv.h"
#include "mad_priv.h"
#include "agent.h"

spinlock_t ib_agent_port_list_lock;
static LIST_HEAD(ib_agent_port_list);
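
/*
 * One ib_agent_port_private is linked on ib_agent_port_list per open
 * port, protected by ib_agent_port_list_lock.
 */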

/*
 * Caller must hold ib_agent_port_list_lock
 */
static inline struct ib_agent_port_private *
__ib_get_agent_port(struct ib_device *device, int port_num,
		    struct ib_mad_agent *mad_agent)
{
	struct ib_agent_port_private *entry;

	BUG_ON(!(!!device ^ !!mad_agent));	/* Exactly one MUST be (!NULL) */

	if (device) {
		/* Look up by device and port number */
		list_for_each_entry(entry, &ib_agent_port_list, port_list) {
			if (entry->smp_agent->device == device &&
			    entry->port_num == port_num)
				return entry;
		}
	} else {
		/* Look up by MAD agent (either the SMI or GSI agent matches) */
		list_for_each_entry(entry, &ib_agent_port_list, port_list) {
			if ((entry->smp_agent == mad_agent) ||
			    (entry->perf_mgmt_agent == mad_agent))
				return entry;
		}
	}
	return NULL;
}
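
/*
 * Locked wrapper around __ib_get_agent_port().  Callers that do not
 * already hold ib_agent_port_list_lock use this variant.  The returned
 * entry is not reference counted; that is safe here because entries
 * are only removed at port close time.
 */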
static inline struct ib_agent_port_private *
ib_get_agent_port(struct ib_device *device, int port_num,
		  struct ib_mad_agent *mad_agent)
{
	struct ib_agent_port_private *entry;
	unsigned long flags;

	spin_lock_irqsave(&ib_agent_port_list_lock, flags);
	entry = __ib_get_agent_port(device, port_num, mad_agent);
	spin_unlock_irqrestore(&ib_agent_port_list_lock, flags);

	return entry;
}
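
/*
 * Check whether a directed route SMP is addressed to the local port.
 * Non-directed-route SMPs, and SMPs for ports not open here, pass
 * trivially (return 1); otherwise the decision is deferred to
 * smi_check_local_smp().
 */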
int smi_check_local_dr_smp(struct ib_smp *smp,
			   struct ib_device *device,
			   int port_num)
{
	struct ib_agent_port_private *port_priv;

	if (smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		return 1;
	port_priv = ib_get_agent_port(device, port_num, NULL);
	if (!port_priv) {
		printk(KERN_DEBUG SPFX "smi_check_local_dr_smp %s port %d "
		       "not open\n",
		       device->name, port_num);
		return 1;
	}

	return smi_check_local_smp(port_priv->smp_agent, smp);
}
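
/*
 * Post a response MAD on the given agent.  The address handle is built
 * from the work completion (and GRH, when present) of the request, so
 * the response is sent back along the path the request arrived on.
 */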
static int agent_mad_send(struct ib_mad_agent *mad_agent,
			  struct ib_agent_port_private *port_priv,
			  struct ib_mad_private *mad_priv,
			  struct ib_grh *grh,
			  struct ib_wc *wc)
{
	struct ib_agent_send_wr *agent_send_wr;
	struct ib_sge gather_list;
	struct ib_send_wr send_wr;
	struct ib_send_wr *bad_send_wr;
	struct ib_ah_attr ah_attr;
	unsigned long flags;
	int ret = 1;

	agent_send_wr = kmalloc(sizeof(*agent_send_wr), GFP_KERNEL);
	if (!agent_send_wr)
		goto out;
	agent_send_wr->mad = mad_priv;

	/* Map the MAD for DMA; unmapped on error below or on send completion */
	gather_list.addr = dma_map_single(mad_agent->device->dma_device,
					  &mad_priv->mad,
					  sizeof(mad_priv->mad),
					  DMA_TO_DEVICE);
	gather_list.length = sizeof(mad_priv->mad);
	gather_list.lkey = mad_agent->mr->lkey;

	send_wr.next = NULL;
	send_wr.opcode = IB_WR_SEND;
	send_wr.sg_list = &gather_list;
	send_wr.num_sge = 1;
	send_wr.wr.ud.remote_qpn = wc->src_qp;	/* DQPN */
	send_wr.wr.ud.timeout_ms = 0;
	send_wr.send_flags = IB_SEND_SIGNALED | IB_SEND_SOLICITED;

	ah_attr.dlid = wc->slid;
	ah_attr.port_num = mad_agent->port_num;
	ah_attr.src_path_bits = wc->dlid_path_bits;
	ah_attr.sl = wc->sl;
	ah_attr.static_rate = 0;
	ah_attr.ah_flags = 0;	/* No GRH */
	if (mad_priv->mad.mad.mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT) {
		if (wc->wc_flags & IB_WC_GRH) {
			ah_attr.ah_flags = IB_AH_GRH;
			/* Should sgid be looked up ? */
			ah_attr.grh.sgid_index = 0;
			ah_attr.grh.hop_limit = grh->hop_limit;
			/*
			 * version_tclass_flow packs the 4-bit IB version,
			 * 8-bit traffic class, and 20-bit flow label into
			 * one big-endian word.
			 */
			ah_attr.grh.flow_label = be32_to_cpu(
				grh->version_tclass_flow) & 0xfffff;
			ah_attr.grh.traffic_class = (be32_to_cpu(
				grh->version_tclass_flow) >> 20) & 0xff;
			/* Respond to the sender's GID */
			memcpy(ah_attr.grh.dgid.raw,
			       grh->sgid.raw,
			       sizeof(ah_attr.grh.dgid));
		}
	}

	agent_send_wr->ah = ib_create_ah(mad_agent->qp->pd, &ah_attr);
	if (IS_ERR(agent_send_wr->ah)) {
		printk(KERN_ERR SPFX "No memory for address handle\n");
		kfree(agent_send_wr);
		goto out;
	}

	send_wr.wr.ud.ah = agent_send_wr->ah;
	if (mad_priv->mad.mad.mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT) {
		send_wr.wr.ud.pkey_index = wc->pkey_index;
		send_wr.wr.ud.remote_qkey = IB_QP1_QKEY;
	} else {	/* for SMPs */
		send_wr.wr.ud.pkey_index = 0;
		send_wr.wr.ud.remote_qkey = 0;
	}
	send_wr.wr.ud.mad_hdr = &mad_priv->mad.mad.mad_hdr;
	/* Stash the tracking struct in wr_id for the send completion handler */
	send_wr.wr_id = (unsigned long)agent_send_wr;

	pci_unmap_addr_set(agent_send_wr, mapping, gather_list.addr);

	/* Send */
	spin_lock_irqsave(&port_priv->send_list_lock, flags);
	if (ib_post_send_mad(mad_agent, &send_wr, &bad_send_wr)) {
		spin_unlock_irqrestore(&port_priv->send_list_lock, flags);
		dma_unmap_single(mad_agent->device->dma_device,
				 pci_unmap_addr(agent_send_wr, mapping),
				 sizeof(mad_priv->mad),
				 DMA_TO_DEVICE);
		ib_destroy_ah(agent_send_wr->ah);
		kfree(agent_send_wr);
	} else {
		list_add_tail(&agent_send_wr->send_list,
			      &port_priv->send_posted_list);
		spin_unlock_irqrestore(&port_priv->send_list_lock, flags);
		ret = 0;
	}

out:
	return ret;
}
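
/*
 * Dispatch an outgoing MAD to the per-port agent registered for its
 * management class: SM classes go out the SMI QP agent, PerfMgmt goes
 * out the GSI QP agent.
 */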
int agent_send(struct ib_mad_private *mad,
	       struct ib_grh *grh,
	       struct ib_wc *wc,
	       struct ib_device *device,
	       int port_num)
{
	struct ib_agent_port_private *port_priv;
	struct ib_mad_agent *mad_agent;

	port_priv = ib_get_agent_port(device, port_num, NULL);
	if (!port_priv) {
		printk(KERN_DEBUG SPFX "agent_send %s port %d not open\n",
		       device->name, port_num);
		return 1;
	}

	/* Get mad agent based on mgmt_class in MAD */
	switch (mad->mad.mad.mad_hdr.mgmt_class) {
	case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
	case IB_MGMT_CLASS_SUBN_LID_ROUTED:
		mad_agent = port_priv->smp_agent;
		break;
	case IB_MGMT_CLASS_PERF_MGMT:
		mad_agent = port_priv->perf_mgmt_agent;
		break;
	default:
		return 1;	/* Unsupported management class */
	}

	return agent_mad_send(mad_agent, port_priv, mad, grh, wc);
}
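
/*
 * Send completion handler: undo agent_mad_send() in reverse order.
 * Unlink the request from the posted list, unmap the DMA mapping,
 * destroy the address handle, then free the MAD and the tracking
 * structure.
 */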
static void agent_send_handler(struct ib_mad_agent *mad_agent,
			       struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_agent_port_private *port_priv;
	struct ib_agent_send_wr *agent_send_wr;
	unsigned long flags;

	/* Find matching MAD agent */
	port_priv = ib_get_agent_port(NULL, 0, mad_agent);
	if (!port_priv) {
		printk(KERN_ERR SPFX "agent_send_handler: no matching MAD "
		       "agent %p\n", mad_agent);
		return;
	}

	/* Recover the tracking struct stashed in wr_id by agent_mad_send() */
	agent_send_wr = (struct ib_agent_send_wr *)(unsigned long)mad_send_wc->wr_id;
	spin_lock_irqsave(&port_priv->send_list_lock, flags);
	/* Remove completed send from posted send MAD list */
	list_del(&agent_send_wr->send_list);
	spin_unlock_irqrestore(&port_priv->send_list_lock, flags);

	dma_unmap_single(mad_agent->device->dma_device,
			 pci_unmap_addr(agent_send_wr, mapping),
			 sizeof(agent_send_wr->mad->mad),
			 DMA_TO_DEVICE);

	ib_destroy_ah(agent_send_wr->ah);

	/* Release allocated memory */
	kmem_cache_free(ib_mad_cache, agent_send_wr->mad);

	kfree(agent_send_wr);
}
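
/*
 * Open the send-side SMI and GSI agents for one port.  A hypothetical
 * caller (the real one lives in the MAD layer) might look like:
 *
 *	for (p = 1; p <= device->phys_port_cnt; p++)
 *		if (ib_agent_port_open(device, p))
 *			break;
 *
 * The phys_port_cnt loop above is purely for illustration.
 */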
int ib_agent_port_open(struct ib_device *device, int port_num)
{
	int ret;
	struct ib_agent_port_private *port_priv;
	unsigned long flags;

	/* First, check if port already open for SMI */
	port_priv = ib_get_agent_port(device, port_num, NULL);
	if (port_priv) {
		printk(KERN_DEBUG SPFX "%s port %d already open\n",
		       device->name, port_num);
		return 0;
	}

	/* Create new device info */
	port_priv = kmalloc(sizeof *port_priv, GFP_KERNEL);
	if (!port_priv) {
		printk(KERN_ERR SPFX "No memory for ib_agent_port_private\n");
		ret = -ENOMEM;
		goto error1;
	}

	memset(port_priv, 0, sizeof *port_priv);
	port_priv->port_num = port_num;
	spin_lock_init(&port_priv->send_list_lock);
	INIT_LIST_HEAD(&port_priv->send_posted_list);

	/* Obtain send only MAD agent for SM class (SMI QP) */
	port_priv->smp_agent = ib_register_mad_agent(device, port_num,
						     IB_QPT_SMI,
						     NULL, 0,
						     &agent_send_handler,
						     NULL, NULL);
	if (IS_ERR(port_priv->smp_agent)) {
		ret = PTR_ERR(port_priv->smp_agent);
		goto error2;
	}

	/* Obtain send only MAD agent for PerfMgmt class (GSI QP) */
	port_priv->perf_mgmt_agent = ib_register_mad_agent(device, port_num,
							   IB_QPT_GSI,
							   NULL, 0,
							   &agent_send_handler,
							   NULL, NULL);
	if (IS_ERR(port_priv->perf_mgmt_agent)) {
		ret = PTR_ERR(port_priv->perf_mgmt_agent);
		goto error3;
	}

	spin_lock_irqsave(&ib_agent_port_list_lock, flags);
	list_add_tail(&port_priv->port_list, &ib_agent_port_list);
	spin_unlock_irqrestore(&ib_agent_port_list_lock, flags);

	return 0;

error3:
	ib_unregister_mad_agent(port_priv->smp_agent);
error2:
	kfree(port_priv);
error1:
	return ret;
}
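
/*
 * Tear down the agents registered by ib_agent_port_open().  The entry
 * is unlinked under the list lock first so that no new lookup can
 * return it while the agents are being unregistered.
 */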
int ib_agent_port_close(struct ib_device *device, int port_num)
{
	struct ib_agent_port_private *port_priv;
	unsigned long flags;

	spin_lock_irqsave(&ib_agent_port_list_lock, flags);
	port_priv = __ib_get_agent_port(device, port_num, NULL);
	if (port_priv == NULL) {
		spin_unlock_irqrestore(&ib_agent_port_list_lock, flags);
		printk(KERN_ERR SPFX "Port %d not found\n", port_num);
		return -ENODEV;
	}
	list_del(&port_priv->port_list);
	spin_unlock_irqrestore(&ib_agent_port_list_lock, flags);

	ib_unregister_mad_agent(port_priv->perf_mgmt_agent);
	ib_unregister_mad_agent(port_priv->smp_agent);
	kfree(port_priv);

	return 0;
}