[linux-2.6/verdex.git] / drivers / infiniband / core / agent.c
/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: agent.c 1389 2004-12-27 22:56:47Z roland $
 */
#include <linux/dma-mapping.h>

#include <asm/bug.h>

#include <ib_smi.h>

#include "smi.h"
#include "agent_priv.h"
#include "mad_priv.h"
#include "agent.h"
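/*
 * The port agents implemented here provide the send side for locally
 * generated management responses: each port gets a send-only MAD agent
 * on its SMI QP (QP0, subnet management classes) and one on its GSI QP
 * (QP1, PerfMgmt class), plus a DMA MR and a list of posted sends.
 */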
spinlock_t ib_agent_port_list_lock;
static LIST_HEAD(ib_agent_port_list);

/*
 * Caller must hold ib_agent_port_list_lock
 */
static inline struct ib_agent_port_private *
__ib_get_agent_port(struct ib_device *device, int port_num,
                    struct ib_mad_agent *mad_agent)
{
        struct ib_agent_port_private *entry;

        BUG_ON(!(!!device ^ !!mad_agent));      /* Exactly one MUST be (!NULL) */

        if (device) {
                list_for_each_entry(entry, &ib_agent_port_list, port_list) {
                        if (entry->smp_agent->device == device &&
                            entry->port_num == port_num)
                                return entry;
                }
        } else {
                list_for_each_entry(entry, &ib_agent_port_list, port_list) {
                        if ((entry->smp_agent == mad_agent) ||
                            (entry->perf_mgmt_agent == mad_agent))
                                return entry;
                }
        }
        return NULL;
}
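/*
 * Locking wrapper around __ib_get_agent_port(): takes and releases
 * ib_agent_port_list_lock around the lookup.
 */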
static inline struct ib_agent_port_private *
ib_get_agent_port(struct ib_device *device, int port_num,
                  struct ib_mad_agent *mad_agent)
{
        struct ib_agent_port_private *entry;
        unsigned long flags;

        spin_lock_irqsave(&ib_agent_port_list_lock, flags);
        entry = __ib_get_agent_port(device, port_num, mad_agent);
        spin_unlock_irqrestore(&ib_agent_port_list_lock, flags);

        return entry;
}
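/*
 * Checks whether a directed-route SMP may be handled on this port.
 * Non-DR-class MADs trivially pass the check, as does any MAD arriving
 * on a port with no agent registered; otherwise the decision is
 * delegated to smi_check_local_smp().
 */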
int smi_check_local_dr_smp(struct ib_smp *smp,
                           struct ib_device *device,
                           int port_num)
{
        struct ib_agent_port_private *port_priv;

        if (smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
                return 1;
        port_priv = ib_get_agent_port(device, port_num, NULL);
        if (!port_priv) {
                printk(KERN_DEBUG SPFX "smi_check_local_dr_smp %s port %d "
                       "not open\n",
                       device->name, port_num);
                return 1;
        }

        return smi_check_local_smp(port_priv->smp_agent, smp);
}
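/*
 * Builds and posts a UD send work request for a response MAD.  The
 * destination is derived from the work completion of the request being
 * answered: the remote QPN comes from wc->src_qp, and the address
 * handle is built from wc->slid/sl/dlid_path_bits (plus the received
 * GRH for PerfMgmt MADs).  On success the request is queued on
 * port_priv->send_posted_list until the send completion arrives.
 */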
static int agent_mad_send(struct ib_mad_agent *mad_agent,
                          struct ib_agent_port_private *port_priv,
                          struct ib_mad_private *mad_priv,
                          struct ib_grh *grh,
                          struct ib_wc *wc)
{
        struct ib_agent_send_wr *agent_send_wr;
        struct ib_sge gather_list;
        struct ib_send_wr send_wr;
        struct ib_send_wr *bad_send_wr;
        struct ib_ah_attr ah_attr;
        unsigned long flags;
        int ret = 1;

        agent_send_wr = kmalloc(sizeof(*agent_send_wr), GFP_KERNEL);
        if (!agent_send_wr)
                goto out;
        agent_send_wr->mad = mad_priv;

        gather_list.addr = dma_map_single(mad_agent->device->dma_device,
                                          &mad_priv->mad,
                                          sizeof(mad_priv->mad),
                                          DMA_TO_DEVICE);
        gather_list.length = sizeof(mad_priv->mad);
        gather_list.lkey = (*port_priv->mr).lkey;

        send_wr.next = NULL;
        send_wr.opcode = IB_WR_SEND;
        send_wr.sg_list = &gather_list;
        send_wr.num_sge = 1;
        send_wr.wr.ud.remote_qpn = wc->src_qp; /* DQPN */
        send_wr.wr.ud.timeout_ms = 0;
        send_wr.send_flags = IB_SEND_SIGNALED | IB_SEND_SOLICITED;

        ah_attr.dlid = wc->slid;
        ah_attr.port_num = mad_agent->port_num;
        ah_attr.src_path_bits = wc->dlid_path_bits;
        ah_attr.sl = wc->sl;
        ah_attr.static_rate = 0;
        ah_attr.ah_flags = 0; /* No GRH */
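        /*
         * version_tclass_flow is a single big-endian 32-bit word; per
         * the GRH layout it packs the IP version (bits 31:28), traffic
         * class (bits 27:20) and flow label (bits 19:0), hence the
         * shifts and masks below.
         */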
        if (mad_priv->mad.mad.mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT) {
                if (wc->wc_flags & IB_WC_GRH) {
                        ah_attr.ah_flags = IB_AH_GRH;
                        /* Should sgid be looked up ? */
                        ah_attr.grh.sgid_index = 0;
                        ah_attr.grh.hop_limit = grh->hop_limit;
                        ah_attr.grh.flow_label = be32_to_cpup(
                                &grh->version_tclass_flow) & 0xfffff;
                        ah_attr.grh.traffic_class = (be32_to_cpup(
                                &grh->version_tclass_flow) >> 20) & 0xff;
                        memcpy(ah_attr.grh.dgid.raw,
                               grh->sgid.raw,
                               sizeof(ah_attr.grh.dgid));
                }
        }

        agent_send_wr->ah = ib_create_ah(mad_agent->qp->pd, &ah_attr);
        if (IS_ERR(agent_send_wr->ah)) {
                printk(KERN_ERR SPFX "No memory for address handle\n");
                kfree(agent_send_wr);
                goto out;
        }

        send_wr.wr.ud.ah = agent_send_wr->ah;
        if (mad_priv->mad.mad.mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT) {
                send_wr.wr.ud.pkey_index = wc->pkey_index;
                send_wr.wr.ud.remote_qkey = IB_QP1_QKEY;
        } else {        /* for SMPs */
                send_wr.wr.ud.pkey_index = 0;
                send_wr.wr.ud.remote_qkey = 0;
        }
        send_wr.wr.ud.mad_hdr = &mad_priv->mad.mad.mad_hdr;
        send_wr.wr_id = (unsigned long)agent_send_wr;

        pci_unmap_addr_set(agent_send_wr, mapping, gather_list.addr);

        /* Send */
        spin_lock_irqsave(&port_priv->send_list_lock, flags);
        if (ib_post_send_mad(mad_agent, &send_wr, &bad_send_wr)) {
                spin_unlock_irqrestore(&port_priv->send_list_lock, flags);
                dma_unmap_single(mad_agent->device->dma_device,
                                 pci_unmap_addr(agent_send_wr, mapping),
                                 sizeof(mad_priv->mad),
                                 DMA_TO_DEVICE);
                ib_destroy_ah(agent_send_wr->ah);
                kfree(agent_send_wr);
        } else {
                list_add_tail(&agent_send_wr->send_list,
                              &port_priv->send_posted_list);
                spin_unlock_irqrestore(&port_priv->send_list_lock, flags);
                ret = 0;
        }

out:
        return ret;
}
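/*
 * Entry point for sending a locally generated MAD: looks up the port's
 * agent state and dispatches on the management class in the MAD header
 * to pick the right agent (SMP classes go to the SMI agent, PerfMgmt
 * to the GSI agent).  Returns 0 on success, non-zero on failure.
 */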
int agent_send(struct ib_mad_private *mad,
               struct ib_grh *grh,
               struct ib_wc *wc,
               struct ib_device *device,
               int port_num)
{
        struct ib_agent_port_private *port_priv;
        struct ib_mad_agent *mad_agent;

        port_priv = ib_get_agent_port(device, port_num, NULL);
        if (!port_priv) {
                printk(KERN_DEBUG SPFX "agent_send %s port %d not open\n",
                       device->name, port_num);
                return 1;
        }

        /* Get mad agent based on mgmt_class in MAD */
        switch (mad->mad.mad.mad_hdr.mgmt_class) {
        case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
        case IB_MGMT_CLASS_SUBN_LID_ROUTED:
                mad_agent = port_priv->smp_agent;
                break;
        case IB_MGMT_CLASS_PERF_MGMT:
                mad_agent = port_priv->perf_mgmt_agent;
                break;
        default:
                return 1;
        }

        return agent_mad_send(mad_agent, port_priv, mad, grh, wc);
}
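/*
 * Send completion callback registered with ib_register_mad_agent().
 * Undoes everything agent_mad_send() set up for the completed request:
 * removes it from the posted list, unmaps the DMA buffer, destroys the
 * address handle, and frees the MAD and the tracking structure.
 */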
static void agent_send_handler(struct ib_mad_agent *mad_agent,
                               struct ib_mad_send_wc *mad_send_wc)
{
        struct ib_agent_port_private *port_priv;
        struct ib_agent_send_wr *agent_send_wr;
        unsigned long flags;

        /* Find matching MAD agent */
        port_priv = ib_get_agent_port(NULL, 0, mad_agent);
        if (!port_priv) {
                printk(KERN_ERR SPFX "agent_send_handler: no matching MAD "
                       "agent %p\n", mad_agent);
                return;
        }

        agent_send_wr = (struct ib_agent_send_wr *)(unsigned long)mad_send_wc->wr_id;
        spin_lock_irqsave(&port_priv->send_list_lock, flags);
        /* Remove completed send from posted send MAD list */
        list_del(&agent_send_wr->send_list);
        spin_unlock_irqrestore(&port_priv->send_list_lock, flags);

        dma_unmap_single(mad_agent->device->dma_device,
                         pci_unmap_addr(agent_send_wr, mapping),
                         sizeof(agent_send_wr->mad->mad),
                         DMA_TO_DEVICE);

        ib_destroy_ah(agent_send_wr->ah);

        /* Release allocated memory */
        kmem_cache_free(ib_mad_cache, agent_send_wr->mad);
        kfree(agent_send_wr);
}
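/*
 * Per-port setup: registers send-only MAD agents on the SMI and GSI
 * QPs, obtains a DMA MR for the send buffers, and links the new state
 * onto ib_agent_port_list.  Errors unwind in reverse order of setup.
 */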
int ib_agent_port_open(struct ib_device *device, int port_num)
{
        int ret;
        struct ib_agent_port_private *port_priv;
        unsigned long flags;

        /* First, check if port already open for SMI */
        port_priv = ib_get_agent_port(device, port_num, NULL);
        if (port_priv) {
                printk(KERN_DEBUG SPFX "%s port %d already open\n",
                       device->name, port_num);
                return 0;
        }

        /* Create new device info */
        port_priv = kmalloc(sizeof *port_priv, GFP_KERNEL);
        if (!port_priv) {
                printk(KERN_ERR SPFX "No memory for ib_agent_port_private\n");
                ret = -ENOMEM;
                goto error1;
        }

        memset(port_priv, 0, sizeof *port_priv);
        port_priv->port_num = port_num;
        spin_lock_init(&port_priv->send_list_lock);
        INIT_LIST_HEAD(&port_priv->send_posted_list);

        /* Obtain send only MAD agent for SM class (SMI QP) */
        port_priv->smp_agent = ib_register_mad_agent(device, port_num,
                                                     IB_QPT_SMI,
                                                     NULL, 0,
                                                     &agent_send_handler,
                                                     NULL, NULL);

        if (IS_ERR(port_priv->smp_agent)) {
                ret = PTR_ERR(port_priv->smp_agent);
                goto error2;
        }

        /* Obtain send only MAD agent for PerfMgmt class (GSI QP) */
        port_priv->perf_mgmt_agent = ib_register_mad_agent(device, port_num,
                                                           IB_QPT_GSI,
                                                           NULL, 0,
                                                           &agent_send_handler,
                                                           NULL, NULL);
        if (IS_ERR(port_priv->perf_mgmt_agent)) {
                ret = PTR_ERR(port_priv->perf_mgmt_agent);
                goto error3;
        }

        port_priv->mr = ib_get_dma_mr(port_priv->smp_agent->qp->pd,
                                      IB_ACCESS_LOCAL_WRITE);
        if (IS_ERR(port_priv->mr)) {
                printk(KERN_ERR SPFX "Couldn't get DMA MR\n");
                ret = PTR_ERR(port_priv->mr);
                goto error4;
        }

        spin_lock_irqsave(&ib_agent_port_list_lock, flags);
        list_add_tail(&port_priv->port_list, &ib_agent_port_list);
        spin_unlock_irqrestore(&ib_agent_port_list_lock, flags);

        return 0;

error4:
        ib_unregister_mad_agent(port_priv->perf_mgmt_agent);
error3:
        ib_unregister_mad_agent(port_priv->smp_agent);
error2:
        kfree(port_priv);
error1:
        return ret;
}
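/*
 * Tears down what ib_agent_port_open() created: unlinks the port's
 * state from ib_agent_port_list under the lock, then releases the DMA
 * MR and unregisters both MAD agents before freeing the state.
 */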
int ib_agent_port_close(struct ib_device *device, int port_num)
{
        struct ib_agent_port_private *port_priv;
        unsigned long flags;

        spin_lock_irqsave(&ib_agent_port_list_lock, flags);
        port_priv = __ib_get_agent_port(device, port_num, NULL);
        if (port_priv == NULL) {
                spin_unlock_irqrestore(&ib_agent_port_list_lock, flags);
                printk(KERN_ERR SPFX "Port %d not found\n", port_num);
                return -ENODEV;
        }
        list_del(&port_priv->port_list);
        spin_unlock_irqrestore(&ib_agent_port_list_lock, flags);

        ib_dereg_mr(port_priv->mr);

        ib_unregister_mad_agent(port_priv->perf_mgmt_agent);
        ib_unregister_mad_agent(port_priv->smp_agent);
        kfree(port_priv);

        return 0;
}
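/*
 * Usage sketch (hypothetical, not part of this file): a caller in the
 * MAD receive path would hand a locally built response to agent_send()
 * roughly like this, assuming `recv` is the ib_mad_private holding the
 * response and `wc`/`grh` come from the received request:
 *
 *      if (agent_send(recv, grh, wc, device, port_num))
 *              kmem_cache_free(ib_mad_cache, recv);
 *
 * That is, on failure the caller still owns the MAD buffer; on success
 * ownership passes to agent_send_handler(), which frees it when the
 * send completes.
 */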