drivers/infiniband/hw/ipath/ipath_layer.c
/*
 * Copyright (c) 2006 QLogic, Inc. All rights reserved.
 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/*
 * These are the routines used by layered drivers, currently just the
 * layered ethernet driver and verbs layer.
 */

#include <linux/io.h>
#include <asm/byteorder.h>

#include "ipath_kernel.h"
#include "ipath_layer.h"
#include "ipath_verbs.h"
#include "ipath_common.h"

/* Acquire before ipath_devs_lock. */
static DEFINE_MUTEX(ipath_layer_mutex);

u16 ipath_layer_rcv_opcode;

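/*
 * Callbacks into the currently registered layered driver.  They are
 * installed by ipath_layer_register(), cleared again by
 * ipath_layer_unregister(), and only updated with ipath_layer_mutex held.
 */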
static int (*layer_intr)(void *, u32);
static int (*layer_rcv)(void *, void *, struct sk_buff *);
static int (*layer_rcv_lid)(void *, void *);

static void *(*layer_add_one)(int, struct ipath_devdata *);
static void (*layer_remove_one)(void *);

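/*
 * Deliver asynchronous event bits (IPATH_LAYER_INT_*) to the layered
 * driver.  ipath_layer_intr() is the locking wrapper around
 * __ipath_layer_intr(), which itself does no locking.
 */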
int __ipath_layer_intr(struct ipath_devdata *dd, u32 arg)
{
        int ret = -ENODEV;

        if (dd->ipath_layer.l_arg && layer_intr)
                ret = layer_intr(dd->ipath_layer.l_arg, arg);

        return ret;
}

int ipath_layer_intr(struct ipath_devdata *dd, u32 arg)
{
        int ret;

        mutex_lock(&ipath_layer_mutex);

        ret = __ipath_layer_intr(dd, arg);

        mutex_unlock(&ipath_layer_mutex);

        return ret;
}

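/*
 * Receive-path hooks: __ipath_layer_rcv() hands a received packet (header
 * plus skb) to the layered driver, __ipath_layer_rcv_lid() hands over just
 * the header.  Like __ipath_layer_intr(), neither takes ipath_layer_mutex.
 */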
int __ipath_layer_rcv(struct ipath_devdata *dd, void *hdr,
                      struct sk_buff *skb)
{
        int ret = -ENODEV;

        if (dd->ipath_layer.l_arg && layer_rcv)
                ret = layer_rcv(dd->ipath_layer.l_arg, hdr, skb);

        return ret;
}

int __ipath_layer_rcv_lid(struct ipath_devdata *dd, void *hdr)
{
        int ret = -ENODEV;

        if (dd->ipath_layer.l_arg && layer_rcv_lid)
                ret = layer_rcv_lid(dd->ipath_layer.l_arg, hdr);

        return ret;
}

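/*
 * Tell the layered driver that the local LID has changed, by raising the
 * IPATH_LAYER_INT_LID event.
 */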
void ipath_layer_lid_changed(struct ipath_devdata *dd)
{
        mutex_lock(&ipath_layer_mutex);

        if (dd->ipath_layer.l_arg && layer_intr)
                layer_intr(dd->ipath_layer.l_arg, IPATH_LAYER_INT_LID);

        mutex_unlock(&ipath_layer_mutex);
}

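/*
 * Per-device attach/detach: ipath_layer_add() gives the layered driver a
 * chance to create its per-device state (stored in dd->ipath_layer.l_arg),
 * and ipath_layer_remove() tears it down again.
 */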
void ipath_layer_add(struct ipath_devdata *dd)
{
        mutex_lock(&ipath_layer_mutex);

        if (layer_add_one)
                dd->ipath_layer.l_arg =
                        layer_add_one(dd->ipath_unit, dd);

        mutex_unlock(&ipath_layer_mutex);
}

void ipath_layer_remove(struct ipath_devdata *dd)
{
        mutex_lock(&ipath_layer_mutex);

        if (dd->ipath_layer.l_arg && layer_remove_one) {
                layer_remove_one(dd->ipath_layer.l_arg);
                dd->ipath_layer.l_arg = NULL;
        }

        mutex_unlock(&ipath_layer_mutex);
}

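/*
 * ipath_layer_register - install the layered driver's callbacks
 *
 * Records the callback pointers and the receive opcode, then immediately
 * calls l_add() for every device that is already initialized but not yet
 * attached.  ipath_devs_lock is dropped around the l_add() call so the
 * callback may sleep.  A layered driver would typically call this once at
 * module init, e.g. (with hypothetical callback names):
 *
 *      ipath_layer_register(my_add, my_remove, my_intr, my_rcv,
 *                           MY_RCV_OPCODE, my_rcv_lid);
 */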
int ipath_layer_register(void *(*l_add)(int, struct ipath_devdata *),
                         void (*l_remove)(void *),
                         int (*l_intr)(void *, u32),
                         int (*l_rcv)(void *, void *, struct sk_buff *),
                         u16 l_rcv_opcode,
                         int (*l_rcv_lid)(void *, void *))
{
        struct ipath_devdata *dd, *tmp;
        unsigned long flags;

        mutex_lock(&ipath_layer_mutex);

        layer_add_one = l_add;
        layer_remove_one = l_remove;
        layer_intr = l_intr;
        layer_rcv = l_rcv;
        layer_rcv_lid = l_rcv_lid;
        ipath_layer_rcv_opcode = l_rcv_opcode;

        spin_lock_irqsave(&ipath_devs_lock, flags);

        list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
                if (!(dd->ipath_flags & IPATH_INITTED))
                        continue;

                if (dd->ipath_layer.l_arg)
                        continue;

                spin_unlock_irqrestore(&ipath_devs_lock, flags);
                dd->ipath_layer.l_arg = l_add(dd->ipath_unit, dd);
                spin_lock_irqsave(&ipath_devs_lock, flags);
        }

        spin_unlock_irqrestore(&ipath_devs_lock, flags);
        mutex_unlock(&ipath_layer_mutex);

        return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_register);

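/*
 * ipath_layer_unregister - detach the layered driver from all devices and
 * clear the callback pointers.  The inverse of ipath_layer_register().
 */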
void ipath_layer_unregister(void)
{
        struct ipath_devdata *dd, *tmp;
        unsigned long flags;

        mutex_lock(&ipath_layer_mutex);
        spin_lock_irqsave(&ipath_devs_lock, flags);

        list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
                if (dd->ipath_layer.l_arg && layer_remove_one) {
                        spin_unlock_irqrestore(&ipath_devs_lock, flags);
                        layer_remove_one(dd->ipath_layer.l_arg);
                        spin_lock_irqsave(&ipath_devs_lock, flags);
                        dd->ipath_layer.l_arg = NULL;
                }
        }

        spin_unlock_irqrestore(&ipath_devs_lock, flags);

        layer_add_one = NULL;
        layer_remove_one = NULL;
        layer_intr = NULL;
        layer_rcv = NULL;
        layer_rcv_lid = NULL;

        mutex_unlock(&ipath_layer_mutex);
}

EXPORT_SYMBOL_GPL(ipath_layer_unregister);

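/*
 * ipath_layer_open - called by the layered driver when it is ready to use
 * a device: sets the receive header size, returns the largest usable
 * packet size in *pktmax, and replays any link/LID/MLID events that
 * happened before the driver attached.
 */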
int ipath_layer_open(struct ipath_devdata *dd, u32 * pktmax)
{
        int ret;
        u32 intval = 0;

        mutex_lock(&ipath_layer_mutex);

        if (!dd->ipath_layer.l_arg) {
                ret = -EINVAL;
                goto bail;
        }

        ret = ipath_setrcvhdrsize(dd, IPATH_HEADER_QUEUE_WORDS);

        if (ret < 0)
                goto bail;

        *pktmax = dd->ipath_ibmaxlen;

        if (*dd->ipath_statusp & IPATH_STATUS_IB_READY)
                intval |= IPATH_LAYER_INT_IF_UP;
        if (dd->ipath_lid)
                intval |= IPATH_LAYER_INT_LID;
        if (dd->ipath_mlid)
                intval |= IPATH_LAYER_INT_BCAST;
        /*
         * do this on open, in case low level is already up and
         * just layered driver was reloaded, etc.
         */
        if (intval)
                layer_intr(dd->ipath_layer.l_arg, intval);

        ret = 0;
bail:
        mutex_unlock(&ipath_layer_mutex);

        return ret;
}

EXPORT_SYMBOL_GPL(ipath_layer_open);

u16 ipath_layer_get_lid(struct ipath_devdata *dd)
{
        return dd->ipath_lid;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_lid);

/**
 * ipath_layer_get_mac - get the MAC address
 * @dd: the infinipath device
 * @mac: the MAC is put here
 *
 * This is the EUID-64 OUI octets (top 3), then
 * skip the next 2 (which should both be zero or 0xff).
 * The returned MAC is in network order
 * mac points to at least 6 bytes of buffer
 * We assume that by the time the LID is set, that the GUID is as valid
 * as it's ever going to be, rather than adding yet another status bit.
 */

int ipath_layer_get_mac(struct ipath_devdata *dd, u8 * mac)
{
        u8 *guid;

        guid = (u8 *) &dd->ipath_guid;

        mac[0] = guid[0];
        mac[1] = guid[1];
        mac[2] = guid[2];
        mac[3] = guid[5];
        mac[4] = guid[6];
        mac[5] = guid[7];
        if ((guid[3] || guid[4]) && !(guid[3] == 0xff && guid[4] == 0xff))
                ipath_dbg("Warning, guid bytes 3 and 4 not 0 or 0xffff: "
                          "%x %x\n", guid[3], guid[4]);
        return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_mac);

u16 ipath_layer_get_bcast(struct ipath_devdata *dd)
{
        return dd->ipath_mlid;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_bcast);

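/*
 * ipath_layer_send_hdr - send an ether_header-sized packet by PIO
 *
 * Sanity-checks the link state and the LRH, grabs a PIO send buffer,
 * writes the PBC word followed by the header, and flushes write-combining
 * between the stages so the final trigger word is only written once the
 * rest of the packet has reached the chip.  Returns -EBUSY if no PIO
 * buffer is currently available.
 */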
int ipath_layer_send_hdr(struct ipath_devdata *dd, struct ether_header *hdr)
{
        int ret = 0;
        u32 __iomem *piobuf;
        u32 plen, *uhdr;
        size_t count;
        __be16 vlsllnh;

        if (!(dd->ipath_flags & IPATH_RCVHDRSZ_SET)) {
                ipath_dbg("send while not open\n");
                ret = -EINVAL;
        } else
                if ((dd->ipath_flags & (IPATH_LINKUNK | IPATH_LINKDOWN)) ||
                    dd->ipath_lid == 0) {
                        /*
                         * lid check is for when sma hasn't yet configured
                         */
                        ret = -ENETDOWN;
                        ipath_cdbg(VERBOSE, "send while not ready, "
                                   "mylid=%u, flags=0x%x\n",
                                   dd->ipath_lid, dd->ipath_flags);
                }

        vlsllnh = *((__be16 *) hdr);
        if (vlsllnh != htons(IPATH_LRH_BTH)) {
                ipath_dbg("Warning: lrh[0] wrong (%x, not %x); "
                          "not sending\n", be16_to_cpu(vlsllnh),
                          IPATH_LRH_BTH);
                ret = -EINVAL;
        }
        if (ret)
                goto done;

        /* Get a PIO buffer to use. */
        piobuf = ipath_getpiobuf(dd, NULL);
        if (piobuf == NULL) {
                ret = -EBUSY;
                goto done;
        }

        plen = (sizeof(*hdr) >> 2); /* actual length */
        ipath_cdbg(EPKT, "0x%x+1w pio %p\n", plen, piobuf);

        writeq(plen+1, piobuf); /* len (+1 for pad) to pbc, no flags */
        ipath_flush_wc();
        piobuf += 2;
        uhdr = (u32 *)hdr;
        count = plen-1; /* amount we can copy before trigger word */
        __iowrite32_copy(piobuf, uhdr, count);
        ipath_flush_wc();
        __raw_writel(uhdr[count], piobuf + count);
        ipath_flush_wc(); /* ensure it's sent, now */

        ipath_stats.sps_ether_spkts++;  /* ether packet sent */

done:
        return ret;
}

EXPORT_SYMBOL_GPL(ipath_layer_send_hdr);

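/*
 * Enable the "PIO buffer available" interrupt in the send control
 * register, so the layered driver can be notified when send buffers free
 * up (e.g. after ipath_layer_send_hdr() has returned -EBUSY).
 */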
int ipath_layer_set_piointbufavail_int(struct ipath_devdata *dd)
{
        set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);

        ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
                         dd->ipath_sendctrl);
        return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_set_piointbufavail_int);