/* drivers/infiniband/hw/ipath/ipath_mad.c */

/*
 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_smi.h>
#include <rdma/ib_pma.h>

#include "ipath_kernel.h"
#include "ipath_verbs.h"
#include "ipath_common.h"

#define IB_SMP_UNSUP_VERSION	cpu_to_be16(0x0004)
#define IB_SMP_UNSUP_METHOD	cpu_to_be16(0x0008)
#define IB_SMP_UNSUP_METH_ATTR	cpu_to_be16(0x000C)
#define IB_SMP_INVALID_FIELD	cpu_to_be16(0x001C)
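
/*
 * Common MAD status codes, pre-swapped to network byte order so they
 * can be OR'd directly into the big-endian smp->status field.
 */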

static int reply(struct ib_smp *smp)
{
	/*
	 * The verbs framework will handle the directed/LID route
	 * packet changes.
	 */
	smp->method = IB_MGMT_METHOD_GET_RESP;
	if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		smp->status |= IB_SMP_DIRECTION;
	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}
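
/*
 * IB_SMP_DIRECTION is the D (direction) bit of the directed-route SMP
 * status field; reply() sets it so a directed-route response is marked
 * as a returning SMP, as the SMA is required to do.
 */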

static int recv_subn_get_nodedescription(struct ib_smp *smp,
					 struct ib_device *ibdev)
{
	if (smp->attr_mod)
		smp->status |= IB_SMP_INVALID_FIELD;

	memcpy(smp->data, ibdev->node_desc, sizeof(smp->data));

	return reply(smp);
}

struct nodeinfo {
	u8 base_version;
	u8 class_version;
	u8 node_type;
	u8 num_ports;
	__be64 sys_guid;
	__be64 node_guid;
	__be64 port_guid;
	__be16 partition_cap;
	__be16 device_id;
	__be32 revision;
	u8 local_port_num;
	u8 vendor_id[3];
} __attribute__ ((packed));
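
/*
 * This layout mirrors the subnet management NodeInfo attribute (IBA
 * vol. 1, ch. 14.2.5), so the struct can be written straight into
 * smp->data; hence the packed attribute.
 */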

static int recv_subn_get_nodeinfo(struct ib_smp *smp,
				  struct ib_device *ibdev, u8 port)
{
	struct nodeinfo *nip = (struct nodeinfo *)&smp->data;
	struct ipath_devdata *dd = to_idev(ibdev)->dd;
	u32 vendor, majrev, minrev;

	/* GUID 0 is illegal */
	if (smp->attr_mod || (dd->ipath_guid == 0))
		smp->status |= IB_SMP_INVALID_FIELD;

	nip->base_version = 1;
	nip->class_version = 1;
	nip->node_type = 1;	/* channel adapter */
	/*
	 * XXX The num_ports value will need a layer function to get
	 * the value if we ever have more than one IB port on a chip.
	 * We will also need to get the GUID for the port.
	 */
	nip->num_ports = ibdev->phys_port_cnt;
	/* This is already in network order */
	nip->sys_guid = to_idev(ibdev)->sys_image_guid;
	nip->node_guid = dd->ipath_guid;
	nip->port_guid = dd->ipath_guid;
	nip->partition_cap = cpu_to_be16(ipath_get_npkeys(dd));
	nip->device_id = cpu_to_be16(dd->ipath_deviceid);
	majrev = dd->ipath_majrev;
	minrev = dd->ipath_minrev;
	nip->revision = cpu_to_be32((majrev << 16) | minrev);
	nip->local_port_num = port;
	vendor = dd->ipath_vendorid;
	nip->vendor_id[0] = IPATH_SRC_OUI_1;
	nip->vendor_id[1] = IPATH_SRC_OUI_2;
	nip->vendor_id[2] = IPATH_SRC_OUI_3;

	return reply(smp);
}

static int recv_subn_get_guidinfo(struct ib_smp *smp,
				  struct ib_device *ibdev)
{
	u32 startgx = 8 * be32_to_cpu(smp->attr_mod);
	__be64 *p = (__be64 *) smp->data;

	/* 32 blocks of 8 64-bit GUIDs per block */

	memset(smp->data, 0, sizeof(smp->data));

	/*
	 * We only support one GUID for now.  If this changes, the
	 * portinfo.guid_cap field needs to be updated too.
	 */
	if (startgx == 0) {
		__be64 g = to_idev(ibdev)->dd->ipath_guid;
		if (g == 0)
			/* GUID 0 is illegal */
			smp->status |= IB_SMP_INVALID_FIELD;
		else
			/* The first is a copy of the read-only HW GUID. */
			*p = g;
	} else
		smp->status |= IB_SMP_INVALID_FIELD;

	return reply(smp);
}

static void set_link_width_enabled(struct ipath_devdata *dd, u32 w)
{
	(void) dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_LWID_ENB, w);
}

static void set_link_speed_enabled(struct ipath_devdata *dd, u32 s)
{
	(void) dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_SPD_ENB, s);
}

static int get_overrunthreshold(struct ipath_devdata *dd)
{
	return (dd->ipath_ibcctrl >>
		INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT) &
		INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK;
}

/**
 * set_overrunthreshold - set the overrun threshold
 * @dd: the infinipath device
 * @n: the new threshold
 *
 * Note that this will only take effect when the link state changes.
 */
static int set_overrunthreshold(struct ipath_devdata *dd, unsigned n)
{
	unsigned v;

	v = (dd->ipath_ibcctrl >> INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT) &
		INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK;
	if (v != n) {
		dd->ipath_ibcctrl &=
			~(INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK <<
			  INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT);
		dd->ipath_ibcctrl |=
			(u64) n << INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
				 dd->ipath_ibcctrl);
	}
	return 0;
}

static int get_phyerrthreshold(struct ipath_devdata *dd)
{
	return (dd->ipath_ibcctrl >>
		INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
		INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
}

/**
 * set_phyerrthreshold - set the physical error threshold
 * @dd: the infinipath device
 * @n: the new threshold
 *
 * Note that this will only take effect when the link state changes.
 */
static int set_phyerrthreshold(struct ipath_devdata *dd, unsigned n)
{
	unsigned v;

	v = (dd->ipath_ibcctrl >> INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
		INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
	if (v != n) {
		dd->ipath_ibcctrl &=
			~(INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK <<
			  INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT);
		dd->ipath_ibcctrl |=
			(u64) n << INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
				 dd->ipath_ibcctrl);
	}
	return 0;
}

/**
 * get_linkdowndefaultstate - get the default linkdown state
 * @dd: the infinipath device
 *
 * Returns zero if the default is POLL, 1 if the default is SLEEP.
 */
static int get_linkdowndefaultstate(struct ipath_devdata *dd)
{
	return !!(dd->ipath_ibcctrl & INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE);
}

static int recv_subn_get_portinfo(struct ib_smp *smp,
				  struct ib_device *ibdev, u8 port)
{
	struct ipath_ibdev *dev;
	struct ipath_devdata *dd;
	struct ib_port_info *pip = (struct ib_port_info *)smp->data;
	u16 lid;
	u8 ibcstat;
	u8 mtu;
	int ret;

	if (be32_to_cpu(smp->attr_mod) > ibdev->phys_port_cnt) {
		smp->status |= IB_SMP_INVALID_FIELD;
		ret = reply(smp);
		goto bail;
	}

	dev = to_idev(ibdev);
	dd = dev->dd;

	/* Clear all fields.  Only set the non-zero fields. */
	memset(smp->data, 0, sizeof(smp->data));

	/* Only return the mkey if the protection field allows it. */
	if (smp->method == IB_MGMT_METHOD_SET || dev->mkey == smp->mkey ||
	    dev->mkeyprot == 0)
		pip->mkey = dev->mkey;
	pip->gid_prefix = dev->gid_prefix;
	lid = dd->ipath_lid;
	pip->lid = lid ? cpu_to_be16(lid) : IB_LID_PERMISSIVE;
	pip->sm_lid = cpu_to_be16(dev->sm_lid);
	pip->cap_mask = cpu_to_be32(dev->port_cap_flags);
	/* pip->diag_code; */
	pip->mkey_lease_period = cpu_to_be16(dev->mkey_lease_period);
	pip->local_port_num = port;
	pip->link_width_enabled = dd->ipath_link_width_enabled;
	pip->link_width_supported = dd->ipath_link_width_supported;
	pip->link_width_active = dd->ipath_link_width_active;
	pip->linkspeed_portstate = dd->ipath_link_speed_supported << 4;
	ibcstat = dd->ipath_lastibcstat;
	/* map LinkState to IB portinfo values. */
	pip->linkspeed_portstate |= ipath_ib_linkstate(dd, ibcstat) + 1;

	pip->portphysstate_linkdown =
		(ipath_cvt_physportstate[ibcstat & dd->ibcs_lts_mask] << 4) |
		(get_linkdowndefaultstate(dd) ? 1 : 2);
	pip->mkeyprot_resv_lmc = (dev->mkeyprot << 6) | dd->ipath_lmc;
	pip->linkspeedactive_enabled = (dd->ipath_link_speed_active << 4) |
		dd->ipath_link_speed_enabled;
	switch (dd->ipath_ibmtu) {
	case 4096:
		mtu = IB_MTU_4096;
		break;
	case 2048:
		mtu = IB_MTU_2048;
		break;
	case 1024:
		mtu = IB_MTU_1024;
		break;
	case 512:
		mtu = IB_MTU_512;
		break;
	case 256:
		mtu = IB_MTU_256;
		break;
	default:		/* oops, something is wrong */
		mtu = IB_MTU_2048;
		break;
	}
	pip->neighbormtu_mastersmsl = (mtu << 4) | dev->sm_sl;
	pip->vlcap_inittype = 0x10;	/* VLCap = VL0, InitType = 0 */
	pip->vl_high_limit = dev->vl_high_limit;
	/* pip->vl_arb_high_cap; // only one VL */
	/* pip->vl_arb_low_cap; // only one VL */
	/* InitTypeReply = 0 */
	/* our mtu cap depends on whether 4K MTU enabled or not */
	pip->inittypereply_mtucap = ipath_mtu4096 ? IB_MTU_4096 : IB_MTU_2048;
	/* HCAs ignore VLStallCount and HOQLife */
	/* pip->vlstallcnt_hoqlife; */
	pip->operationalvl_pei_peo_fpi_fpo = 0x10;	/* OVLs = 1 */
	pip->mkey_violations = cpu_to_be16(dev->mkey_violations);
	/* P_KeyViolations are counted by hardware. */
	pip->pkey_violations =
		cpu_to_be16((ipath_get_cr_errpkey(dd) -
			     dev->z_pkey_violations) & 0xFFFF);
	pip->qkey_violations = cpu_to_be16(dev->qkey_violations);
	/* Only the hardware GUID is supported for now */
	pip->guid_cap = 1;
	pip->clientrereg_resv_subnetto = dev->subnet_timeout;
	/* 32.768 usec. response time (guessing) */
	pip->resv_resptimevalue = 3;
	pip->localphyerrors_overrunerrors =
		(get_phyerrthreshold(dd) << 4) |
		get_overrunthreshold(dd);
	/* pip->max_credit_hint; */
	if (dev->port_cap_flags & IB_PORT_LINK_LATENCY_SUP) {
		u32 v;

		v = dd->ipath_f_get_ib_cfg(dd, IPATH_IB_CFG_LINKLATENCY);
		pip->link_roundtrip_latency[0] = v >> 16;
		pip->link_roundtrip_latency[1] = v >> 8;
		pip->link_roundtrip_latency[2] = v;
	}

	ret = reply(smp);

bail:
	return ret;
}

/**
 * get_pkeys - return the PKEY table for port 0
 * @dd: the infinipath device
 * @pkeys: the pkey table is placed here
 */
static int get_pkeys(struct ipath_devdata *dd, u16 *pkeys)
{
	/* always a kernel port, no locking needed */
	struct ipath_portdata *pd = dd->ipath_pd[0];

	memcpy(pkeys, pd->port_pkeys, sizeof(pd->port_pkeys));

	return 0;
}

static int recv_subn_get_pkeytable(struct ib_smp *smp,
				   struct ib_device *ibdev)
{
	u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff);
	u16 *p = (u16 *) smp->data;
	__be16 *q = (__be16 *) smp->data;

	/* 64 blocks of 32 16-bit P_Key entries */

	memset(smp->data, 0, sizeof(smp->data));
	if (startpx == 0) {
		struct ipath_ibdev *dev = to_idev(ibdev);
		unsigned i, n = ipath_get_npkeys(dev->dd);

		get_pkeys(dev->dd, p);

		for (i = 0; i < n; i++)
			q[i] = cpu_to_be16(p[i]);
	} else
		smp->status |= IB_SMP_INVALID_FIELD;

	return reply(smp);
}
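
/*
 * Note the aliasing in recv_subn_get_pkeytable() above: p and q point
 * at the same smp->data buffer, so the cpu_to_be16() loop converts the
 * table to network byte order in place; this is safe because entry i
 * is read before it is rewritten.
 */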

static int recv_subn_set_guidinfo(struct ib_smp *smp,
				  struct ib_device *ibdev)
{
	/* The only GUID we support is the first read-only entry. */
	return recv_subn_get_guidinfo(smp, ibdev);
}

/**
 * set_linkdowndefaultstate - set the default linkdown state
 * @dd: the infinipath device
 * @sleep: the new state
 *
 * Note that this will only take effect when the link state changes.
 */
static int set_linkdowndefaultstate(struct ipath_devdata *dd, int sleep)
{
	if (sleep)
		dd->ipath_ibcctrl |= INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE;
	else
		dd->ipath_ibcctrl &= ~INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE;
	ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
			 dd->ipath_ibcctrl);
	return 0;
}

/**
 * recv_subn_set_portinfo - set port information
 * @smp: the incoming SM packet
 * @ibdev: the infiniband device
 * @port: the port on the device
 *
 * Set Portinfo (see ch. 14.2.5.6).
 */
static int recv_subn_set_portinfo(struct ib_smp *smp,
				  struct ib_device *ibdev, u8 port)
{
	struct ib_port_info *pip = (struct ib_port_info *)smp->data;
	struct ib_event event;
	struct ipath_ibdev *dev;
	struct ipath_devdata *dd;
	char clientrereg = 0;
	u16 lid, smlid;
	u8 lwe;
	u8 lse;
	u8 state;
	u16 lstate;
	u32 mtu;
	int ret, ore;

	if (be32_to_cpu(smp->attr_mod) > ibdev->phys_port_cnt)
		goto err;

	dev = to_idev(ibdev);
	dd = dev->dd;
	event.device = ibdev;
	event.element.port_num = port;

	dev->mkey = pip->mkey;
	dev->gid_prefix = pip->gid_prefix;
	dev->mkey_lease_period = be16_to_cpu(pip->mkey_lease_period);

	lid = be16_to_cpu(pip->lid);
	if (dd->ipath_lid != lid ||
	    dd->ipath_lmc != (pip->mkeyprot_resv_lmc & 7)) {
		/* Must be a valid unicast LID address. */
		if (lid == 0 || lid >= IPATH_MULTICAST_LID_BASE)
			goto err;
		ipath_set_lid(dd, lid, pip->mkeyprot_resv_lmc & 7);
		event.event = IB_EVENT_LID_CHANGE;
		ib_dispatch_event(&event);
	}

	smlid = be16_to_cpu(pip->sm_lid);
	if (smlid != dev->sm_lid) {
		/* Must be a valid unicast LID address. */
		if (smlid == 0 || smlid >= IPATH_MULTICAST_LID_BASE)
			goto err;
		dev->sm_lid = smlid;
		event.event = IB_EVENT_SM_CHANGE;
		ib_dispatch_event(&event);
	}

	/* Allow 1x or 4x to be set (see 14.2.6.6). */
	lwe = pip->link_width_enabled;
	if (lwe) {
		if (lwe == 0xFF)
			lwe = dd->ipath_link_width_supported;
		else if (lwe >= 16 || (lwe & ~dd->ipath_link_width_supported))
			goto err;
		set_link_width_enabled(dd, lwe);
	}

	/* Allow 2.5 or 5.0 Gb/s. */
	lse = pip->linkspeedactive_enabled & 0xF;
	if (lse) {
		if (lse == 15)
			lse = dd->ipath_link_speed_supported;
		else if (lse >= 8 || (lse & ~dd->ipath_link_speed_supported))
			goto err;
		set_link_speed_enabled(dd, lse);
	}

	/* Set link down default state. */
	switch (pip->portphysstate_linkdown & 0xF) {
	case 0: /* NOP */
		break;
	case 1: /* SLEEP */
		if (set_linkdowndefaultstate(dd, 1))
			goto err;
		break;
	case 2: /* POLL */
		if (set_linkdowndefaultstate(dd, 0))
			goto err;
		break;
	default:
		goto err;
	}

	dev->mkeyprot = pip->mkeyprot_resv_lmc >> 6;
	dev->vl_high_limit = pip->vl_high_limit;

	switch ((pip->neighbormtu_mastersmsl >> 4) & 0xF) {
	case IB_MTU_256:
		mtu = 256;
		break;
	case IB_MTU_512:
		mtu = 512;
		break;
	case IB_MTU_1024:
		mtu = 1024;
		break;
	case IB_MTU_2048:
		mtu = 2048;
		break;
	case IB_MTU_4096:
		if (!ipath_mtu4096)
			goto err;
		mtu = 4096;
		break;
	default:
		/* XXX We have already partially updated our state! */
		goto err;
	}
	ipath_set_mtu(dd, mtu);

	dev->sm_sl = pip->neighbormtu_mastersmsl & 0xF;

	/* We only support VL0 */
	if (((pip->operationalvl_pei_peo_fpi_fpo >> 4) & 0xF) > 1)
		goto err;

	if (pip->mkey_violations == 0)
		dev->mkey_violations = 0;

	/*
	 * Hardware counter can't be reset so snapshot and subtract
	 * later.
	 */
	if (pip->pkey_violations == 0)
		dev->z_pkey_violations = ipath_get_cr_errpkey(dd);

	if (pip->qkey_violations == 0)
		dev->qkey_violations = 0;

	ore = pip->localphyerrors_overrunerrors;
	if (set_phyerrthreshold(dd, (ore >> 4) & 0xF))
		goto err;

	if (set_overrunthreshold(dd, (ore & 0xF)))
		goto err;

	dev->subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F;

	if (pip->clientrereg_resv_subnetto & 0x80) {
		clientrereg = 1;
		event.event = IB_EVENT_CLIENT_REREGISTER;
		ib_dispatch_event(&event);
	}

	/*
	 * Do the port state change now that the other link parameters
	 * have been set.
	 * Changing the port physical state only makes sense if the link
	 * is down or is being set to down.
	 */
	state = pip->linkspeed_portstate & 0xF;
	lstate = (pip->portphysstate_linkdown >> 4) & 0xF;
	if (lstate && !(state == IB_PORT_DOWN || state == IB_PORT_NOP))
		goto err;

	/*
	 * Only state changes of DOWN, ARM, and ACTIVE are valid
	 * and must be in the correct state to take effect (see 7.2.6).
	 */
	switch (state) {
	case IB_PORT_NOP:
		if (lstate == 0)
			break;
		/* FALLTHROUGH */
	case IB_PORT_DOWN:
		if (lstate == 0)
			lstate = IPATH_IB_LINKDOWN_ONLY;
		else if (lstate == 1)
			lstate = IPATH_IB_LINKDOWN_SLEEP;
		else if (lstate == 2)
			lstate = IPATH_IB_LINKDOWN;
		else if (lstate == 3)
			lstate = IPATH_IB_LINKDOWN_DISABLE;
		else
			goto err;
		ipath_set_linkstate(dd, lstate);
		if (lstate == IPATH_IB_LINKDOWN_DISABLE) {
			ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
			goto done;
		}
		ipath_wait_linkstate(dd, IPATH_LINKINIT | IPATH_LINKARMED |
				     IPATH_LINKACTIVE, 1000);
		break;
	case IB_PORT_ARMED:
		ipath_set_linkstate(dd, IPATH_IB_LINKARM);
		break;
	case IB_PORT_ACTIVE:
		ipath_set_linkstate(dd, IPATH_IB_LINKACTIVE);
		break;
	default:
		/* XXX We have already partially updated our state! */
		goto err;
	}

	ret = recv_subn_get_portinfo(smp, ibdev, port);

	if (clientrereg)
		pip->clientrereg_resv_subnetto |= 0x80;

	goto done;

err:
	smp->status |= IB_SMP_INVALID_FIELD;
	ret = recv_subn_get_portinfo(smp, ibdev, port);

done:
	return ret;
}
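
/*
 * The helpers below maintain a reference-counted shadow of the hardware
 * P_Key table: rm_pkey() and add_pkey() adjust per-entry reference
 * counts, and set_pkeys() rewrites the partition key register only when
 * an entry actually changed.
 */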

/**
 * rm_pkey - decrement the reference count for the given PKEY
 * @dd: the infinipath device
 * @key: the PKEY index
 *
 * Return true if this was the last reference and the hardware table entry
 * needs to be changed.
 */
static int rm_pkey(struct ipath_devdata *dd, u16 key)
{
	int i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
		if (dd->ipath_pkeys[i] != key)
			continue;
		if (atomic_dec_and_test(&dd->ipath_pkeyrefs[i])) {
			dd->ipath_pkeys[i] = 0;
			ret = 1;
			goto bail;
		}
		break;
	}

	ret = 0;

bail:
	return ret;
}

/**
 * add_pkey - add the given PKEY to the hardware table
 * @dd: the infinipath device
 * @key: the PKEY
 *
 * Return an error code if unable to add the entry, zero if no change,
 * or 1 if the hardware PKEY register needs to be updated.
 */
static int add_pkey(struct ipath_devdata *dd, u16 key)
{
	int i;
	u16 lkey = key & 0x7FFF;
	int any = 0;
	int ret;

	if (lkey == 0x7FFF) {
		ret = 0;
		goto bail;
	}

	/* Look for an empty slot or a matching PKEY. */
	for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
		if (!dd->ipath_pkeys[i]) {
			any++;
			continue;
		}
		/* If it matches exactly, try to increment the ref count */
		if (dd->ipath_pkeys[i] == key) {
			if (atomic_inc_return(&dd->ipath_pkeyrefs[i]) > 1) {
				ret = 0;
				goto bail;
			}
			/* Lost the race. Look for an empty slot below. */
			atomic_dec(&dd->ipath_pkeyrefs[i]);
			any++;
		}
		/*
		 * It makes no sense to have both the limited and unlimited
		 * PKEY set at the same time since the unlimited one will
		 * disable the limited one.
		 */
		if ((dd->ipath_pkeys[i] & 0x7FFF) == lkey) {
			ret = -EEXIST;
			goto bail;
		}
	}
	if (!any) {
		ret = -EBUSY;
		goto bail;
	}
	for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
		if (!dd->ipath_pkeys[i] &&
		    atomic_inc_return(&dd->ipath_pkeyrefs[i]) == 1) {
			/* for ipathstats, etc. */
			ipath_stats.sps_pkeys[i] = lkey;
			dd->ipath_pkeys[i] = key;
			ret = 1;
			goto bail;
		}
	}
	ret = -EBUSY;

bail:
	return ret;
}
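
/*
 * Note on the second loop in add_pkey() above: a free slot is claimed
 * only when atomic_inc_return() sees the count go from 0 to 1, so two
 * racing callers cannot both take the same entry; a loser keeps
 * scanning and ultimately fails with -EBUSY.
 */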

/**
 * set_pkeys - set the PKEY table for port 0
 * @dd: the infinipath device
 * @pkeys: the PKEY table
 */
static int set_pkeys(struct ipath_devdata *dd, u16 *pkeys)
{
	struct ipath_portdata *pd;
	int i;
	int changed = 0;

	/* always a kernel port, no locking needed */
	pd = dd->ipath_pd[0];

	for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) {
		u16 key = pkeys[i];
		u16 okey = pd->port_pkeys[i];

		if (key == okey)
			continue;
		/*
		 * The value of this PKEY table entry is changing.
		 * Remove the old entry in the hardware's array of PKEYs.
		 */
		if (okey & 0x7FFF)
			changed |= rm_pkey(dd, okey);
		if (key & 0x7FFF) {
			int ret = add_pkey(dd, key);

			if (ret < 0)
				key = 0;
			else
				changed |= ret;
		}
		pd->port_pkeys[i] = key;
	}
	if (changed) {
		u64 pkey;

		pkey = (u64) dd->ipath_pkeys[0] |
			((u64) dd->ipath_pkeys[1] << 16) |
			((u64) dd->ipath_pkeys[2] << 32) |
			((u64) dd->ipath_pkeys[3] << 48);
		ipath_cdbg(VERBOSE, "p0 new pkey reg %llx\n",
			   (unsigned long long) pkey);
		ipath_write_kreg(dd, dd->ipath_kregs->kr_partitionkey,
				 pkey);
	}
	return 0;
}

static int recv_subn_set_pkeytable(struct ib_smp *smp,
				   struct ib_device *ibdev)
{
	u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff);
	__be16 *p = (__be16 *) smp->data;
	u16 *q = (u16 *) smp->data;
	struct ipath_ibdev *dev = to_idev(ibdev);
	unsigned i, n = ipath_get_npkeys(dev->dd);

	for (i = 0; i < n; i++)
		q[i] = be16_to_cpu(p[i]);

	if (startpx != 0 || set_pkeys(dev->dd, q) != 0)
		smp->status |= IB_SMP_INVALID_FIELD;

	return recv_subn_get_pkeytable(smp, ibdev);
}

static int recv_pma_get_classportinfo(struct ib_pma_mad *pmp)
{
	struct ib_class_port_info *p =
		(struct ib_class_port_info *)pmp->data;

	memset(pmp->data, 0, sizeof(pmp->data));

	if (pmp->mad_hdr.attr_mod != 0)
		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;

	/* Indicate AllPortSelect is valid (only one port anyway) */
	p->capability_mask = cpu_to_be16(1 << 8);
	p->base_version = 1;
	p->class_version = 1;
	/*
	 * Expected response time is 4.096 usec. * 2^18 == 1.073741824
	 * sec.
	 */
	p->resp_time_value = 18;

	return reply((struct ib_smp *) pmp);
}

/*
 * The PortSamplesControl.CounterMasks field is an array of 3 bit fields
 * which specify the N'th counter's capabilities. See ch. 16.1.3.2.
 * We support 5 counters which only count the mandatory quantities.
 */
#define COUNTER_MASK(q, n) (q << ((9 - n) * 3))
#define COUNTER_MASK0_9 cpu_to_be32(COUNTER_MASK(1, 0) | \
				    COUNTER_MASK(1, 1) | \
				    COUNTER_MASK(1, 2) | \
				    COUNTER_MASK(1, 3) | \
				    COUNTER_MASK(1, 4))
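
/*
 * Worked example: COUNTER_MASK(1, 0) == 1 << 27, so counter 0's 3-bit
 * field occupies bits 29:27, counter 1 bits 26:24, and so on down to
 * counter 9 at bits 2:0.  COUNTER_MASK0_9 therefore evaluates to
 * cpu_to_be32(0x09248000): capability value 1 for the five mandatory
 * counters 0-4, 0 (unsupported) for the rest.
 */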

static int recv_pma_get_portsamplescontrol(struct ib_pma_mad *pmp,
					   struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portsamplescontrol *p =
		(struct ib_pma_portsamplescontrol *)pmp->data;
	struct ipath_ibdev *dev = to_idev(ibdev);
	struct ipath_cregs const *crp = dev->dd->ipath_cregs;
	unsigned long flags;
	u8 port_select = p->port_select;

	memset(pmp->data, 0, sizeof(pmp->data));

	p->port_select = port_select;
	if (pmp->mad_hdr.attr_mod != 0 ||
	    (port_select != port && port_select != 0xFF))
		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
	/*
	 * Ticks are 10x the link transfer period, which for 2.5 Gb/s is 4
	 * nsec.  0 == 4 nsec., 1 == 8 nsec., ..., 255 == 1020 nsec.  Sample
	 * intervals are counted in ticks.  Since we use Linux timers, which
	 * count in jiffies, we can't sample for less than 1000 ticks if HZ
	 * == 1000 (4000 ticks if HZ is 250).  link_speed_active returns 2
	 * for DDR, 1 for SDR; set the tick to 1 for DDR, 0 for SDR on chips
	 * that have hardware support for delaying packets.
	 */
	if (crp->cr_psstat)
		p->tick = dev->dd->ipath_link_speed_active - 1;
	else
		p->tick = 250;		/* 1 usec. */
	p->counter_width = 4;	/* 32 bit counters */
	p->counter_mask0_9 = COUNTER_MASK0_9;
	spin_lock_irqsave(&dev->pending_lock, flags);
	if (crp->cr_psstat)
		p->sample_status = ipath_read_creg32(dev->dd, crp->cr_psstat);
	else
		p->sample_status = dev->pma_sample_status;
	p->sample_start = cpu_to_be32(dev->pma_sample_start);
	p->sample_interval = cpu_to_be32(dev->pma_sample_interval);
	p->tag = cpu_to_be16(dev->pma_tag);
	p->counter_select[0] = dev->pma_counter_select[0];
	p->counter_select[1] = dev->pma_counter_select[1];
	p->counter_select[2] = dev->pma_counter_select[2];
	p->counter_select[3] = dev->pma_counter_select[3];
	p->counter_select[4] = dev->pma_counter_select[4];
	spin_unlock_irqrestore(&dev->pending_lock, flags);

	return reply((struct ib_smp *) pmp);
}

static int recv_pma_set_portsamplescontrol(struct ib_pma_mad *pmp,
					   struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portsamplescontrol *p =
		(struct ib_pma_portsamplescontrol *)pmp->data;
	struct ipath_ibdev *dev = to_idev(ibdev);
	struct ipath_cregs const *crp = dev->dd->ipath_cregs;
	unsigned long flags;
	u8 status;
	int ret;

	if (pmp->mad_hdr.attr_mod != 0 ||
	    (p->port_select != port && p->port_select != 0xFF)) {
		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
		ret = reply((struct ib_smp *) pmp);
		goto bail;
	}

	spin_lock_irqsave(&dev->pending_lock, flags);
	if (crp->cr_psstat)
		status = ipath_read_creg32(dev->dd, crp->cr_psstat);
	else
		status = dev->pma_sample_status;
	if (status == IB_PMA_SAMPLE_STATUS_DONE) {
		dev->pma_sample_start = be32_to_cpu(p->sample_start);
		dev->pma_sample_interval = be32_to_cpu(p->sample_interval);
		dev->pma_tag = be16_to_cpu(p->tag);
		dev->pma_counter_select[0] = p->counter_select[0];
		dev->pma_counter_select[1] = p->counter_select[1];
		dev->pma_counter_select[2] = p->counter_select[2];
		dev->pma_counter_select[3] = p->counter_select[3];
		dev->pma_counter_select[4] = p->counter_select[4];
		if (crp->cr_psstat) {
			ipath_write_creg(dev->dd, crp->cr_psinterval,
					 dev->pma_sample_interval);
			ipath_write_creg(dev->dd, crp->cr_psstart,
					 dev->pma_sample_start);
		} else
			dev->pma_sample_status = IB_PMA_SAMPLE_STATUS_STARTED;
	}
	spin_unlock_irqrestore(&dev->pending_lock, flags);

	ret = recv_pma_get_portsamplescontrol(pmp, ibdev, port);

bail:
	return ret;
}
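
/*
 * get_counter() maps a PortSamplesControl CounterSelect code to the
 * sampled value: chips that have hardware sample counters (non-zero
 * crp->cr_ps* register offsets) are read directly; otherwise the
 * software shadow counts kept in struct ipath_ibdev are returned.
 */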

static u64 get_counter(struct ipath_ibdev *dev,
		       struct ipath_cregs const *crp,
		       __be16 sel)
{
	u64 ret;

	switch (sel) {
	case IB_PMA_PORT_XMIT_DATA:
		ret = (crp->cr_psxmitdatacount) ?
			ipath_read_creg32(dev->dd, crp->cr_psxmitdatacount) :
			dev->ipath_sword;
		break;
	case IB_PMA_PORT_RCV_DATA:
		ret = (crp->cr_psrcvdatacount) ?
			ipath_read_creg32(dev->dd, crp->cr_psrcvdatacount) :
			dev->ipath_rword;
		break;
	case IB_PMA_PORT_XMIT_PKTS:
		ret = (crp->cr_psxmitpktscount) ?
			ipath_read_creg32(dev->dd, crp->cr_psxmitpktscount) :
			dev->ipath_spkts;
		break;
	case IB_PMA_PORT_RCV_PKTS:
		ret = (crp->cr_psrcvpktscount) ?
			ipath_read_creg32(dev->dd, crp->cr_psrcvpktscount) :
			dev->ipath_rpkts;
		break;
	case IB_PMA_PORT_XMIT_WAIT:
		ret = (crp->cr_psxmitwaitcount) ?
			ipath_read_creg32(dev->dd, crp->cr_psxmitwaitcount) :
			dev->ipath_xmit_wait;
		break;
	default:
		ret = 0;
	}

	return ret;
}

static int recv_pma_get_portsamplesresult(struct ib_pma_mad *pmp,
					  struct ib_device *ibdev)
{
	struct ib_pma_portsamplesresult *p =
		(struct ib_pma_portsamplesresult *)pmp->data;
	struct ipath_ibdev *dev = to_idev(ibdev);
	struct ipath_cregs const *crp = dev->dd->ipath_cregs;
	u8 status;
	int i;

	memset(pmp->data, 0, sizeof(pmp->data));
	p->tag = cpu_to_be16(dev->pma_tag);
	if (crp->cr_psstat)
		status = ipath_read_creg32(dev->dd, crp->cr_psstat);
	else
		status = dev->pma_sample_status;
	p->sample_status = cpu_to_be16(status);
	for (i = 0; i < ARRAY_SIZE(dev->pma_counter_select); i++)
		p->counter[i] = (status != IB_PMA_SAMPLE_STATUS_DONE) ? 0 :
		    cpu_to_be32(
			get_counter(dev, crp, dev->pma_counter_select[i]));

	return reply((struct ib_smp *) pmp);
}

static int recv_pma_get_portsamplesresult_ext(struct ib_pma_mad *pmp,
					      struct ib_device *ibdev)
{
	struct ib_pma_portsamplesresult_ext *p =
		(struct ib_pma_portsamplesresult_ext *)pmp->data;
	struct ipath_ibdev *dev = to_idev(ibdev);
	struct ipath_cregs const *crp = dev->dd->ipath_cregs;
	u8 status;
	int i;

	memset(pmp->data, 0, sizeof(pmp->data));
	p->tag = cpu_to_be16(dev->pma_tag);
	if (crp->cr_psstat)
		status = ipath_read_creg32(dev->dd, crp->cr_psstat);
	else
		status = dev->pma_sample_status;
	p->sample_status = cpu_to_be16(status);
	/* 64 bits */
	p->extended_width = cpu_to_be32(0x80000000);
	for (i = 0; i < ARRAY_SIZE(dev->pma_counter_select); i++)
		p->counter[i] = (status != IB_PMA_SAMPLE_STATUS_DONE) ? 0 :
		    cpu_to_be64(
			get_counter(dev, crp, dev->pma_counter_select[i]));

	return reply((struct ib_smp *) pmp);
}
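
/*
 * The PortCounters fields below are narrow saturating counters: each
 * value is clamped to the maximum of its 8/16/32-bit field rather than
 * allowed to wrap, and counter "resets" are emulated by subtracting the
 * snapshot taken when the counter was last cleared (the z_* fields).
 */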

static int recv_pma_get_portcounters(struct ib_pma_mad *pmp,
				     struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
		pmp->data;
	struct ipath_ibdev *dev = to_idev(ibdev);
	struct ipath_verbs_counters cntrs;
	u8 port_select = p->port_select;

	ipath_get_counters(dev->dd, &cntrs);

	/* Adjust counters for any resets done. */
	cntrs.symbol_error_counter -= dev->z_symbol_error_counter;
	cntrs.link_error_recovery_counter -=
		dev->z_link_error_recovery_counter;
	cntrs.link_downed_counter -= dev->z_link_downed_counter;
	cntrs.port_rcv_errors += dev->rcv_errors;
	cntrs.port_rcv_errors -= dev->z_port_rcv_errors;
	cntrs.port_rcv_remphys_errors -= dev->z_port_rcv_remphys_errors;
	cntrs.port_xmit_discards -= dev->z_port_xmit_discards;
	cntrs.port_xmit_data -= dev->z_port_xmit_data;
	cntrs.port_rcv_data -= dev->z_port_rcv_data;
	cntrs.port_xmit_packets -= dev->z_port_xmit_packets;
	cntrs.port_rcv_packets -= dev->z_port_rcv_packets;
	cntrs.local_link_integrity_errors -=
		dev->z_local_link_integrity_errors;
	cntrs.excessive_buffer_overrun_errors -=
		dev->z_excessive_buffer_overrun_errors;
	cntrs.vl15_dropped -= dev->z_vl15_dropped;
	cntrs.vl15_dropped += dev->n_vl15_dropped;

	memset(pmp->data, 0, sizeof(pmp->data));

	p->port_select = port_select;
	if (pmp->mad_hdr.attr_mod != 0 ||
	    (port_select != port && port_select != 0xFF))
		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;

	if (cntrs.symbol_error_counter > 0xFFFFUL)
		p->symbol_error_counter = cpu_to_be16(0xFFFF);
	else
		p->symbol_error_counter =
			cpu_to_be16((u16)cntrs.symbol_error_counter);
	if (cntrs.link_error_recovery_counter > 0xFFUL)
		p->link_error_recovery_counter = 0xFF;
	else
		p->link_error_recovery_counter =
			(u8)cntrs.link_error_recovery_counter;
	if (cntrs.link_downed_counter > 0xFFUL)
		p->link_downed_counter = 0xFF;
	else
		p->link_downed_counter = (u8)cntrs.link_downed_counter;
	if (cntrs.port_rcv_errors > 0xFFFFUL)
		p->port_rcv_errors = cpu_to_be16(0xFFFF);
	else
		p->port_rcv_errors =
			cpu_to_be16((u16) cntrs.port_rcv_errors);
	if (cntrs.port_rcv_remphys_errors > 0xFFFFUL)
		p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF);
	else
		p->port_rcv_remphys_errors =
			cpu_to_be16((u16)cntrs.port_rcv_remphys_errors);
	if (cntrs.port_xmit_discards > 0xFFFFUL)
		p->port_xmit_discards = cpu_to_be16(0xFFFF);
	else
		p->port_xmit_discards =
			cpu_to_be16((u16)cntrs.port_xmit_discards);
	if (cntrs.local_link_integrity_errors > 0xFUL)
		cntrs.local_link_integrity_errors = 0xFUL;
	if (cntrs.excessive_buffer_overrun_errors > 0xFUL)
		cntrs.excessive_buffer_overrun_errors = 0xFUL;
	p->link_overrun_errors = (cntrs.local_link_integrity_errors << 4) |
		cntrs.excessive_buffer_overrun_errors;
	if (cntrs.vl15_dropped > 0xFFFFUL)
		p->vl15_dropped = cpu_to_be16(0xFFFF);
	else
		p->vl15_dropped = cpu_to_be16((u16)cntrs.vl15_dropped);
	if (cntrs.port_xmit_data > 0xFFFFFFFFUL)
		p->port_xmit_data = cpu_to_be32(0xFFFFFFFF);
	else
		p->port_xmit_data = cpu_to_be32((u32)cntrs.port_xmit_data);
	if (cntrs.port_rcv_data > 0xFFFFFFFFUL)
		p->port_rcv_data = cpu_to_be32(0xFFFFFFFF);
	else
		p->port_rcv_data = cpu_to_be32((u32)cntrs.port_rcv_data);
	if (cntrs.port_xmit_packets > 0xFFFFFFFFUL)
		p->port_xmit_packets = cpu_to_be32(0xFFFFFFFF);
	else
		p->port_xmit_packets =
			cpu_to_be32((u32)cntrs.port_xmit_packets);
	if (cntrs.port_rcv_packets > 0xFFFFFFFFUL)
		p->port_rcv_packets = cpu_to_be32(0xFFFFFFFF);
	else
		p->port_rcv_packets =
			cpu_to_be32((u32) cntrs.port_rcv_packets);

	return reply((struct ib_smp *) pmp);
}

static int recv_pma_get_portcounters_ext(struct ib_pma_mad *pmp,
					 struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portcounters_ext *p =
		(struct ib_pma_portcounters_ext *)pmp->data;
	struct ipath_ibdev *dev = to_idev(ibdev);
	u64 swords, rwords, spkts, rpkts, xwait;
	u8 port_select = p->port_select;

	ipath_snapshot_counters(dev->dd, &swords, &rwords, &spkts,
				&rpkts, &xwait);

	/* Adjust counters for any resets done. */
	swords -= dev->z_port_xmit_data;
	rwords -= dev->z_port_rcv_data;
	spkts -= dev->z_port_xmit_packets;
	rpkts -= dev->z_port_rcv_packets;

	memset(pmp->data, 0, sizeof(pmp->data));

	p->port_select = port_select;
	if (pmp->mad_hdr.attr_mod != 0 ||
	    (port_select != port && port_select != 0xFF))
		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;

	p->port_xmit_data = cpu_to_be64(swords);
	p->port_rcv_data = cpu_to_be64(rwords);
	p->port_xmit_packets = cpu_to_be64(spkts);
	p->port_rcv_packets = cpu_to_be64(rpkts);
	p->port_unicast_xmit_packets = cpu_to_be64(dev->n_unicast_xmit);
	p->port_unicast_rcv_packets = cpu_to_be64(dev->n_unicast_rcv);
	p->port_multicast_xmit_packets = cpu_to_be64(dev->n_multicast_xmit);
	p->port_multicast_rcv_packets = cpu_to_be64(dev->n_multicast_rcv);

	return reply((struct ib_smp *) pmp);
}

static int recv_pma_set_portcounters(struct ib_pma_mad *pmp,
				     struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
		pmp->data;
	struct ipath_ibdev *dev = to_idev(ibdev);
	struct ipath_verbs_counters cntrs;

	/*
	 * Since the HW doesn't support clearing counters, we save the
	 * current count and subtract it from future responses.
	 */
	ipath_get_counters(dev->dd, &cntrs);

	if (p->counter_select & IB_PMA_SEL_SYMBOL_ERROR)
		dev->z_symbol_error_counter = cntrs.symbol_error_counter;

	if (p->counter_select & IB_PMA_SEL_LINK_ERROR_RECOVERY)
		dev->z_link_error_recovery_counter =
			cntrs.link_error_recovery_counter;

	if (p->counter_select & IB_PMA_SEL_LINK_DOWNED)
		dev->z_link_downed_counter = cntrs.link_downed_counter;

	if (p->counter_select & IB_PMA_SEL_PORT_RCV_ERRORS)
		dev->z_port_rcv_errors =
			cntrs.port_rcv_errors + dev->rcv_errors;

	if (p->counter_select & IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS)
		dev->z_port_rcv_remphys_errors =
			cntrs.port_rcv_remphys_errors;

	if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DISCARDS)
		dev->z_port_xmit_discards = cntrs.port_xmit_discards;

	if (p->counter_select & IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS)
		dev->z_local_link_integrity_errors =
			cntrs.local_link_integrity_errors;

	if (p->counter_select & IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS)
		dev->z_excessive_buffer_overrun_errors =
			cntrs.excessive_buffer_overrun_errors;

	if (p->counter_select & IB_PMA_SEL_PORT_VL15_DROPPED) {
		dev->n_vl15_dropped = 0;
		dev->z_vl15_dropped = cntrs.vl15_dropped;
	}

	if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DATA)
		dev->z_port_xmit_data = cntrs.port_xmit_data;

	if (p->counter_select & IB_PMA_SEL_PORT_RCV_DATA)
		dev->z_port_rcv_data = cntrs.port_rcv_data;

	if (p->counter_select & IB_PMA_SEL_PORT_XMIT_PACKETS)
		dev->z_port_xmit_packets = cntrs.port_xmit_packets;

	if (p->counter_select & IB_PMA_SEL_PORT_RCV_PACKETS)
		dev->z_port_rcv_packets = cntrs.port_rcv_packets;

	return recv_pma_get_portcounters(pmp, ibdev, port);
}

static int recv_pma_set_portcounters_ext(struct ib_pma_mad *pmp,
					 struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
		pmp->data;
	struct ipath_ibdev *dev = to_idev(ibdev);
	u64 swords, rwords, spkts, rpkts, xwait;

	ipath_snapshot_counters(dev->dd, &swords, &rwords, &spkts,
				&rpkts, &xwait);

	if (p->counter_select & IB_PMA_SELX_PORT_XMIT_DATA)
		dev->z_port_xmit_data = swords;

	if (p->counter_select & IB_PMA_SELX_PORT_RCV_DATA)
		dev->z_port_rcv_data = rwords;

	if (p->counter_select & IB_PMA_SELX_PORT_XMIT_PACKETS)
		dev->z_port_xmit_packets = spkts;

	if (p->counter_select & IB_PMA_SELX_PORT_RCV_PACKETS)
		dev->z_port_rcv_packets = rpkts;

	if (p->counter_select & IB_PMA_SELX_PORT_UNI_XMIT_PACKETS)
		dev->n_unicast_xmit = 0;

	if (p->counter_select & IB_PMA_SELX_PORT_UNI_RCV_PACKETS)
		dev->n_unicast_rcv = 0;

	if (p->counter_select & IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS)
		dev->n_multicast_xmit = 0;

	if (p->counter_select & IB_PMA_SELX_PORT_MULTI_RCV_PACKETS)
		dev->n_multicast_rcv = 0;

	return recv_pma_get_portcounters_ext(pmp, ibdev, port);
}
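
/*
 * M_Key protection levels (PortInfo:M_KeyProtectBits) as enforced by
 * process_subn() below: levels 0 and 1 allow Get() without the correct
 * M_Key, levels 2 and 3 protect Get() as well; Set() is always
 * protected once a non-zero M_Key is configured.
 */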

static int process_subn(struct ib_device *ibdev, int mad_flags,
			u8 port_num, struct ib_mad *in_mad,
			struct ib_mad *out_mad)
{
	struct ib_smp *smp = (struct ib_smp *)out_mad;
	struct ipath_ibdev *dev = to_idev(ibdev);
	int ret;

	*out_mad = *in_mad;
	if (smp->class_version != 1) {
		smp->status |= IB_SMP_UNSUP_VERSION;
		ret = reply(smp);
		goto bail;
	}

	/* Is the mkey in the process of expiring? */
	if (dev->mkey_lease_timeout &&
	    time_after_eq(jiffies, dev->mkey_lease_timeout)) {
		/* Clear timeout and mkey protection field. */
		dev->mkey_lease_timeout = 0;
		dev->mkeyprot = 0;
	}

	/*
	 * M_Key checking depends on
	 * Portinfo:M_Key_protect_bits
	 */
	if ((mad_flags & IB_MAD_IGNORE_MKEY) == 0 && dev->mkey != 0 &&
	    dev->mkey != smp->mkey &&
	    (smp->method == IB_MGMT_METHOD_SET ||
	     (smp->method == IB_MGMT_METHOD_GET &&
	      dev->mkeyprot >= 2))) {
		if (dev->mkey_violations != 0xFFFF)
			++dev->mkey_violations;
		if (dev->mkey_lease_timeout ||
		    dev->mkey_lease_period == 0) {
			ret = IB_MAD_RESULT_SUCCESS |
				IB_MAD_RESULT_CONSUMED;
			goto bail;
		}
		dev->mkey_lease_timeout = jiffies +
			dev->mkey_lease_period * HZ;
		/* Future: Generate a trap notice. */
		ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
		goto bail;
	} else if (dev->mkey_lease_timeout)
		dev->mkey_lease_timeout = 0;

	switch (smp->method) {
	case IB_MGMT_METHOD_GET:
		switch (smp->attr_id) {
		case IB_SMP_ATTR_NODE_DESC:
			ret = recv_subn_get_nodedescription(smp, ibdev);
			goto bail;
		case IB_SMP_ATTR_NODE_INFO:
			ret = recv_subn_get_nodeinfo(smp, ibdev, port_num);
			goto bail;
		case IB_SMP_ATTR_GUID_INFO:
			ret = recv_subn_get_guidinfo(smp, ibdev);
			goto bail;
		case IB_SMP_ATTR_PORT_INFO:
			ret = recv_subn_get_portinfo(smp, ibdev, port_num);
			goto bail;
		case IB_SMP_ATTR_PKEY_TABLE:
			ret = recv_subn_get_pkeytable(smp, ibdev);
			goto bail;
		case IB_SMP_ATTR_SM_INFO:
			if (dev->port_cap_flags & IB_PORT_SM_DISABLED) {
				ret = IB_MAD_RESULT_SUCCESS |
					IB_MAD_RESULT_CONSUMED;
				goto bail;
			}
			if (dev->port_cap_flags & IB_PORT_SM) {
				ret = IB_MAD_RESULT_SUCCESS;
				goto bail;
			}
			/* FALLTHROUGH */
		default:
			smp->status |= IB_SMP_UNSUP_METH_ATTR;
			ret = reply(smp);
			goto bail;
		}

	case IB_MGMT_METHOD_SET:
		switch (smp->attr_id) {
		case IB_SMP_ATTR_GUID_INFO:
			ret = recv_subn_set_guidinfo(smp, ibdev);
			goto bail;
		case IB_SMP_ATTR_PORT_INFO:
			ret = recv_subn_set_portinfo(smp, ibdev, port_num);
			goto bail;
		case IB_SMP_ATTR_PKEY_TABLE:
			ret = recv_subn_set_pkeytable(smp, ibdev);
			goto bail;
		case IB_SMP_ATTR_SM_INFO:
			if (dev->port_cap_flags & IB_PORT_SM_DISABLED) {
				ret = IB_MAD_RESULT_SUCCESS |
					IB_MAD_RESULT_CONSUMED;
				goto bail;
			}
			if (dev->port_cap_flags & IB_PORT_SM) {
				ret = IB_MAD_RESULT_SUCCESS;
				goto bail;
			}
			/* FALLTHROUGH */
		default:
			smp->status |= IB_SMP_UNSUP_METH_ATTR;
			ret = reply(smp);
			goto bail;
		}

	case IB_MGMT_METHOD_TRAP:
	case IB_MGMT_METHOD_REPORT:
	case IB_MGMT_METHOD_REPORT_RESP:
	case IB_MGMT_METHOD_TRAP_REPRESS:
	case IB_MGMT_METHOD_GET_RESP:
		/*
		 * The ib_mad module will call us to process responses
		 * before checking for other consumers.
		 * Just tell the caller to process it normally.
		 */
		ret = IB_MAD_RESULT_SUCCESS;
		goto bail;
	default:
		smp->status |= IB_SMP_UNSUP_METHOD;
		ret = reply(smp);
	}

bail:
	return ret;
}

static int process_perf(struct ib_device *ibdev, u8 port_num,
			struct ib_mad *in_mad,
			struct ib_mad *out_mad)
{
	struct ib_pma_mad *pmp = (struct ib_pma_mad *)out_mad;
	int ret;

	*out_mad = *in_mad;
	if (pmp->mad_hdr.class_version != 1) {
		pmp->mad_hdr.status |= IB_SMP_UNSUP_VERSION;
		ret = reply((struct ib_smp *) pmp);
		goto bail;
	}

	switch (pmp->mad_hdr.method) {
	case IB_MGMT_METHOD_GET:
		switch (pmp->mad_hdr.attr_id) {
		case IB_PMA_CLASS_PORT_INFO:
			ret = recv_pma_get_classportinfo(pmp);
			goto bail;
		case IB_PMA_PORT_SAMPLES_CONTROL:
			ret = recv_pma_get_portsamplescontrol(pmp, ibdev,
							      port_num);
			goto bail;
		case IB_PMA_PORT_SAMPLES_RESULT:
			ret = recv_pma_get_portsamplesresult(pmp, ibdev);
			goto bail;
		case IB_PMA_PORT_SAMPLES_RESULT_EXT:
			ret = recv_pma_get_portsamplesresult_ext(pmp,
								 ibdev);
			goto bail;
		case IB_PMA_PORT_COUNTERS:
			ret = recv_pma_get_portcounters(pmp, ibdev,
							port_num);
			goto bail;
		case IB_PMA_PORT_COUNTERS_EXT:
			ret = recv_pma_get_portcounters_ext(pmp, ibdev,
							    port_num);
			goto bail;
		default:
			pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
			ret = reply((struct ib_smp *) pmp);
			goto bail;
		}

	case IB_MGMT_METHOD_SET:
		switch (pmp->mad_hdr.attr_id) {
		case IB_PMA_PORT_SAMPLES_CONTROL:
			ret = recv_pma_set_portsamplescontrol(pmp, ibdev,
							      port_num);
			goto bail;
		case IB_PMA_PORT_COUNTERS:
			ret = recv_pma_set_portcounters(pmp, ibdev,
							port_num);
			goto bail;
		case IB_PMA_PORT_COUNTERS_EXT:
			ret = recv_pma_set_portcounters_ext(pmp, ibdev,
							    port_num);
			goto bail;
		default:
			pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
			ret = reply((struct ib_smp *) pmp);
			goto bail;
		}

	case IB_MGMT_METHOD_GET_RESP:
		/*
		 * The ib_mad module will call us to process responses
		 * before checking for other consumers.
		 * Just tell the caller to process it normally.
		 */
		ret = IB_MAD_RESULT_SUCCESS;
		goto bail;
	default:
		pmp->mad_hdr.status |= IB_SMP_UNSUP_METHOD;
		ret = reply((struct ib_smp *) pmp);
	}

bail:
	return ret;
}

/**
 * ipath_process_mad - process an incoming MAD packet
 * @ibdev: the infiniband device this packet came in on
 * @mad_flags: MAD flags
 * @port_num: the port number this packet came in on
 * @in_wc: the work completion entry for this packet
 * @in_grh: the global route header for this packet
 * @in_mad: the incoming MAD
 * @out_mad: any outgoing MAD reply
 *
 * Returns IB_MAD_RESULT_SUCCESS if this is a MAD that we are not
 * interested in processing.
 *
 * Note that the verbs framework has already done the MAD sanity checks,
 * and hop count/pointer updating for IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
 * MADs.
 *
 * This is called by the ib_mad module.
 */
int ipath_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
		      struct ib_wc *in_wc, struct ib_grh *in_grh,
		      struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	int ret;

	switch (in_mad->mad_hdr.mgmt_class) {
	case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
	case IB_MGMT_CLASS_SUBN_LID_ROUTED:
		ret = process_subn(ibdev, mad_flags, port_num,
				   in_mad, out_mad);
		goto bail;
	case IB_MGMT_CLASS_PERF_MGMT:
		ret = process_perf(ibdev, port_num, in_mad, out_mad);
		goto bail;
	default:
		ret = IB_MAD_RESULT_SUCCESS;
	}

bail:
	return ret;
}