/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "core_priv.h"

#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>

#include <rdma/ib_mad.h>
#include <rdma/ib_pma.h>
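
/*
 * Overview of the sysfs tree built by this file (directory and attribute
 * names are the ones used in the code below):
 *
 *   /sys/class/infiniband/<device>/            node_type, node_guid, fw_ver, ...
 *   /sys/class/infiniband/<device>/ports/<N>/  state, lid, rate, phys_state, ...
 *       gids/, pkeys/         one attribute per GID / P_Key table entry
 *       gid_attrs/ndevs/      net device associated with each GID
 *       gid_attrs/types/      GID type (e.g. RoCE v1/v2) for each entry
 *       counters/             PMA counters read via Performance MADs
 *       hw_counters/          driver-provided rdma_hw_stats (also created at
 *                             the device level for device-wide counters)
 *
 * struct ib_port below backs one ports/<N> kobject, and struct gid_attr_group
 * backs the gid_attrs/ subdirectory beneath it.
 */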
struct ib_port;

struct gid_attr_group {
        struct ib_port          *port;
        struct kobject          kobj;
        struct attribute_group  ndev;
        struct attribute_group  type;
};

struct ib_port {
        struct kobject         kobj;
        struct ib_device      *ibdev;
        struct gid_attr_group *gid_attr_group;
        struct attribute_group gid_group;
        struct attribute_group pkey_group;
        struct attribute_group *pma_table;
        struct attribute_group *hw_stats_ag;
        struct rdma_hw_stats   *hw_stats;
        u8                     port_num;
};
struct port_attribute {
        struct attribute attr;
        ssize_t (*show)(struct ib_port *, struct port_attribute *, char *buf);
        ssize_t (*store)(struct ib_port *, struct port_attribute *,
                         const char *buf, size_t count);
};
#define PORT_ATTR(_name, _mode, _show, _store) \
struct port_attribute port_attr_##_name = __ATTR(_name, _mode, _show, _store)

#define PORT_ATTR_RO(_name) \
struct port_attribute port_attr_##_name = __ATTR_RO(_name)
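
/*
 * For reference, PORT_ATTR_RO(state) expands to roughly
 *
 *      struct port_attribute port_attr_state =
 *              __ATTR(state, 0444, state_show, NULL);
 *
 * so every read-only attribute declared this way pairs with a <name>_show()
 * handler defined below, dispatched through port_sysfs_ops.
 */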
struct port_table_attribute {
        struct port_attribute   attr;
        char                    name[8];
        int                     index;
        __be16                  attr_id;
};
struct hw_stats_attribute {
        struct attribute        attr;
        ssize_t                 (*show)(struct kobject *kobj,
                                        struct attribute *attr, char *buf);
        ssize_t                 (*store)(struct kobject *kobj,
                                         struct attribute *attr,
                                         const char *buf,
                                         size_t count);
        int                     index;
        u8                      port_num;
};
static ssize_t port_attr_show(struct kobject *kobj,
                              struct attribute *attr, char *buf)
{
        struct port_attribute *port_attr =
                container_of(attr, struct port_attribute, attr);
        struct ib_port *p = container_of(kobj, struct ib_port, kobj);

        if (!port_attr->show)
                return -EIO;

        return port_attr->show(p, port_attr, buf);
}
static ssize_t port_attr_store(struct kobject *kobj,
                               struct attribute *attr,
                               const char *buf, size_t count)
{
        struct port_attribute *port_attr =
                container_of(attr, struct port_attribute, attr);
        struct ib_port *p = container_of(kobj, struct ib_port, kobj);

        if (!port_attr->store)
                return -EIO;

        return port_attr->store(p, port_attr, buf, count);
}
static const struct sysfs_ops port_sysfs_ops = {
        .show   = port_attr_show,
        .store  = port_attr_store
};
static ssize_t gid_attr_show(struct kobject *kobj,
                             struct attribute *attr, char *buf)
{
        struct port_attribute *port_attr =
                container_of(attr, struct port_attribute, attr);
        struct ib_port *p = container_of(kobj, struct gid_attr_group,
                                         kobj)->port;

        if (!port_attr->show)
                return -EIO;

        return port_attr->show(p, port_attr, buf);
}
static const struct sysfs_ops gid_attr_sysfs_ops = {
        .show = gid_attr_show
};
static ssize_t state_show(struct ib_port *p, struct port_attribute *unused,
                          char *buf)
{
        struct ib_port_attr attr;
        ssize_t ret;

        static const char *state_name[] = {
                [IB_PORT_NOP]           = "NOP",
                [IB_PORT_DOWN]          = "DOWN",
                [IB_PORT_INIT]          = "INIT",
                [IB_PORT_ARMED]         = "ARMED",
                [IB_PORT_ACTIVE]        = "ACTIVE",
                [IB_PORT_ACTIVE_DEFER]  = "ACTIVE_DEFER"
        };

        ret = ib_query_port(p->ibdev, p->port_num, &attr);
        if (ret)
                return ret;

        return sprintf(buf, "%d: %s\n", attr.state,
                       attr.state >= 0 && attr.state < ARRAY_SIZE(state_name) ?
                       state_name[attr.state] : "UNKNOWN");
}
static ssize_t lid_show(struct ib_port *p, struct port_attribute *unused,
                        char *buf)
{
        struct ib_port_attr attr;
        ssize_t ret;

        ret = ib_query_port(p->ibdev, p->port_num, &attr);
        if (ret)
                return ret;

        return sprintf(buf, "0x%x\n", attr.lid);
}
static ssize_t lid_mask_count_show(struct ib_port *p,
                                   struct port_attribute *unused,
                                   char *buf)
{
        struct ib_port_attr attr;
        ssize_t ret;

        ret = ib_query_port(p->ibdev, p->port_num, &attr);
        if (ret)
                return ret;

        return sprintf(buf, "%d\n", attr.lmc);
}
static ssize_t sm_lid_show(struct ib_port *p, struct port_attribute *unused,
                           char *buf)
{
        struct ib_port_attr attr;
        ssize_t ret;

        ret = ib_query_port(p->ibdev, p->port_num, &attr);
        if (ret)
                return ret;

        return sprintf(buf, "0x%x\n", attr.sm_lid);
}
static ssize_t sm_sl_show(struct ib_port *p, struct port_attribute *unused,
                          char *buf)
{
        struct ib_port_attr attr;
        ssize_t ret;

        ret = ib_query_port(p->ibdev, p->port_num, &attr);
        if (ret)
                return ret;

        return sprintf(buf, "%d\n", attr.sm_sl);
}
static ssize_t cap_mask_show(struct ib_port *p, struct port_attribute *unused,
                             char *buf)
{
        struct ib_port_attr attr;
        ssize_t ret;

        ret = ib_query_port(p->ibdev, p->port_num, &attr);
        if (ret)
                return ret;

        return sprintf(buf, "0x%08x\n", attr.port_cap_flags);
}
static ssize_t rate_show(struct ib_port *p, struct port_attribute *unused,
                         char *buf)
{
        struct ib_port_attr attr;
        char *speed = "";
        int rate;               /* in deci-Gb/sec */
        ssize_t ret;

        ret = ib_query_port(p->ibdev, p->port_num, &attr);
        if (ret)
                return ret;

        switch (attr.active_speed) {
        case IB_SPEED_DDR:
                speed = " DDR";
                rate = 50;
                break;
        case IB_SPEED_QDR:
                speed = " QDR";
                rate = 100;
                break;
        case IB_SPEED_FDR10:
                speed = " FDR10";
                rate = 100;
                break;
        case IB_SPEED_FDR:
                speed = " FDR";
                rate = 140;
                break;
        case IB_SPEED_EDR:
                speed = " EDR";
                rate = 250;
                break;
        case IB_SPEED_SDR:
        default:                /* default to SDR for invalid rates */
                rate = 25;
                break;
        }

        rate *= ib_width_enum_to_int(attr.active_width);
        if (rate < 0)
                return -EINVAL;

        return sprintf(buf, "%d%s Gb/sec (%dX%s)\n",
                       rate / 10, rate % 10 ? ".5" : "",
                       ib_width_enum_to_int(attr.active_width), speed);
}
static ssize_t phys_state_show(struct ib_port *p, struct port_attribute *unused,
                               char *buf)
{
        struct ib_port_attr attr;
        ssize_t ret;

        ret = ib_query_port(p->ibdev, p->port_num, &attr);
        if (ret)
                return ret;

        switch (attr.phys_state) {
        case 1:  return sprintf(buf, "1: Sleep\n");
        case 2:  return sprintf(buf, "2: Polling\n");
        case 3:  return sprintf(buf, "3: Disabled\n");
        case 4:  return sprintf(buf, "4: PortConfigurationTraining\n");
        case 5:  return sprintf(buf, "5: LinkUp\n");
        case 6:  return sprintf(buf, "6: LinkErrorRecovery\n");
        case 7:  return sprintf(buf, "7: Phy Test\n");
        default: return sprintf(buf, "%d: <unknown>\n", attr.phys_state);
        }
}
static ssize_t link_layer_show(struct ib_port *p, struct port_attribute *unused,
                               char *buf)
{
        switch (rdma_port_get_link_layer(p->ibdev, p->port_num)) {
        case IB_LINK_LAYER_INFINIBAND:
                return sprintf(buf, "%s\n", "InfiniBand");
        case IB_LINK_LAYER_ETHERNET:
                return sprintf(buf, "%s\n", "Ethernet");
        default:
                return sprintf(buf, "%s\n", "Unknown");
        }
}
static PORT_ATTR_RO(state);
static PORT_ATTR_RO(lid);
static PORT_ATTR_RO(lid_mask_count);
static PORT_ATTR_RO(sm_lid);
static PORT_ATTR_RO(sm_sl);
static PORT_ATTR_RO(cap_mask);
static PORT_ATTR_RO(rate);
static PORT_ATTR_RO(phys_state);
static PORT_ATTR_RO(link_layer);
static struct attribute *port_default_attrs[] = {
        &port_attr_state.attr,
        &port_attr_lid.attr,
        &port_attr_lid_mask_count.attr,
        &port_attr_sm_lid.attr,
        &port_attr_sm_sl.attr,
        &port_attr_cap_mask.attr,
        &port_attr_rate.attr,
        &port_attr_phys_state.attr,
        &port_attr_link_layer.attr,
        NULL
};
static size_t print_ndev(struct ib_gid_attr *gid_attr, char *buf)
{
        if (!gid_attr->ndev)
                return -EINVAL;

        return sprintf(buf, "%s\n", gid_attr->ndev->name);
}

static size_t print_gid_type(struct ib_gid_attr *gid_attr, char *buf)
{
        return sprintf(buf, "%s\n", ib_cache_gid_type_str(gid_attr->gid_type));
}
static ssize_t _show_port_gid_attr(struct ib_port *p,
                                   struct port_attribute *attr,
                                   char *buf,
                                   size_t (*print)(struct ib_gid_attr *gid_attr,
                                                   char *buf))
{
        struct port_table_attribute *tab_attr =
                container_of(attr, struct port_table_attribute, attr);
        union ib_gid gid;
        struct ib_gid_attr gid_attr = {};
        ssize_t ret;

        ret = ib_query_gid(p->ibdev, p->port_num, tab_attr->index, &gid,
                           &gid_attr);
        if (ret)
                goto err;

        ret = print(&gid_attr, buf);

err:
        if (gid_attr.ndev)
                dev_put(gid_attr.ndev);
        return ret;
}
static ssize_t show_port_gid(struct ib_port *p, struct port_attribute *attr,
                             char *buf)
{
        struct port_table_attribute *tab_attr =
                container_of(attr, struct port_table_attribute, attr);
        union ib_gid *pgid;
        union ib_gid gid;
        ssize_t ret;

        ret = ib_query_gid(p->ibdev, p->port_num, tab_attr->index, &gid, NULL);

        /* If reading the GID fails, it is likely because the GID entry is
         * empty (invalid) or reserved.  User space expects to be able to
         * read every GID table entry whose index is within the table size,
         * and administrative/debugging tools stop querying the remaining
         * entries as soon as one index returns an error.  To keep those
         * tools working, return the zero GID on failure, as before; this
         * preserves backward compatibility.
         */
        if (ret)
                pgid = &zgid;
        else
                pgid = &gid;
        return sprintf(buf, "%pI6\n", pgid->raw);
}
static ssize_t show_port_gid_attr_ndev(struct ib_port *p,
                                       struct port_attribute *attr, char *buf)
{
        return _show_port_gid_attr(p, attr, buf, print_ndev);
}

static ssize_t show_port_gid_attr_gid_type(struct ib_port *p,
                                           struct port_attribute *attr,
                                           char *buf)
{
        return _show_port_gid_attr(p, attr, buf, print_gid_type);
}
static ssize_t show_port_pkey(struct ib_port *p, struct port_attribute *attr,
                              char *buf)
{
        struct port_table_attribute *tab_attr =
                container_of(attr, struct port_table_attribute, attr);
        u16 pkey;
        ssize_t ret;

        ret = ib_query_pkey(p->ibdev, p->port_num, tab_attr->index, &pkey);
        if (ret)
                return ret;

        return sprintf(buf, "0x%04x\n", pkey);
}
#define PORT_PMA_ATTR(_name, _counter, _width, _offset)                \
struct port_table_attribute port_pma_attr_##_name = {                  \
        .attr  = __ATTR(_name, S_IRUGO, show_pma_counter, NULL),       \
        .index = (_offset) | ((_width) << 16) | ((_counter) << 24),    \
        .attr_id = IB_PMA_PORT_COUNTERS,                               \
}

#define PORT_PMA_ATTR_EXT(_name, _width, _offset)                      \
struct port_table_attribute port_pma_attr_ext_##_name = {              \
        .attr  = __ATTR(_name, S_IRUGO, show_pma_counter, NULL),       \
        .index = (_offset) | ((_width) << 16),                         \
        .attr_id = IB_PMA_PORT_COUNTERS_EXT,                           \
}
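
/*
 * The .index field above packs three values: the counter's bit offset within
 * the PMA attribute payload (bits 0-15), the counter width in bits
 * (bits 16-23) and, for the basic PortCounters set, the counter number
 * (bits 24 and up).  show_pma_counter() below only decodes the offset and
 * the width.
 */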
/*
 * Get a Perfmgmt MAD block of data.
 * Returns error code or the number of bytes retrieved.
 */
static int get_perf_mad(struct ib_device *dev, int port_num, __be16 attr,
                        void *data, int offset, size_t size)
{
        struct ib_mad *in_mad;
        struct ib_mad *out_mad;
        size_t mad_size = sizeof(*out_mad);
        u16 out_mad_pkey_index = 0;
        ssize_t ret;

        if (!dev->process_mad)
                return -ENOSYS;

        in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
        if (!in_mad || !out_mad) {
                ret = -ENOMEM;
                goto out;
        }

        in_mad->mad_hdr.base_version  = 1;
        in_mad->mad_hdr.mgmt_class    = IB_MGMT_CLASS_PERF_MGMT;
        in_mad->mad_hdr.class_version = 1;
        in_mad->mad_hdr.method        = IB_MGMT_METHOD_GET;
        in_mad->mad_hdr.attr_id       = attr;

        if (attr != IB_PMA_CLASS_PORT_INFO)
                in_mad->data[41] = port_num;    /* PortSelect field */

        if ((dev->process_mad(dev, IB_MAD_IGNORE_MKEY,
                              port_num, NULL, NULL,
                              (const struct ib_mad_hdr *)in_mad, mad_size,
                              (struct ib_mad_hdr *)out_mad, &mad_size,
                              &out_mad_pkey_index) &
             (IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY)) !=
            (IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY)) {
                ret = -EINVAL;
                goto out;
        }
        memcpy(data, out_mad->data + offset, size);
        ret = size;
out:
        kfree(in_mad);
        kfree(out_mad);
        return ret;
}
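
/*
 * Note on offsets: callers pass "40 + offset / 8" to get_perf_mad() because
 * the PMA counter payload starts 40 bytes into the MAD data area (the
 * PortSelect byte written above at data[41] is byte 1 of that payload), and
 * the offsets encoded via PORT_PMA_ATTR()/PORT_PMA_ATTR_EXT() are bit
 * offsets within that payload.
 */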
static ssize_t show_pma_counter(struct ib_port *p, struct port_attribute *attr,
                                char *buf)
{
        struct port_table_attribute *tab_attr =
                container_of(attr, struct port_table_attribute, attr);
        int offset = tab_attr->index & 0xffff;
        int width  = (tab_attr->index >> 16) & 0xff;
        ssize_t ret;
        u8 data[8];

        ret = get_perf_mad(p->ibdev, p->port_num, tab_attr->attr_id, &data,
                           40 + offset / 8, sizeof(data));
        if (ret < 0)
                return sprintf(buf, "N/A (no PMA)\n");

        switch (width) {
        case 4:
                ret = sprintf(buf, "%u\n", (*data >>
                                            (4 - (offset % 8))) & 0xf);
                break;
        case 8:
                ret = sprintf(buf, "%u\n", *data);
                break;
        case 16:
                ret = sprintf(buf, "%u\n",
                              be16_to_cpup((__be16 *)data));
                break;
        case 32:
                ret = sprintf(buf, "%u\n",
                              be32_to_cpup((__be32 *)data));
                break;
        case 64:
                ret = sprintf(buf, "%llu\n",
                              be64_to_cpup((__be64 *)data));
                break;
        default:
                ret = 0;
        }

        return ret;
}
static PORT_PMA_ATTR(symbol_error, 0, 16, 32);
static PORT_PMA_ATTR(link_error_recovery, 1, 8, 48);
static PORT_PMA_ATTR(link_downed, 2, 8, 56);
static PORT_PMA_ATTR(port_rcv_errors, 3, 16, 64);
static PORT_PMA_ATTR(port_rcv_remote_physical_errors, 4, 16, 80);
static PORT_PMA_ATTR(port_rcv_switch_relay_errors, 5, 16, 96);
static PORT_PMA_ATTR(port_xmit_discards, 6, 16, 112);
static PORT_PMA_ATTR(port_xmit_constraint_errors, 7, 8, 128);
static PORT_PMA_ATTR(port_rcv_constraint_errors, 8, 8, 136);
static PORT_PMA_ATTR(local_link_integrity_errors, 9, 4, 152);
static PORT_PMA_ATTR(excessive_buffer_overrun_errors, 10, 4, 156);
static PORT_PMA_ATTR(VL15_dropped, 11, 16, 176);
static PORT_PMA_ATTR(port_xmit_data, 12, 32, 192);
static PORT_PMA_ATTR(port_rcv_data, 13, 32, 224);
static PORT_PMA_ATTR(port_xmit_packets, 14, 32, 256);
static PORT_PMA_ATTR(port_rcv_packets, 15, 32, 288);
static PORT_PMA_ATTR(port_xmit_wait, 0, 32, 320);
/*
 * Counters added by extended set
 */
static PORT_PMA_ATTR_EXT(port_xmit_data, 64, 64);
static PORT_PMA_ATTR_EXT(port_rcv_data, 64, 128);
static PORT_PMA_ATTR_EXT(port_xmit_packets, 64, 192);
static PORT_PMA_ATTR_EXT(port_rcv_packets, 64, 256);
static PORT_PMA_ATTR_EXT(unicast_xmit_packets, 64, 320);
static PORT_PMA_ATTR_EXT(unicast_rcv_packets, 64, 384);
static PORT_PMA_ATTR_EXT(multicast_xmit_packets, 64, 448);
static PORT_PMA_ATTR_EXT(multicast_rcv_packets, 64, 512);
static struct attribute *pma_attrs[] = {
        &port_pma_attr_symbol_error.attr.attr,
        &port_pma_attr_link_error_recovery.attr.attr,
        &port_pma_attr_link_downed.attr.attr,
        &port_pma_attr_port_rcv_errors.attr.attr,
        &port_pma_attr_port_rcv_remote_physical_errors.attr.attr,
        &port_pma_attr_port_rcv_switch_relay_errors.attr.attr,
        &port_pma_attr_port_xmit_discards.attr.attr,
        &port_pma_attr_port_xmit_constraint_errors.attr.attr,
        &port_pma_attr_port_rcv_constraint_errors.attr.attr,
        &port_pma_attr_local_link_integrity_errors.attr.attr,
        &port_pma_attr_excessive_buffer_overrun_errors.attr.attr,
        &port_pma_attr_VL15_dropped.attr.attr,
        &port_pma_attr_port_xmit_data.attr.attr,
        &port_pma_attr_port_rcv_data.attr.attr,
        &port_pma_attr_port_xmit_packets.attr.attr,
        &port_pma_attr_port_rcv_packets.attr.attr,
        &port_pma_attr_port_xmit_wait.attr.attr,
        NULL
};
static struct attribute *pma_attrs_ext[] = {
        &port_pma_attr_symbol_error.attr.attr,
        &port_pma_attr_link_error_recovery.attr.attr,
        &port_pma_attr_link_downed.attr.attr,
        &port_pma_attr_port_rcv_errors.attr.attr,
        &port_pma_attr_port_rcv_remote_physical_errors.attr.attr,
        &port_pma_attr_port_rcv_switch_relay_errors.attr.attr,
        &port_pma_attr_port_xmit_discards.attr.attr,
        &port_pma_attr_port_xmit_constraint_errors.attr.attr,
        &port_pma_attr_port_rcv_constraint_errors.attr.attr,
        &port_pma_attr_local_link_integrity_errors.attr.attr,
        &port_pma_attr_excessive_buffer_overrun_errors.attr.attr,
        &port_pma_attr_VL15_dropped.attr.attr,
        &port_pma_attr_ext_port_xmit_data.attr.attr,
        &port_pma_attr_ext_port_rcv_data.attr.attr,
        &port_pma_attr_ext_port_xmit_packets.attr.attr,
        &port_pma_attr_port_xmit_wait.attr.attr,
        &port_pma_attr_ext_port_rcv_packets.attr.attr,
        &port_pma_attr_ext_unicast_rcv_packets.attr.attr,
        &port_pma_attr_ext_unicast_xmit_packets.attr.attr,
        &port_pma_attr_ext_multicast_rcv_packets.attr.attr,
        &port_pma_attr_ext_multicast_xmit_packets.attr.attr,
        NULL
};
static struct attribute *pma_attrs_noietf[] = {
        &port_pma_attr_symbol_error.attr.attr,
        &port_pma_attr_link_error_recovery.attr.attr,
        &port_pma_attr_link_downed.attr.attr,
        &port_pma_attr_port_rcv_errors.attr.attr,
        &port_pma_attr_port_rcv_remote_physical_errors.attr.attr,
        &port_pma_attr_port_rcv_switch_relay_errors.attr.attr,
        &port_pma_attr_port_xmit_discards.attr.attr,
        &port_pma_attr_port_xmit_constraint_errors.attr.attr,
        &port_pma_attr_port_rcv_constraint_errors.attr.attr,
        &port_pma_attr_local_link_integrity_errors.attr.attr,
        &port_pma_attr_excessive_buffer_overrun_errors.attr.attr,
        &port_pma_attr_VL15_dropped.attr.attr,
        &port_pma_attr_ext_port_xmit_data.attr.attr,
        &port_pma_attr_ext_port_rcv_data.attr.attr,
        &port_pma_attr_ext_port_xmit_packets.attr.attr,
        &port_pma_attr_ext_port_rcv_packets.attr.attr,
        &port_pma_attr_port_xmit_wait.attr.attr,
        NULL
};
static struct attribute_group pma_group = {
        .name  = "counters",
        .attrs = pma_attrs
};

static struct attribute_group pma_group_ext = {
        .name  = "counters",
        .attrs = pma_attrs_ext
};

static struct attribute_group pma_group_noietf = {
        .name  = "counters",
        .attrs = pma_attrs_noietf
};
static void ib_port_release(struct kobject *kobj)
{
        struct ib_port *p = container_of(kobj, struct ib_port, kobj);
        struct attribute *a;
        int i;

        if (p->gid_group.attrs) {
                for (i = 0; (a = p->gid_group.attrs[i]); ++i)
                        kfree(a);

                kfree(p->gid_group.attrs);
        }

        if (p->pkey_group.attrs) {
                for (i = 0; (a = p->pkey_group.attrs[i]); ++i)
                        kfree(a);

                kfree(p->pkey_group.attrs);
        }

        kfree(p);
}
static void ib_port_gid_attr_release(struct kobject *kobj)
{
        struct gid_attr_group *g = container_of(kobj, struct gid_attr_group,
                                                kobj);
        struct attribute *a;
        int i;

        if (g->ndev.attrs) {
                for (i = 0; (a = g->ndev.attrs[i]); ++i)
                        kfree(a);

                kfree(g->ndev.attrs);
        }

        if (g->type.attrs) {
                for (i = 0; (a = g->type.attrs[i]); ++i)
                        kfree(a);

                kfree(g->type.attrs);
        }

        kfree(g);
}
static struct kobj_type port_type = {
        .release       = ib_port_release,
        .sysfs_ops     = &port_sysfs_ops,
        .default_attrs = port_default_attrs
};

static struct kobj_type gid_attr_type = {
        .sysfs_ops = &gid_attr_sysfs_ops,
        .release   = ib_port_gid_attr_release
};
static struct attribute **
alloc_group_attrs(ssize_t (*show)(struct ib_port *,
                                  struct port_attribute *, char *buf),
                  int len)
{
        struct attribute **tab_attr;
        struct port_table_attribute *element;
        int i;

        tab_attr = kcalloc(1 + len, sizeof(struct attribute *), GFP_KERNEL);
        if (!tab_attr)
                return NULL;

        for (i = 0; i < len; i++) {
                element = kzalloc(sizeof(struct port_table_attribute),
                                  GFP_KERNEL);
                if (!element)
                        goto err;

                if (snprintf(element->name, sizeof(element->name),
                             "%d", i) >= sizeof(element->name)) {
                        kfree(element);
                        goto err;
                }

                element->attr.attr.name  = element->name;
                element->attr.attr.mode  = S_IRUGO;
                element->attr.show       = show;
                element->index           = i;
                sysfs_attr_init(&element->attr.attr);

                tab_attr[i] = &element->attr.attr;
        }

        return tab_attr;

err:
        while (--i >= 0)
                kfree(tab_attr[i]);
        kfree(tab_attr);
        return NULL;
}
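
/*
 * alloc_group_attrs() returns a NULL-terminated attribute array (note the
 * "1 + len" element allocation) in which entry i is a heap-allocated
 * port_table_attribute named "<i>".  Ownership passes to the caller's
 * attribute_group; the elements are freed again in ib_port_release() and
 * ib_port_gid_attr_release() above.
 */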
/*
 * Figure out which counter table to use depending on
 * the device capabilities.
 */
static struct attribute_group *get_counter_table(struct ib_device *dev,
                                                 int port_num)
{
        struct ib_class_port_info cpi;

        if (get_perf_mad(dev, port_num, IB_PMA_CLASS_PORT_INFO,
                         &cpi, 40, sizeof(cpi)) >= 0) {
                if (cpi.capability_mask & IB_PMA_CLASS_CAP_EXT_WIDTH)
                        /* We have extended counters */
                        return &pma_group_ext;

                if (cpi.capability_mask & IB_PMA_CLASS_CAP_EXT_WIDTH_NOIETF)
                        /* But not the IETF ones */
                        return &pma_group_noietf;
        }

        /* Fall back to normal counters */
        return &pma_group;
}
static int update_hw_stats(struct ib_device *dev, struct rdma_hw_stats *stats,
                           u8 port_num, int index)
{
        int ret;

        if (time_is_after_eq_jiffies(stats->timestamp + stats->lifespan))
                return 0;
        ret = dev->get_hw_stats(dev, stats, port_num, index);
        if (ret < 0)
                return ret;
        if (ret == stats->num_counters)
                stats->timestamp = jiffies;

        return 0;
}
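
/*
 * Hardware counter reads are cached: a read that arrives within
 * stats->lifespan jiffies of the previous refresh returns the cached values,
 * otherwise ->get_hw_stats() is called and the timestamp is only advanced
 * when the driver refreshed the full set of counters.
 */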
static ssize_t print_hw_stat(struct rdma_hw_stats *stats, int index, char *buf)
{
        return sprintf(buf, "%llu\n", stats->value[index]);
}
static ssize_t show_hw_stats(struct kobject *kobj, struct attribute *attr,
                             char *buf)
{
        struct ib_device *dev;
        struct ib_port *port;
        struct hw_stats_attribute *hsa;
        struct rdma_hw_stats *stats;
        int ret;

        hsa = container_of(attr, struct hw_stats_attribute, attr);
        if (!hsa->port_num) {
                dev = container_of((struct device *)kobj,
                                   struct ib_device, dev);
                stats = dev->hw_stats;
        } else {
                port = container_of(kobj, struct ib_port, kobj);
                dev = port->ibdev;
                stats = port->hw_stats;
        }
        mutex_lock(&stats->lock);
        ret = update_hw_stats(dev, stats, hsa->port_num, hsa->index);
        if (ret)
                goto unlock;
        ret = print_hw_stat(stats, hsa->index, buf);
unlock:
        mutex_unlock(&stats->lock);

        return ret;
}
static ssize_t show_stats_lifespan(struct kobject *kobj,
                                   struct attribute *attr,
                                   char *buf)
{
        struct hw_stats_attribute *hsa;
        struct rdma_hw_stats *stats;
        int msecs;

        hsa = container_of(attr, struct hw_stats_attribute, attr);
        if (!hsa->port_num) {
                struct ib_device *dev = container_of((struct device *)kobj,
                                                     struct ib_device, dev);

                stats = dev->hw_stats;
        } else {
                struct ib_port *p = container_of(kobj, struct ib_port, kobj);

                stats = p->hw_stats;
        }

        mutex_lock(&stats->lock);
        msecs = jiffies_to_msecs(stats->lifespan);
        mutex_unlock(&stats->lock);

        return sprintf(buf, "%d\n", msecs);
}
static ssize_t set_stats_lifespan(struct kobject *kobj,
                                  struct attribute *attr,
                                  const char *buf, size_t count)
{
        struct hw_stats_attribute *hsa;
        struct rdma_hw_stats *stats;
        int msecs;
        int jiffies;
        int ret;

        ret = kstrtoint(buf, 10, &msecs);
        if (ret)
                return ret;
        if (msecs < 0 || msecs > 10000)
                return -EINVAL;
        jiffies = msecs_to_jiffies(msecs);
        hsa = container_of(attr, struct hw_stats_attribute, attr);
        if (!hsa->port_num) {
                struct ib_device *dev = container_of((struct device *)kobj,
                                                     struct ib_device, dev);

                stats = dev->hw_stats;
        } else {
                struct ib_port *p = container_of(kobj, struct ib_port, kobj);

                stats = p->hw_stats;
        }

        mutex_lock(&stats->lock);
        stats->lifespan = jiffies;
        mutex_unlock(&stats->lock);

        return count;
}
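
/*
 * The writable "lifespan" attribute therefore takes a value in milliseconds,
 * limited to the range 0-10000, and stores it internally as jiffies under
 * stats->lock; show_stats_lifespan() converts it back for reading.
 */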
static void free_hsag(struct kobject *kobj, struct attribute_group *attr_group)
{
        struct attribute **attr;

        sysfs_remove_group(kobj, attr_group);

        for (attr = attr_group->attrs; *attr; attr++)
                kfree(*attr);
        kfree(attr_group);
}
static struct attribute *alloc_hsa(int index, u8 port_num, const char *name)
{
        struct hw_stats_attribute *hsa;

        hsa = kmalloc(sizeof(*hsa), GFP_KERNEL);
        if (!hsa)
                return NULL;

        hsa->attr.name = (char *)name;
        hsa->attr.mode = S_IRUGO;
        hsa->show      = show_hw_stats;
        hsa->store     = NULL;
        hsa->index     = index;
        hsa->port_num  = port_num;

        return &hsa->attr;
}
static struct attribute *alloc_hsa_lifespan(char *name, u8 port_num)
{
        struct hw_stats_attribute *hsa;

        hsa = kmalloc(sizeof(*hsa), GFP_KERNEL);
        if (!hsa)
                return NULL;

        hsa->attr.name = name;
        hsa->attr.mode = S_IWUSR | S_IRUGO;
        hsa->show      = show_stats_lifespan;
        hsa->store     = set_stats_lifespan;
        hsa->index     = 0;
        hsa->port_num  = port_num;

        return &hsa->attr;
}
static void setup_hw_stats(struct ib_device *device, struct ib_port *port,
                           u8 port_num)
{
        struct attribute_group *hsag;
        struct rdma_hw_stats *stats;
        int i, ret;

        stats = device->alloc_hw_stats(device, port_num);

        if (!stats)
                return;

        if (!stats->names || stats->num_counters <= 0)
                goto err_free_stats;

        /*
         * Two extra attribute elements here, one for the lifespan entry and
         * one to NULL terminate the list for the sysfs core code
         */
        hsag = kzalloc(sizeof(*hsag) +
                       sizeof(void *) * (stats->num_counters + 2),
                       GFP_KERNEL);
        if (!hsag)
                goto err_free_stats;

        ret = device->get_hw_stats(device, stats, port_num,
                                   stats->num_counters);
        if (ret != stats->num_counters)
                goto err_free_hsag;

        stats->timestamp = jiffies;

        hsag->name = "hw_counters";
        hsag->attrs = (void *)hsag + sizeof(*hsag);

        for (i = 0; i < stats->num_counters; i++) {
                hsag->attrs[i] = alloc_hsa(i, port_num, stats->names[i]);
                if (!hsag->attrs[i])
                        goto err;
                sysfs_attr_init(hsag->attrs[i]);
        }

        mutex_init(&stats->lock);
        /* treat an error here as non-fatal */
        hsag->attrs[i] = alloc_hsa_lifespan("lifespan", port_num);
        if (hsag->attrs[i])
                sysfs_attr_init(hsag->attrs[i]);

        if (port) {
                struct kobject *kobj = &port->kobj;
                ret = sysfs_create_group(kobj, hsag);
                if (ret)
                        goto err;
                port->hw_stats_ag = hsag;
                port->hw_stats = stats;
        } else {
                struct kobject *kobj = &device->dev.kobj;
                ret = sysfs_create_group(kobj, hsag);
                if (ret)
                        goto err;
                device->hw_stats_ag = hsag;
                device->hw_stats = stats;
        }

        return;

err:
        for (; i >= 0; i--)
                kfree(hsag->attrs[i]);
err_free_hsag:
        kfree(hsag);
err_free_stats:
        kfree(stats);
        return;
}
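
/*
 * setup_hw_stats() creates the "hw_counters" group either under the port
 * kobject (per-port statistics, port != NULL) or under the device kobject
 * (device-wide statistics, port == NULL); the hw_stats/hw_stats_ag pointers
 * it records are what the teardown paths below use to free everything again.
 */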
static int add_port(struct ib_device *device, int port_num,
                    int (*port_callback)(struct ib_device *,
                                         u8, struct kobject *))
{
        struct ib_port *p;
        struct ib_port_attr attr;
        int i;
        int ret;

        ret = ib_query_port(device, port_num, &attr);
        if (ret)
                return ret;

        p = kzalloc(sizeof *p, GFP_KERNEL);
        if (!p)
                return -ENOMEM;

        p->ibdev    = device;
        p->port_num = port_num;

        ret = kobject_init_and_add(&p->kobj, &port_type,
                                   device->ports_parent,
                                   "%d", port_num);
        if (ret) {
                kfree(p);
                return ret;
        }

        p->gid_attr_group = kzalloc(sizeof(*p->gid_attr_group), GFP_KERNEL);
        if (!p->gid_attr_group) {
                ret = -ENOMEM;
                goto err_put;
        }

        p->gid_attr_group->port = p;
        ret = kobject_init_and_add(&p->gid_attr_group->kobj, &gid_attr_type,
                                   &p->kobj, "gid_attrs");
        if (ret) {
                kfree(p->gid_attr_group);
                goto err_put;
        }

        p->pma_table = get_counter_table(device, port_num);
        ret = sysfs_create_group(&p->kobj, p->pma_table);
        if (ret)
                goto err_put_gid_attrs;

        p->gid_group.name  = "gids";
        p->gid_group.attrs = alloc_group_attrs(show_port_gid, attr.gid_tbl_len);
        if (!p->gid_group.attrs) {
                ret = -ENOMEM;
                goto err_remove_pma;
        }

        ret = sysfs_create_group(&p->kobj, &p->gid_group);
        if (ret)
                goto err_free_gid;

        p->gid_attr_group->ndev.name  = "ndevs";
        p->gid_attr_group->ndev.attrs = alloc_group_attrs(show_port_gid_attr_ndev,
                                                          attr.gid_tbl_len);
        if (!p->gid_attr_group->ndev.attrs) {
                ret = -ENOMEM;
                goto err_remove_gid;
        }

        ret = sysfs_create_group(&p->gid_attr_group->kobj,
                                 &p->gid_attr_group->ndev);
        if (ret)
                goto err_free_gid_ndev;

        p->gid_attr_group->type.name  = "types";
        p->gid_attr_group->type.attrs = alloc_group_attrs(show_port_gid_attr_gid_type,
                                                          attr.gid_tbl_len);
        if (!p->gid_attr_group->type.attrs) {
                ret = -ENOMEM;
                goto err_remove_gid_ndev;
        }

        ret = sysfs_create_group(&p->gid_attr_group->kobj,
                                 &p->gid_attr_group->type);
        if (ret)
                goto err_free_gid_type;

        p->pkey_group.name  = "pkeys";
        p->pkey_group.attrs = alloc_group_attrs(show_port_pkey,
                                                attr.pkey_tbl_len);
        if (!p->pkey_group.attrs) {
                ret = -ENOMEM;
                goto err_remove_gid_type;
        }

        ret = sysfs_create_group(&p->kobj, &p->pkey_group);
        if (ret)
                goto err_free_pkey;

        if (port_callback) {
                ret = port_callback(device, port_num, &p->kobj);
                if (ret)
                        goto err_remove_pkey;
        }

        /*
         * If port == 0, it means we have only one port and the parent
         * device, not this port device, should be the holder of the
         * hw_stats
         */
        if (device->alloc_hw_stats && port_num)
                setup_hw_stats(device, p, port_num);

        list_add_tail(&p->kobj.entry, &device->port_list);

        kobject_uevent(&p->kobj, KOBJ_ADD);
        return 0;

err_remove_pkey:
        sysfs_remove_group(&p->kobj, &p->pkey_group);

err_free_pkey:
        for (i = 0; i < attr.pkey_tbl_len; ++i)
                kfree(p->pkey_group.attrs[i]);

        kfree(p->pkey_group.attrs);
        p->pkey_group.attrs = NULL;

err_remove_gid_type:
        sysfs_remove_group(&p->gid_attr_group->kobj,
                           &p->gid_attr_group->type);

err_free_gid_type:
        for (i = 0; i < attr.gid_tbl_len; ++i)
                kfree(p->gid_attr_group->type.attrs[i]);

        kfree(p->gid_attr_group->type.attrs);
        p->gid_attr_group->type.attrs = NULL;

err_remove_gid_ndev:
        sysfs_remove_group(&p->gid_attr_group->kobj,
                           &p->gid_attr_group->ndev);

err_free_gid_ndev:
        for (i = 0; i < attr.gid_tbl_len; ++i)
                kfree(p->gid_attr_group->ndev.attrs[i]);

        kfree(p->gid_attr_group->ndev.attrs);
        p->gid_attr_group->ndev.attrs = NULL;

err_remove_gid:
        sysfs_remove_group(&p->kobj, &p->gid_group);

err_free_gid:
        for (i = 0; i < attr.gid_tbl_len; ++i)
                kfree(p->gid_group.attrs[i]);

        kfree(p->gid_group.attrs);
        p->gid_group.attrs = NULL;

err_remove_pma:
        sysfs_remove_group(&p->kobj, p->pma_table);

err_put_gid_attrs:
        kobject_put(&p->gid_attr_group->kobj);

err_put:
        kobject_put(&p->kobj);
        return ret;
}
static ssize_t show_node_type(struct device *device,
                              struct device_attribute *attr, char *buf)
{
        struct ib_device *dev = container_of(device, struct ib_device, dev);

        switch (dev->node_type) {
        case RDMA_NODE_IB_CA:     return sprintf(buf, "%d: CA\n", dev->node_type);
        case RDMA_NODE_RNIC:      return sprintf(buf, "%d: RNIC\n", dev->node_type);
        case RDMA_NODE_USNIC:     return sprintf(buf, "%d: usNIC\n", dev->node_type);
        case RDMA_NODE_USNIC_UDP: return sprintf(buf, "%d: usNIC UDP\n", dev->node_type);
        case RDMA_NODE_IB_SWITCH: return sprintf(buf, "%d: switch\n", dev->node_type);
        case RDMA_NODE_IB_ROUTER: return sprintf(buf, "%d: router\n", dev->node_type);
        default:                  return sprintf(buf, "%d: <unknown>\n", dev->node_type);
        }
}
static ssize_t show_sys_image_guid(struct device *device,
                                   struct device_attribute *dev_attr, char *buf)
{
        struct ib_device *dev = container_of(device, struct ib_device, dev);

        return sprintf(buf, "%04x:%04x:%04x:%04x\n",
                       be16_to_cpu(((__be16 *) &dev->attrs.sys_image_guid)[0]),
                       be16_to_cpu(((__be16 *) &dev->attrs.sys_image_guid)[1]),
                       be16_to_cpu(((__be16 *) &dev->attrs.sys_image_guid)[2]),
                       be16_to_cpu(((__be16 *) &dev->attrs.sys_image_guid)[3]));
}
static ssize_t show_node_guid(struct device *device,
                              struct device_attribute *attr, char *buf)
{
        struct ib_device *dev = container_of(device, struct ib_device, dev);

        return sprintf(buf, "%04x:%04x:%04x:%04x\n",
                       be16_to_cpu(((__be16 *) &dev->node_guid)[0]),
                       be16_to_cpu(((__be16 *) &dev->node_guid)[1]),
                       be16_to_cpu(((__be16 *) &dev->node_guid)[2]),
                       be16_to_cpu(((__be16 *) &dev->node_guid)[3]));
}
static ssize_t show_node_desc(struct device *device,
                              struct device_attribute *attr, char *buf)
{
        struct ib_device *dev = container_of(device, struct ib_device, dev);

        return sprintf(buf, "%.64s\n", dev->node_desc);
}
static ssize_t set_node_desc(struct device *device,
                             struct device_attribute *attr,
                             const char *buf, size_t count)
{
        struct ib_device *dev = container_of(device, struct ib_device, dev);
        struct ib_device_modify desc = {};
        int ret;

        if (!dev->modify_device)
                return -EIO;

        memcpy(desc.node_desc, buf, min_t(int, count, IB_DEVICE_NODE_DESC_MAX));
        ret = ib_modify_device(dev, IB_DEVICE_MODIFY_NODE_DESC, &desc);
        if (ret)
                return ret;

        return count;
}
static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
                           char *buf)
{
        struct ib_device *dev = container_of(device, struct ib_device, dev);

        ib_get_device_fw_str(dev, buf);
        strlcat(buf, "\n", IB_FW_VERSION_NAME_MAX);
        return strlen(buf);
}
static DEVICE_ATTR(node_type, S_IRUGO, show_node_type, NULL);
static DEVICE_ATTR(sys_image_guid, S_IRUGO, show_sys_image_guid, NULL);
static DEVICE_ATTR(node_guid, S_IRUGO, show_node_guid, NULL);
static DEVICE_ATTR(node_desc, S_IRUGO | S_IWUSR, show_node_desc, set_node_desc);
static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
static struct device_attribute *ib_class_attributes[] = {
        &dev_attr_node_type,
        &dev_attr_sys_image_guid,
        &dev_attr_node_guid,
        &dev_attr_node_desc,
        &dev_attr_fw_ver,
};
static void free_port_list_attributes(struct ib_device *device)
{
        struct kobject *p, *t;

        list_for_each_entry_safe(p, t, &device->port_list, entry) {
                struct ib_port *port = container_of(p, struct ib_port, kobj);
                list_del(&p->entry);
                if (port->hw_stats) {
                        kfree(port->hw_stats);
                        free_hsag(&port->kobj, port->hw_stats_ag);
                }
                sysfs_remove_group(p, port->pma_table);
                sysfs_remove_group(p, &port->pkey_group);
                sysfs_remove_group(p, &port->gid_group);
                sysfs_remove_group(&port->gid_attr_group->kobj,
                                   &port->gid_attr_group->ndev);
                sysfs_remove_group(&port->gid_attr_group->kobj,
                                   &port->gid_attr_group->type);
                kobject_put(&port->gid_attr_group->kobj);
                kobject_put(p);
        }

        kobject_put(device->ports_parent);
}
int ib_device_register_sysfs(struct ib_device *device,
                             int (*port_callback)(struct ib_device *,
                                                  u8, struct kobject *))
{
        struct device *class_dev = &device->dev;
        int ret;
        int i;

        ret = dev_set_name(class_dev, "%s", device->name);
        if (ret)
                return ret;

        ret = device_add(class_dev);
        if (ret)
                goto err;

        for (i = 0; i < ARRAY_SIZE(ib_class_attributes); ++i) {
                ret = device_create_file(class_dev, ib_class_attributes[i]);
                if (ret)
                        goto err_unregister;
        }

        device->ports_parent = kobject_create_and_add("ports",
                                                      &class_dev->kobj);
        if (!device->ports_parent) {
                ret = -ENOMEM;
                goto err_put;
        }

        if (rdma_cap_ib_switch(device)) {
                ret = add_port(device, 0, port_callback);
                if (ret)
                        goto err_put;
        } else {
                for (i = 1; i <= device->phys_port_cnt; ++i) {
                        ret = add_port(device, i, port_callback);
                        if (ret)
                                goto err_put;
                }
        }

        if (device->alloc_hw_stats)
                setup_hw_stats(device, NULL, 0);

        return 0;

err_put:
        free_port_list_attributes(device);

err_unregister:
        device_del(class_dev);

err:
        return ret;
}
void ib_device_unregister_sysfs(struct ib_device *device)
{
        int i;

        /* Hold kobject until ib_dealloc_device() */
        kobject_get(&device->dev.kobj);

        free_port_list_attributes(device);

        if (device->hw_stats) {
                kfree(device->hw_stats);
                free_hsag(&device->dev.kobj, device->hw_stats_ag);
        }

        for (i = 0; i < ARRAY_SIZE(ib_class_attributes); ++i)
                device_remove_file(&device->dev, ib_class_attributes[i]);

        device_unregister(&device->dev);
}