/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "core_priv.h"

#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>

#include <rdma/ib_mad.h>
#include <rdma/ib_pma.h>

struct gid_attr_group {
        struct ib_port          *port;
        struct kobject          kobj;
        struct attribute_group  ndev;
        struct attribute_group  type;
};

struct ib_port {
        struct kobject          kobj;
        struct ib_device        *ibdev;
        struct gid_attr_group   *gid_attr_group;
        struct attribute_group  gid_group;
        struct attribute_group  pkey_group;
        struct attribute_group  *pma_table;
        struct attribute_group  *hw_stats_ag;
        struct rdma_hw_stats    *hw_stats;
        u8                      port_num;
};

struct port_attribute {
        struct attribute attr;
        ssize_t (*show)(struct ib_port *, struct port_attribute *, char *buf);
        ssize_t (*store)(struct ib_port *, struct port_attribute *,
                         const char *buf, size_t count);
};

#define PORT_ATTR(_name, _mode, _show, _store) \
struct port_attribute port_attr_##_name = __ATTR(_name, _mode, _show, _store)

#define PORT_ATTR_RO(_name) \
struct port_attribute port_attr_##_name = __ATTR_RO(_name)
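
/*
 * Note: __ATTR_RO(_name) wires the attribute to a show routine named
 * _name##_show, so PORT_ATTR_RO(state) further down expects state_show(),
 * PORT_ATTR_RO(lid) expects lid_show(), and so on.
 */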

struct port_table_attribute {
        struct port_attribute   attr;
        char                    name[8];
        int                     index;
        __be16                  attr_id;
};

struct hw_stats_attribute {
        struct attribute        attr;
        ssize_t                 (*show)(struct kobject *kobj,
                                        struct attribute *attr, char *buf);
        ssize_t                 (*store)(struct kobject *kobj,
                                         struct attribute *attr,
                                         const char *buf,
                                         size_t count);
        int                     index;
        u8                      port_num;
};
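
/*
 * A port_num of 0 in a hw_stats_attribute means the attribute hangs off the
 * ib_device itself rather than an individual port; show_hw_stats() and the
 * lifespan handlers below use this to pick the right container.
 */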

static ssize_t port_attr_show(struct kobject *kobj,
                              struct attribute *attr, char *buf)
{
        struct port_attribute *port_attr =
                container_of(attr, struct port_attribute, attr);
        struct ib_port *p = container_of(kobj, struct ib_port, kobj);

        if (!port_attr->show)
                return -EIO;

        return port_attr->show(p, port_attr, buf);
}

static ssize_t port_attr_store(struct kobject *kobj,
                               struct attribute *attr,
                               const char *buf, size_t count)
{
        struct port_attribute *port_attr =
                container_of(attr, struct port_attribute, attr);
        struct ib_port *p = container_of(kobj, struct ib_port, kobj);

        if (!port_attr->store)
                return -EIO;

        return port_attr->store(p, port_attr, buf, count);
}

static const struct sysfs_ops port_sysfs_ops = {
        .show   = port_attr_show,
        .store  = port_attr_store
};

static ssize_t gid_attr_show(struct kobject *kobj,
                             struct attribute *attr, char *buf)
{
        struct port_attribute *port_attr =
                container_of(attr, struct port_attribute, attr);
        struct ib_port *p = container_of(kobj, struct gid_attr_group,
                                         kobj)->port;

        if (!port_attr->show)
                return -EIO;

        return port_attr->show(p, port_attr, buf);
}

static const struct sysfs_ops gid_attr_sysfs_ops = {
        .show = gid_attr_show
};

static ssize_t state_show(struct ib_port *p, struct port_attribute *unused,
                          char *buf)
{
        struct ib_port_attr attr;
        ssize_t ret;

        static const char *state_name[] = {
                [IB_PORT_NOP]           = "NOP",
                [IB_PORT_DOWN]          = "DOWN",
                [IB_PORT_INIT]          = "INIT",
                [IB_PORT_ARMED]         = "ARMED",
                [IB_PORT_ACTIVE]        = "ACTIVE",
                [IB_PORT_ACTIVE_DEFER]  = "ACTIVE_DEFER"
        };

        ret = ib_query_port(p->ibdev, p->port_num, &attr);
        if (ret)
                return ret;

        return sprintf(buf, "%d: %s\n", attr.state,
                       attr.state >= 0 && attr.state < ARRAY_SIZE(state_name) ?
                       state_name[attr.state] : "UNKNOWN");
}

static ssize_t lid_show(struct ib_port *p, struct port_attribute *unused,
                        char *buf)
{
        struct ib_port_attr attr;
        ssize_t ret;

        ret = ib_query_port(p->ibdev, p->port_num, &attr);
        if (ret)
                return ret;

        return sprintf(buf, "0x%x\n", attr.lid);
}

static ssize_t lid_mask_count_show(struct ib_port *p,
                                   struct port_attribute *unused,
                                   char *buf)
{
        struct ib_port_attr attr;
        ssize_t ret;

        ret = ib_query_port(p->ibdev, p->port_num, &attr);
        if (ret)
                return ret;

        return sprintf(buf, "%d\n", attr.lmc);
}

static ssize_t sm_lid_show(struct ib_port *p, struct port_attribute *unused,
                           char *buf)
{
        struct ib_port_attr attr;
        ssize_t ret;

        ret = ib_query_port(p->ibdev, p->port_num, &attr);
        if (ret)
                return ret;

        return sprintf(buf, "0x%x\n", attr.sm_lid);
}

static ssize_t sm_sl_show(struct ib_port *p, struct port_attribute *unused,
                          char *buf)
{
        struct ib_port_attr attr;
        ssize_t ret;

        ret = ib_query_port(p->ibdev, p->port_num, &attr);
        if (ret)
                return ret;

        return sprintf(buf, "%d\n", attr.sm_sl);
}

static ssize_t cap_mask_show(struct ib_port *p, struct port_attribute *unused,
                             char *buf)
{
        struct ib_port_attr attr;
        ssize_t ret;

        ret = ib_query_port(p->ibdev, p->port_num, &attr);
        if (ret)
                return ret;

        return sprintf(buf, "0x%08x\n", attr.port_cap_flags);
}

static ssize_t rate_show(struct ib_port *p, struct port_attribute *unused,
                         char *buf)
{
        struct ib_port_attr attr;
        char *speed = "";
        int rate;               /* in deci-Gb/sec */
        ssize_t ret;

        ret = ib_query_port(p->ibdev, p->port_num, &attr);
        if (ret)
                return ret;

        switch (attr.active_speed) {
        case IB_SPEED_DDR:
                speed = " DDR";
                rate = 50;
                break;
        case IB_SPEED_QDR:
                speed = " QDR";
                rate = 100;
                break;
        case IB_SPEED_FDR10:
                speed = " FDR10";
                rate = 100;
                break;
        case IB_SPEED_FDR:
                speed = " FDR";
                rate = 140;
                break;
        case IB_SPEED_EDR:
                speed = " EDR";
                rate = 250;
                break;
        case IB_SPEED_SDR:
        default:                /* default to SDR for invalid rates */
                rate = 25;
                break;
        }

        rate *= ib_width_enum_to_int(attr.active_width);
        if (rate < 0)
                return -EINVAL;

        return sprintf(buf, "%d%s Gb/sec (%dX%s)\n",
                       rate / 10, rate % 10 ? ".5" : "",
                       ib_width_enum_to_int(attr.active_width), speed);
}

static ssize_t phys_state_show(struct ib_port *p, struct port_attribute *unused,
                               char *buf)
{
        struct ib_port_attr attr;
        ssize_t ret;

        ret = ib_query_port(p->ibdev, p->port_num, &attr);
        if (ret)
                return ret;

        switch (attr.phys_state) {
        case 1:  return sprintf(buf, "1: Sleep\n");
        case 2:  return sprintf(buf, "2: Polling\n");
        case 3:  return sprintf(buf, "3: Disabled\n");
        case 4:  return sprintf(buf, "4: PortConfigurationTraining\n");
        case 5:  return sprintf(buf, "5: LinkUp\n");
        case 6:  return sprintf(buf, "6: LinkErrorRecovery\n");
        case 7:  return sprintf(buf, "7: Phy Test\n");
        default: return sprintf(buf, "%d: <unknown>\n", attr.phys_state);
        }
}

static ssize_t link_layer_show(struct ib_port *p, struct port_attribute *unused,
                               char *buf)
{
        switch (rdma_port_get_link_layer(p->ibdev, p->port_num)) {
        case IB_LINK_LAYER_INFINIBAND:
                return sprintf(buf, "%s\n", "InfiniBand");
        case IB_LINK_LAYER_ETHERNET:
                return sprintf(buf, "%s\n", "Ethernet");
        default:
                return sprintf(buf, "%s\n", "Unknown");
        }
}

static PORT_ATTR_RO(state);
static PORT_ATTR_RO(lid);
static PORT_ATTR_RO(lid_mask_count);
static PORT_ATTR_RO(sm_lid);
static PORT_ATTR_RO(sm_sl);
static PORT_ATTR_RO(cap_mask);
static PORT_ATTR_RO(rate);
static PORT_ATTR_RO(phys_state);
static PORT_ATTR_RO(link_layer);

static struct attribute *port_default_attrs[] = {
        &port_attr_state.attr,
        &port_attr_lid.attr,
        &port_attr_lid_mask_count.attr,
        &port_attr_sm_lid.attr,
        &port_attr_sm_sl.attr,
        &port_attr_cap_mask.attr,
        &port_attr_rate.attr,
        &port_attr_phys_state.attr,
        &port_attr_link_layer.attr,
        NULL
};

static size_t print_ndev(struct ib_gid_attr *gid_attr, char *buf)
{
        if (!gid_attr->ndev)
                return -EINVAL;

        return sprintf(buf, "%s\n", gid_attr->ndev->name);
}

static size_t print_gid_type(struct ib_gid_attr *gid_attr, char *buf)
{
        return sprintf(buf, "%s\n", ib_cache_gid_type_str(gid_attr->gid_type));
}

static ssize_t _show_port_gid_attr(struct ib_port *p,
                                   struct port_attribute *attr,
                                   char *buf,
                                   size_t (*print)(struct ib_gid_attr *gid_attr,
                                                   char *buf))
{
        struct port_table_attribute *tab_attr =
                container_of(attr, struct port_table_attribute, attr);
        union ib_gid gid;
        struct ib_gid_attr gid_attr = {};
        ssize_t ret;

        ret = ib_query_gid(p->ibdev, p->port_num, tab_attr->index, &gid,
                           &gid_attr);
        if (ret)
                goto err;

        ret = print(&gid_attr, buf);

err:
        if (gid_attr.ndev)
                dev_put(gid_attr.ndev);
        return ret;
}
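
/*
 * When a gid_attr is requested, the gid cache takes a reference on the
 * associated net_device (if any), which is why the helper above drops it
 * with dev_put() once the attribute has been printed.
 */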

static ssize_t show_port_gid(struct ib_port *p, struct port_attribute *attr,
                             char *buf)
{
        struct port_table_attribute *tab_attr =
                container_of(attr, struct port_table_attribute, attr);
        union ib_gid gid;
        ssize_t ret;

        ret = ib_query_gid(p->ibdev, p->port_num, tab_attr->index, &gid, NULL);
        if (ret)
                return ret;

        return sprintf(buf, "%pI6\n", gid.raw);
}

static ssize_t show_port_gid_attr_ndev(struct ib_port *p,
                                       struct port_attribute *attr, char *buf)
{
        return _show_port_gid_attr(p, attr, buf, print_ndev);
}

static ssize_t show_port_gid_attr_gid_type(struct ib_port *p,
                                           struct port_attribute *attr,
                                           char *buf)
{
        return _show_port_gid_attr(p, attr, buf, print_gid_type);
}

static ssize_t show_port_pkey(struct ib_port *p, struct port_attribute *attr,
                              char *buf)
{
        struct port_table_attribute *tab_attr =
                container_of(attr, struct port_table_attribute, attr);
        u16 pkey;
        ssize_t ret;

        ret = ib_query_pkey(p->ibdev, p->port_num, tab_attr->index, &pkey);
        if (ret)
                return ret;

        return sprintf(buf, "0x%04x\n", pkey);
}

#define PORT_PMA_ATTR(_name, _counter, _width, _offset)                 \
struct port_table_attribute port_pma_attr_##_name = {                  \
        .attr  = __ATTR(_name, S_IRUGO, show_pma_counter, NULL),        \
        .index = (_offset) | ((_width) << 16) | ((_counter) << 24),     \
        .attr_id = IB_PMA_PORT_COUNTERS,                                \
}

#define PORT_PMA_ATTR_EXT(_name, _width, _offset)                       \
struct port_table_attribute port_pma_attr_ext_##_name = {               \
        .attr  = __ATTR(_name, S_IRUGO, show_pma_counter, NULL),        \
        .index = (_offset) | ((_width) << 16),                          \
        .attr_id = IB_PMA_PORT_COUNTERS_EXT,                            \
}
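
/*
 * Example of the index encoding used above: symbol_error is a 16-bit counter
 * starting at bit offset 32 of the PortCounters attribute, so
 *
 *      PORT_PMA_ATTR(symbol_error, 0, 16, 32);
 *
 * packs the offset 32 into bits 0-15 and the width 16 into bits 16-23;
 * show_pma_counter() unpacks them the same way to locate and format the
 * counter.
 */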

/*
 * Get a Perfmgmt MAD block of data.
 * Returns error code or the number of bytes retrieved.
 */
static int get_perf_mad(struct ib_device *dev, int port_num, __be16 attr,
                        void *data, int offset, size_t size)
{
        struct ib_mad *in_mad;
        struct ib_mad *out_mad;
        size_t mad_size = sizeof(*out_mad);
        u16 out_mad_pkey_index = 0;
        ssize_t ret;

        if (!dev->process_mad)
                return -ENOSYS;

        in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
        if (!in_mad || !out_mad) {
                ret = -ENOMEM;
                goto out;
        }

        in_mad->mad_hdr.base_version  = 1;
        in_mad->mad_hdr.mgmt_class    = IB_MGMT_CLASS_PERF_MGMT;
        in_mad->mad_hdr.class_version = 1;
        in_mad->mad_hdr.method        = IB_MGMT_METHOD_GET;
        in_mad->mad_hdr.attr_id       = attr;

        if (attr != IB_PMA_CLASS_PORT_INFO)
                in_mad->data[41] = port_num;    /* PortSelect field */

        if ((dev->process_mad(dev, IB_MAD_IGNORE_MKEY,
                 port_num, NULL, NULL,
                 (const struct ib_mad_hdr *)in_mad, mad_size,
                 (struct ib_mad_hdr *)out_mad, &mad_size,
                 &out_mad_pkey_index) &
             (IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY)) !=
            (IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY)) {
                ret = -EINVAL;
                goto out;
        }
        memcpy(data, out_mad->data + offset, size);
        ret = size;
out:
        kfree(in_mad);
        kfree(out_mad);
        return ret;
}
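
/*
 * Note that "offset" above is a byte offset into out_mad->data, not into the
 * full MAD; the callers below add 40 to reach the start of the counter
 * payload within the data area (e.g. show_pma_counter() passes
 * 40 + offset / 8 and get_counter_table() passes 40).
 */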

static ssize_t show_pma_counter(struct ib_port *p, struct port_attribute *attr,
                                char *buf)
{
        struct port_table_attribute *tab_attr =
                container_of(attr, struct port_table_attribute, attr);
        int offset = tab_attr->index & 0xffff;
        int width  = (tab_attr->index >> 16) & 0xff;
        ssize_t ret;
        u8 data[8];

        ret = get_perf_mad(p->ibdev, p->port_num, tab_attr->attr_id, &data,
                           40 + offset / 8, sizeof(data));
        if (ret < 0)
                return sprintf(buf, "N/A (no PMA)\n");

        switch (width) {
        case 4:
                ret = sprintf(buf, "%u\n", (*data >>
                                            (4 - (offset % 8))) & 0xf);
                break;
        case 8:
                ret = sprintf(buf, "%u\n", *data);
                break;
        case 16:
                ret = sprintf(buf, "%u\n",
                              be16_to_cpup((__be16 *)data));
                break;
        case 32:
                ret = sprintf(buf, "%u\n",
                              be32_to_cpup((__be32 *)data));
                break;
        case 64:
                ret = sprintf(buf, "%llu\n",
                              be64_to_cpup((__be64 *)data));
                break;
        default:
                ret = 0;
        }

        return ret;
}

static PORT_PMA_ATTR(symbol_error,                     0, 16,  32);
static PORT_PMA_ATTR(link_error_recovery,              1,  8,  48);
static PORT_PMA_ATTR(link_downed,                      2,  8,  56);
static PORT_PMA_ATTR(port_rcv_errors,                  3, 16,  64);
static PORT_PMA_ATTR(port_rcv_remote_physical_errors,  4, 16,  80);
static PORT_PMA_ATTR(port_rcv_switch_relay_errors,     5, 16,  96);
static PORT_PMA_ATTR(port_xmit_discards,               6, 16, 112);
static PORT_PMA_ATTR(port_xmit_constraint_errors,      7,  8, 128);
static PORT_PMA_ATTR(port_rcv_constraint_errors,       8,  8, 136);
static PORT_PMA_ATTR(local_link_integrity_errors,      9,  4, 152);
static PORT_PMA_ATTR(excessive_buffer_overrun_errors, 10,  4, 156);
static PORT_PMA_ATTR(VL15_dropped,                    11, 16, 176);
static PORT_PMA_ATTR(port_xmit_data,                  12, 32, 192);
static PORT_PMA_ATTR(port_rcv_data,                   13, 32, 224);
static PORT_PMA_ATTR(port_xmit_packets,               14, 32, 256);
static PORT_PMA_ATTR(port_rcv_packets,                15, 32, 288);
static PORT_PMA_ATTR(port_xmit_wait,                   0, 32, 320);

/*
 * Counters added by extended set
 */
static PORT_PMA_ATTR_EXT(port_xmit_data,          64,  64);
static PORT_PMA_ATTR_EXT(port_rcv_data,           64, 128);
static PORT_PMA_ATTR_EXT(port_xmit_packets,       64, 192);
static PORT_PMA_ATTR_EXT(port_rcv_packets,        64, 256);
static PORT_PMA_ATTR_EXT(unicast_xmit_packets,    64, 320);
static PORT_PMA_ATTR_EXT(unicast_rcv_packets,     64, 384);
static PORT_PMA_ATTR_EXT(multicast_xmit_packets,  64, 448);
static PORT_PMA_ATTR_EXT(multicast_rcv_packets,   64, 512);

static struct attribute *pma_attrs[] = {
        &port_pma_attr_symbol_error.attr.attr,
        &port_pma_attr_link_error_recovery.attr.attr,
        &port_pma_attr_link_downed.attr.attr,
        &port_pma_attr_port_rcv_errors.attr.attr,
        &port_pma_attr_port_rcv_remote_physical_errors.attr.attr,
        &port_pma_attr_port_rcv_switch_relay_errors.attr.attr,
        &port_pma_attr_port_xmit_discards.attr.attr,
        &port_pma_attr_port_xmit_constraint_errors.attr.attr,
        &port_pma_attr_port_rcv_constraint_errors.attr.attr,
        &port_pma_attr_local_link_integrity_errors.attr.attr,
        &port_pma_attr_excessive_buffer_overrun_errors.attr.attr,
        &port_pma_attr_VL15_dropped.attr.attr,
        &port_pma_attr_port_xmit_data.attr.attr,
        &port_pma_attr_port_rcv_data.attr.attr,
        &port_pma_attr_port_xmit_packets.attr.attr,
        &port_pma_attr_port_rcv_packets.attr.attr,
        &port_pma_attr_port_xmit_wait.attr.attr,
        NULL
};

static struct attribute *pma_attrs_ext[] = {
        &port_pma_attr_symbol_error.attr.attr,
        &port_pma_attr_link_error_recovery.attr.attr,
        &port_pma_attr_link_downed.attr.attr,
        &port_pma_attr_port_rcv_errors.attr.attr,
        &port_pma_attr_port_rcv_remote_physical_errors.attr.attr,
        &port_pma_attr_port_rcv_switch_relay_errors.attr.attr,
        &port_pma_attr_port_xmit_discards.attr.attr,
        &port_pma_attr_port_xmit_constraint_errors.attr.attr,
        &port_pma_attr_port_rcv_constraint_errors.attr.attr,
        &port_pma_attr_local_link_integrity_errors.attr.attr,
        &port_pma_attr_excessive_buffer_overrun_errors.attr.attr,
        &port_pma_attr_VL15_dropped.attr.attr,
        &port_pma_attr_ext_port_xmit_data.attr.attr,
        &port_pma_attr_ext_port_rcv_data.attr.attr,
        &port_pma_attr_ext_port_xmit_packets.attr.attr,
        &port_pma_attr_port_xmit_wait.attr.attr,
        &port_pma_attr_ext_port_rcv_packets.attr.attr,
        &port_pma_attr_ext_unicast_rcv_packets.attr.attr,
        &port_pma_attr_ext_unicast_xmit_packets.attr.attr,
        &port_pma_attr_ext_multicast_rcv_packets.attr.attr,
        &port_pma_attr_ext_multicast_xmit_packets.attr.attr,
        NULL
};

static struct attribute *pma_attrs_noietf[] = {
        &port_pma_attr_symbol_error.attr.attr,
        &port_pma_attr_link_error_recovery.attr.attr,
        &port_pma_attr_link_downed.attr.attr,
        &port_pma_attr_port_rcv_errors.attr.attr,
        &port_pma_attr_port_rcv_remote_physical_errors.attr.attr,
        &port_pma_attr_port_rcv_switch_relay_errors.attr.attr,
        &port_pma_attr_port_xmit_discards.attr.attr,
        &port_pma_attr_port_xmit_constraint_errors.attr.attr,
        &port_pma_attr_port_rcv_constraint_errors.attr.attr,
        &port_pma_attr_local_link_integrity_errors.attr.attr,
        &port_pma_attr_excessive_buffer_overrun_errors.attr.attr,
        &port_pma_attr_VL15_dropped.attr.attr,
        &port_pma_attr_ext_port_xmit_data.attr.attr,
        &port_pma_attr_ext_port_rcv_data.attr.attr,
        &port_pma_attr_ext_port_xmit_packets.attr.attr,
        &port_pma_attr_ext_port_rcv_packets.attr.attr,
        &port_pma_attr_port_xmit_wait.attr.attr,
        NULL
};

static struct attribute_group pma_group = {
        .name  = "counters",
        .attrs = pma_attrs
};

static struct attribute_group pma_group_ext = {
        .name  = "counters",
        .attrs = pma_attrs_ext
};

static struct attribute_group pma_group_noietf = {
        .name  = "counters",
        .attrs = pma_attrs_noietf
};
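
/*
 * All three groups are published under the same "counters" directory;
 * get_counter_table() picks exactly one of them per port, and add_port()
 * remembers the choice in p->pma_table so that the same group can be removed
 * again on teardown.
 */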

static void ib_port_release(struct kobject *kobj)
{
        struct ib_port *p = container_of(kobj, struct ib_port, kobj);
        struct attribute *a;
        int i;

        if (p->gid_group.attrs) {
                for (i = 0; (a = p->gid_group.attrs[i]); ++i)
                        kfree(a);

                kfree(p->gid_group.attrs);
        }

        if (p->pkey_group.attrs) {
                for (i = 0; (a = p->pkey_group.attrs[i]); ++i)
                        kfree(a);

                kfree(p->pkey_group.attrs);
        }

        kfree(p);
}

static void ib_port_gid_attr_release(struct kobject *kobj)
{
        struct gid_attr_group *g = container_of(kobj, struct gid_attr_group,
                                                kobj);
        struct attribute *a;
        int i;

        if (g->ndev.attrs) {
                for (i = 0; (a = g->ndev.attrs[i]); ++i)
                        kfree(a);

                kfree(g->ndev.attrs);
        }

        if (g->type.attrs) {
                for (i = 0; (a = g->type.attrs[i]); ++i)
                        kfree(a);

                kfree(g->type.attrs);
        }

        kfree(g);
}

static struct kobj_type port_type = {
        .release       = ib_port_release,
        .sysfs_ops     = &port_sysfs_ops,
        .default_attrs = port_default_attrs
};

static struct kobj_type gid_attr_type = {
        .sysfs_ops = &gid_attr_sysfs_ops,
        .release   = ib_port_gid_attr_release
};

static struct attribute **
alloc_group_attrs(ssize_t (*show)(struct ib_port *,
                                  struct port_attribute *, char *buf),
                  int len)
{
        struct attribute **tab_attr;
        struct port_table_attribute *element;
        int i;

        tab_attr = kcalloc(1 + len, sizeof(struct attribute *), GFP_KERNEL);
        if (!tab_attr)
                return NULL;

        for (i = 0; i < len; i++) {
                element = kzalloc(sizeof(struct port_table_attribute),
                                  GFP_KERNEL);
                if (!element)
                        goto err;

                if (snprintf(element->name, sizeof(element->name),
                             "%d", i) >= sizeof(element->name)) {
                        kfree(element);
                        goto err;
                }

                element->attr.attr.name  = element->name;
                element->attr.attr.mode  = S_IRUGO;
                element->attr.show       = show;
                element->index           = i;
                sysfs_attr_init(&element->attr.attr);

                tab_attr[i] = &element->attr.attr;
        }

        return tab_attr;

err:
        while (--i >= 0)
                kfree(tab_attr[i]);
        kfree(tab_attr);
        return NULL;
}
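
/*
 * alloc_group_attrs() is what gives the gids, gid_attrs/{ndevs,types} and
 * pkeys directories one read-only file per table index, each simply named
 * "0", "1", ... and routed to the show routine passed in.
 */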

/*
 * Figure out which counter table to use depending on
 * the device capabilities.
 */
static struct attribute_group *get_counter_table(struct ib_device *dev,
                                                 int port_num)
{
        struct ib_class_port_info cpi;

        if (get_perf_mad(dev, port_num, IB_PMA_CLASS_PORT_INFO,
                         &cpi, 40, sizeof(cpi)) >= 0) {
                if (cpi.capability_mask & IB_PMA_CLASS_CAP_EXT_WIDTH)
                        /* We have extended counters */
                        return &pma_group_ext;

                if (cpi.capability_mask & IB_PMA_CLASS_CAP_EXT_WIDTH_NOIETF)
                        /* But not the IETF ones */
                        return &pma_group_noietf;
        }

        /* Fall back to normal counters */
        return &pma_group;
}

static int update_hw_stats(struct ib_device *dev, struct rdma_hw_stats *stats,
                           u8 port_num, int index)
{
        int ret;

        if (time_is_after_eq_jiffies(stats->timestamp + stats->lifespan))
                return 0;
        ret = dev->get_hw_stats(dev, stats, port_num, index);
        if (ret < 0)
                return ret;
        if (ret == stats->num_counters)
                stats->timestamp = jiffies;

        return 0;
}

static ssize_t print_hw_stat(struct rdma_hw_stats *stats, int index, char *buf)
{
        return sprintf(buf, "%llu\n", stats->value[index]);
}

static ssize_t show_hw_stats(struct kobject *kobj, struct attribute *attr,
                             char *buf)
{
        struct ib_device *dev;
        struct ib_port *port;
        struct hw_stats_attribute *hsa;
        struct rdma_hw_stats *stats;
        int ret;

        hsa = container_of(attr, struct hw_stats_attribute, attr);
        if (!hsa->port_num) {
                dev = container_of((struct device *)kobj,
                                   struct ib_device, dev);
                stats = dev->hw_stats;
        } else {
                port = container_of(kobj, struct ib_port, kobj);
                dev = port->ibdev;
                stats = port->hw_stats;
        }
        ret = update_hw_stats(dev, stats, hsa->port_num, hsa->index);
        if (ret)
                return ret;
        return print_hw_stat(stats, hsa->index, buf);
}

static ssize_t show_stats_lifespan(struct kobject *kobj,
                                   struct attribute *attr,
                                   char *buf)
{
        struct hw_stats_attribute *hsa;
        int msecs;

        hsa = container_of(attr, struct hw_stats_attribute, attr);
        if (!hsa->port_num) {
                struct ib_device *dev = container_of((struct device *)kobj,
                                                     struct ib_device, dev);
                msecs = jiffies_to_msecs(dev->hw_stats->lifespan);
        } else {
                struct ib_port *p = container_of(kobj, struct ib_port, kobj);
                msecs = jiffies_to_msecs(p->hw_stats->lifespan);
        }
        return sprintf(buf, "%d\n", msecs);
}

static ssize_t set_stats_lifespan(struct kobject *kobj,
                                  struct attribute *attr,
                                  const char *buf, size_t count)
{
        struct hw_stats_attribute *hsa;
        int msecs;
        int jiffies;
        int ret;

        ret = kstrtoint(buf, 10, &msecs);
        if (ret)
                return ret;
        if (msecs < 0 || msecs > 10000)
                return -EINVAL;
        jiffies = msecs_to_jiffies(msecs);
        hsa = container_of(attr, struct hw_stats_attribute, attr);
        if (!hsa->port_num) {
                struct ib_device *dev = container_of((struct device *)kobj,
                                                     struct ib_device, dev);
                dev->hw_stats->lifespan = jiffies;
        } else {
                struct ib_port *p = container_of(kobj, struct ib_port, kobj);
                p->hw_stats->lifespan = jiffies;
        }
        return count;
}
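
/*
 * The lifespan file accepts a value in milliseconds (0-10000) and controls
 * how long cached hardware counters stay valid before update_hw_stats()
 * refreshes them; e.g. (illustrative path)
 *
 *      echo 2000 > /sys/class/infiniband/<hca>/ports/<N>/hw_counters/lifespan
 *
 * would allow counter reads to be served from values up to two seconds old.
 */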

static void free_hsag(struct kobject *kobj, struct attribute_group *attr_group)
{
        struct attribute **attr;

        sysfs_remove_group(kobj, attr_group);
        for (attr = attr_group->attrs; *attr; attr++)
                kfree(*attr);
        kfree(attr_group);
}

static struct attribute *alloc_hsa(int index, u8 port_num, const char *name)
{
        struct hw_stats_attribute *hsa;

        hsa = kmalloc(sizeof(*hsa), GFP_KERNEL);
        if (!hsa)
                return NULL;

        hsa->attr.name = (char *)name;
        hsa->attr.mode = S_IRUGO;
        hsa->show      = show_hw_stats;
        hsa->store     = NULL;
        hsa->index     = index;
        hsa->port_num  = port_num;

        return &hsa->attr;
}

static struct attribute *alloc_hsa_lifespan(char *name, u8 port_num)
{
        struct hw_stats_attribute *hsa;

        hsa = kmalloc(sizeof(*hsa), GFP_KERNEL);
        if (!hsa)
                return NULL;

        hsa->attr.name = name;
        hsa->attr.mode = S_IWUSR | S_IRUGO;
        hsa->show      = show_stats_lifespan;
        hsa->store     = set_stats_lifespan;
        hsa->index     = 0;
        hsa->port_num  = port_num;

        return &hsa->attr;
}

static void setup_hw_stats(struct ib_device *device, struct ib_port *port,
                           u8 port_num)
{
        struct attribute_group *hsag;
        struct rdma_hw_stats *stats;
        int i, ret;

        stats = device->alloc_hw_stats(device, port_num);
        if (!stats)
                return;

        if (!stats->names || stats->num_counters <= 0)
                goto err_free_stats;

        /*
         * Two extra attribute elements here, one for the lifespan entry and
         * one to NULL terminate the list for the sysfs core code
         */
        hsag = kzalloc(sizeof(*hsag) +
                       sizeof(void *) * (stats->num_counters + 2),
                       GFP_KERNEL);
        if (!hsag)
                goto err_free_stats;

        ret = device->get_hw_stats(device, stats, port_num,
                                   stats->num_counters);
        if (ret != stats->num_counters)
                goto err_free_hsag;

        stats->timestamp = jiffies;

        hsag->name = "hw_counters";
        hsag->attrs = (void *)hsag + sizeof(*hsag);

        for (i = 0; i < stats->num_counters; i++) {
                hsag->attrs[i] = alloc_hsa(i, port_num, stats->names[i]);
                if (!hsag->attrs[i])
                        goto err;
                sysfs_attr_init(hsag->attrs[i]);
        }

        /* treat an error here as non-fatal */
        hsag->attrs[i] = alloc_hsa_lifespan("lifespan", port_num);
        if (hsag->attrs[i])
                sysfs_attr_init(hsag->attrs[i]);

        if (port) {
                struct kobject *kobj = &port->kobj;
                ret = sysfs_create_group(kobj, hsag);
                if (ret)
                        goto err;
                port->hw_stats_ag = hsag;
                port->hw_stats = stats;
        } else {
                struct kobject *kobj = &device->dev.kobj;
                ret = sysfs_create_group(kobj, hsag);
                if (ret)
                        goto err;
                device->hw_stats_ag = hsag;
                device->hw_stats = stats;
        }

        return;

err:
        for (; i >= 0; i--)
                kfree(hsag->attrs[i]);
err_free_hsag:
        kfree(hsag);
err_free_stats:
        kfree(stats);
        return;
}
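
/*
 * The resulting "hw_counters" group is attached either to a port kobject or,
 * when port is NULL, to the device itself, so drivers can expose both
 * per-port and device-wide counters through the same helper.
 */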

static int add_port(struct ib_device *device, int port_num,
                    int (*port_callback)(struct ib_device *,
                                         u8, struct kobject *))
{
        struct ib_port *p;
        struct ib_port_attr attr;
        int i;
        int ret;

        ret = ib_query_port(device, port_num, &attr);
        if (ret)
                return ret;

        p = kzalloc(sizeof *p, GFP_KERNEL);
        if (!p)
                return -ENOMEM;

        p->ibdev    = device;
        p->port_num = port_num;

        ret = kobject_init_and_add(&p->kobj, &port_type,
                                   device->ports_parent,
                                   "%d", port_num);
        if (ret) {
                kfree(p);
                return ret;
        }

        p->gid_attr_group = kzalloc(sizeof(*p->gid_attr_group), GFP_KERNEL);
        if (!p->gid_attr_group) {
                ret = -ENOMEM;
                goto err_put;
        }

        p->gid_attr_group->port = p;
        ret = kobject_init_and_add(&p->gid_attr_group->kobj, &gid_attr_type,
                                   &p->kobj, "gid_attrs");
        if (ret) {
                kfree(p->gid_attr_group);
                goto err_put;
        }

        p->pma_table = get_counter_table(device, port_num);
        ret = sysfs_create_group(&p->kobj, p->pma_table);
        if (ret)
                goto err_put_gid_attrs;

        p->gid_group.name  = "gids";
        p->gid_group.attrs = alloc_group_attrs(show_port_gid, attr.gid_tbl_len);
        if (!p->gid_group.attrs) {
                ret = -ENOMEM;
                goto err_remove_pma;
        }

        ret = sysfs_create_group(&p->kobj, &p->gid_group);
        if (ret)
                goto err_free_gid;

        p->gid_attr_group->ndev.name  = "ndevs";
        p->gid_attr_group->ndev.attrs = alloc_group_attrs(show_port_gid_attr_ndev,
                                                          attr.gid_tbl_len);
        if (!p->gid_attr_group->ndev.attrs) {
                ret = -ENOMEM;
                goto err_remove_gid;
        }

        ret = sysfs_create_group(&p->gid_attr_group->kobj,
                                 &p->gid_attr_group->ndev);
        if (ret)
                goto err_free_gid_ndev;

        p->gid_attr_group->type.name  = "types";
        p->gid_attr_group->type.attrs = alloc_group_attrs(show_port_gid_attr_gid_type,
                                                          attr.gid_tbl_len);
        if (!p->gid_attr_group->type.attrs) {
                ret = -ENOMEM;
                goto err_remove_gid_ndev;
        }

        ret = sysfs_create_group(&p->gid_attr_group->kobj,
                                 &p->gid_attr_group->type);
        if (ret)
                goto err_free_gid_type;

        p->pkey_group.name  = "pkeys";
        p->pkey_group.attrs = alloc_group_attrs(show_port_pkey,
                                                attr.pkey_tbl_len);
        if (!p->pkey_group.attrs) {
                ret = -ENOMEM;
                goto err_remove_gid_type;
        }

        ret = sysfs_create_group(&p->kobj, &p->pkey_group);
        if (ret)
                goto err_free_pkey;

        if (port_callback) {
                ret = port_callback(device, port_num, &p->kobj);
                if (ret)
                        goto err_remove_pkey;
        }

        /*
         * If port == 0, it means we have only one port and the parent
         * device, not this port device, should be the holder of the
         * hw_stats
         */
        if (device->alloc_hw_stats && port_num)
                setup_hw_stats(device, p, port_num);

        list_add_tail(&p->kobj.entry, &device->port_list);

        kobject_uevent(&p->kobj, KOBJ_ADD);
        return 0;

err_remove_pkey:
        sysfs_remove_group(&p->kobj, &p->pkey_group);

err_free_pkey:
        for (i = 0; i < attr.pkey_tbl_len; ++i)
                kfree(p->pkey_group.attrs[i]);

        kfree(p->pkey_group.attrs);
        p->pkey_group.attrs = NULL;

err_remove_gid_type:
        sysfs_remove_group(&p->gid_attr_group->kobj,
                           &p->gid_attr_group->type);

err_free_gid_type:
        for (i = 0; i < attr.gid_tbl_len; ++i)
                kfree(p->gid_attr_group->type.attrs[i]);

        kfree(p->gid_attr_group->type.attrs);
        p->gid_attr_group->type.attrs = NULL;

err_remove_gid_ndev:
        sysfs_remove_group(&p->gid_attr_group->kobj,
                           &p->gid_attr_group->ndev);

err_free_gid_ndev:
        for (i = 0; i < attr.gid_tbl_len; ++i)
                kfree(p->gid_attr_group->ndev.attrs[i]);

        kfree(p->gid_attr_group->ndev.attrs);
        p->gid_attr_group->ndev.attrs = NULL;

err_remove_gid:
        sysfs_remove_group(&p->kobj, &p->gid_group);

err_free_gid:
        for (i = 0; i < attr.gid_tbl_len; ++i)
                kfree(p->gid_group.attrs[i]);

        kfree(p->gid_group.attrs);
        p->gid_group.attrs = NULL;

err_remove_pma:
        sysfs_remove_group(&p->kobj, p->pma_table);

err_put_gid_attrs:
        kobject_put(&p->gid_attr_group->kobj);

err_put:
        kobject_put(&p->kobj);
        return ret;
}
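
/*
 * Sketch of the per-port layout built above (device directories typically
 * live under /sys/class/infiniband/<hca>):
 *
 *      ports/<N>/{state,lid,lid_mask_count,sm_lid,sm_sl,cap_mask,rate,...}
 *      ports/<N>/gids/<index>
 *      ports/<N>/gid_attrs/ndevs/<index>
 *      ports/<N>/gid_attrs/types/<index>
 *      ports/<N>/pkeys/<index>
 *      ports/<N>/counters/<counter name>
 */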

static ssize_t show_node_type(struct device *device,
                              struct device_attribute *attr, char *buf)
{
        struct ib_device *dev = container_of(device, struct ib_device, dev);

        switch (dev->node_type) {
        case RDMA_NODE_IB_CA:     return sprintf(buf, "%d: CA\n", dev->node_type);
        case RDMA_NODE_RNIC:      return sprintf(buf, "%d: RNIC\n", dev->node_type);
        case RDMA_NODE_USNIC:     return sprintf(buf, "%d: usNIC\n", dev->node_type);
        case RDMA_NODE_USNIC_UDP: return sprintf(buf, "%d: usNIC UDP\n", dev->node_type);
        case RDMA_NODE_IB_SWITCH: return sprintf(buf, "%d: switch\n", dev->node_type);
        case RDMA_NODE_IB_ROUTER: return sprintf(buf, "%d: router\n", dev->node_type);
        default:                  return sprintf(buf, "%d: <unknown>\n", dev->node_type);
        }
}

static ssize_t show_sys_image_guid(struct device *device,
                                   struct device_attribute *dev_attr, char *buf)
{
        struct ib_device *dev = container_of(device, struct ib_device, dev);

        return sprintf(buf, "%04x:%04x:%04x:%04x\n",
                       be16_to_cpu(((__be16 *) &dev->attrs.sys_image_guid)[0]),
                       be16_to_cpu(((__be16 *) &dev->attrs.sys_image_guid)[1]),
                       be16_to_cpu(((__be16 *) &dev->attrs.sys_image_guid)[2]),
                       be16_to_cpu(((__be16 *) &dev->attrs.sys_image_guid)[3]));
}

static ssize_t show_node_guid(struct device *device,
                              struct device_attribute *attr, char *buf)
{
        struct ib_device *dev = container_of(device, struct ib_device, dev);

        return sprintf(buf, "%04x:%04x:%04x:%04x\n",
                       be16_to_cpu(((__be16 *) &dev->node_guid)[0]),
                       be16_to_cpu(((__be16 *) &dev->node_guid)[1]),
                       be16_to_cpu(((__be16 *) &dev->node_guid)[2]),
                       be16_to_cpu(((__be16 *) &dev->node_guid)[3]));
}

static ssize_t show_node_desc(struct device *device,
                              struct device_attribute *attr, char *buf)
{
        struct ib_device *dev = container_of(device, struct ib_device, dev);

        return sprintf(buf, "%.64s\n", dev->node_desc);
}

static ssize_t set_node_desc(struct device *device,
                             struct device_attribute *attr,
                             const char *buf, size_t count)
{
        struct ib_device *dev = container_of(device, struct ib_device, dev);
        struct ib_device_modify desc = {};
        int ret;

        if (!dev->modify_device)
                return -EIO;

        memcpy(desc.node_desc, buf, min_t(int, count, IB_DEVICE_NODE_DESC_MAX));
        ret = ib_modify_device(dev, IB_DEVICE_MODIFY_NODE_DESC, &desc);
        if (ret)
                return ret;

        return count;
}

static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
                           char *buf)
{
        struct ib_device *dev = container_of(device, struct ib_device, dev);

        ib_get_device_fw_str(dev, buf);
        strlcat(buf, "\n", IB_FW_VERSION_NAME_MAX);
        return strlen(buf);
}

static DEVICE_ATTR(node_type, S_IRUGO, show_node_type, NULL);
static DEVICE_ATTR(sys_image_guid, S_IRUGO, show_sys_image_guid, NULL);
static DEVICE_ATTR(node_guid, S_IRUGO, show_node_guid, NULL);
static DEVICE_ATTR(node_desc, S_IRUGO | S_IWUSR, show_node_desc, set_node_desc);
static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);

static struct device_attribute *ib_class_attributes[] = {
        &dev_attr_node_type,
        &dev_attr_sys_image_guid,
        &dev_attr_node_guid,
        &dev_attr_node_desc,
        &dev_attr_fw_ver,
};

static void free_port_list_attributes(struct ib_device *device)
{
        struct kobject *p, *t;

        list_for_each_entry_safe(p, t, &device->port_list, entry) {
                struct ib_port *port = container_of(p, struct ib_port, kobj);
                list_del(&p->entry);
                if (port->hw_stats) {
                        kfree(port->hw_stats);
                        free_hsag(&port->kobj, port->hw_stats_ag);
                }
                sysfs_remove_group(p, port->pma_table);
                sysfs_remove_group(p, &port->pkey_group);
                sysfs_remove_group(p, &port->gid_group);
                sysfs_remove_group(&port->gid_attr_group->kobj,
                                   &port->gid_attr_group->ndev);
                sysfs_remove_group(&port->gid_attr_group->kobj,
                                   &port->gid_attr_group->type);
                kobject_put(&port->gid_attr_group->kobj);
                kobject_put(p);
        }

        kobject_put(device->ports_parent);
}

int ib_device_register_sysfs(struct ib_device *device,
                             int (*port_callback)(struct ib_device *,
                                                  u8, struct kobject *))
{
        struct device *class_dev = &device->dev;
        int ret;
        int i;

        ret = dev_set_name(class_dev, "%s", device->name);
        if (ret)
                return ret;

        ret = device_add(class_dev);
        if (ret)
                goto err;

        for (i = 0; i < ARRAY_SIZE(ib_class_attributes); ++i) {
                ret = device_create_file(class_dev, ib_class_attributes[i]);
                if (ret)
                        goto err_unregister;
        }

        device->ports_parent = kobject_create_and_add("ports",
                                                      &class_dev->kobj);
        if (!device->ports_parent) {
                ret = -ENOMEM;
                goto err_put;
        }

        if (rdma_cap_ib_switch(device)) {
                ret = add_port(device, 0, port_callback);
                if (ret)
                        goto err_put;
        } else {
                for (i = 1; i <= device->phys_port_cnt; ++i) {
                        ret = add_port(device, i, port_callback);
                        if (ret)
                                goto err_put;
                }
        }

        if (device->alloc_hw_stats)
                setup_hw_stats(device, NULL, 0);

        return 0;

err_put:
        free_port_list_attributes(device);

err_unregister:
        device_del(class_dev);

err:
        return ret;
}

void ib_device_unregister_sysfs(struct ib_device *device)
{
        int i;

        /* Hold kobject until ib_dealloc_device() */
        kobject_get(&device->dev.kobj);

        free_port_list_attributes(device);

        if (device->hw_stats) {
                kfree(device->hw_stats);
                free_hsag(&device->dev.kobj, device->hw_stats_ag);
        }

        for (i = 0; i < ARRAY_SIZE(ib_class_attributes); ++i)
                device_remove_file(&device->dev, ib_class_attributes[i]);

        device_unregister(&device->dev);
}