/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "core_priv.h"

#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/netdevice.h>

#include <rdma/ib_mad.h>
#include <rdma/ib_pma.h>

struct gid_attr_group {
        struct ib_port          *port;
        struct kobject          kobj;
        struct attribute_group  ndev;
        struct attribute_group  type;
};

struct ib_port {
        struct kobject         kobj;
        struct ib_device      *ibdev;
        struct gid_attr_group *gid_attr_group;
        struct attribute_group gid_group;
        struct attribute_group pkey_group;
        struct attribute_group *pma_table;
};

struct port_attribute {
        struct attribute attr;
        ssize_t (*show)(struct ib_port *, struct port_attribute *, char *buf);
        ssize_t (*store)(struct ib_port *, struct port_attribute *,
                         const char *buf, size_t count);
};

#define PORT_ATTR(_name, _mode, _show, _store) \
struct port_attribute port_attr_##_name = __ATTR(_name, _mode, _show, _store)

#define PORT_ATTR_RO(_name) \
struct port_attribute port_attr_##_name = __ATTR_RO(_name)
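
/*
 * For illustration: PORT_ATTR_RO(state) expands (via __ATTR_RO) to roughly
 *
 *      struct port_attribute port_attr_state = {
 *              .attr = { .name = "state", .mode = S_IRUGO },
 *              .show = state_show,
 *      };
 *
 * so every read-only port file below is backed by a port_attribute whose
 * ->show callback is dispatched through port_attr_show().
 */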

struct port_table_attribute {
        struct port_attribute   attr;
        char                    name[8];
        int                     index;
        __be16                  attr_id;
};

static ssize_t port_attr_show(struct kobject *kobj,
                              struct attribute *attr, char *buf)
{
        struct port_attribute *port_attr =
                container_of(attr, struct port_attribute, attr);
        struct ib_port *p = container_of(kobj, struct ib_port, kobj);

        if (!port_attr->show)
                return -EIO;

        return port_attr->show(p, port_attr, buf);
}

static const struct sysfs_ops port_sysfs_ops = {
        .show = port_attr_show
};

static ssize_t gid_attr_show(struct kobject *kobj,
                             struct attribute *attr, char *buf)
{
        struct port_attribute *port_attr =
                container_of(attr, struct port_attribute, attr);
        struct ib_port *p = container_of(kobj, struct gid_attr_group,
                                         kobj)->port;

        if (!port_attr->show)
                return -EIO;

        return port_attr->show(p, port_attr, buf);
}

static const struct sysfs_ops gid_attr_sysfs_ops = {
        .show = gid_attr_show
};
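
/*
 * Note on the two kobject types used below: files directly under a port
 * directory hang off the ib_port kobject, so port_attr_show() can use
 * container_of() on the kobject itself.  Files under gid_attrs/ hang off the
 * embedded gid_attr_group kobject instead, so gid_attr_show() has to go
 * through gid_attr_group->port to recover the ib_port before calling ->show.
 */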

static ssize_t state_show(struct ib_port *p, struct port_attribute *unused,
                          char *buf)
{
        struct ib_port_attr attr;
        ssize_t ret;

        static const char *state_name[] = {
                [IB_PORT_NOP]           = "NOP",
                [IB_PORT_DOWN]          = "DOWN",
                [IB_PORT_INIT]          = "INIT",
                [IB_PORT_ARMED]         = "ARMED",
                [IB_PORT_ACTIVE]        = "ACTIVE",
                [IB_PORT_ACTIVE_DEFER]  = "ACTIVE_DEFER"
        };

        ret = ib_query_port(p->ibdev, p->port_num, &attr);
        if (ret)
                return ret;

        return sprintf(buf, "%d: %s\n", attr.state,
                       attr.state >= 0 && attr.state < ARRAY_SIZE(state_name) ?
                       state_name[attr.state] : "UNKNOWN");
}

static ssize_t lid_show(struct ib_port *p, struct port_attribute *unused,
                        char *buf)
{
        struct ib_port_attr attr;
        ssize_t ret;

        ret = ib_query_port(p->ibdev, p->port_num, &attr);
        if (ret)
                return ret;

        return sprintf(buf, "0x%x\n", attr.lid);
}

static ssize_t lid_mask_count_show(struct ib_port *p,
                                   struct port_attribute *unused,
                                   char *buf)
{
        struct ib_port_attr attr;
        ssize_t ret;

        ret = ib_query_port(p->ibdev, p->port_num, &attr);
        if (ret)
                return ret;

        return sprintf(buf, "%d\n", attr.lmc);
}

static ssize_t sm_lid_show(struct ib_port *p, struct port_attribute *unused,
                           char *buf)
{
        struct ib_port_attr attr;
        ssize_t ret;

        ret = ib_query_port(p->ibdev, p->port_num, &attr);
        if (ret)
                return ret;

        return sprintf(buf, "0x%x\n", attr.sm_lid);
}

static ssize_t sm_sl_show(struct ib_port *p, struct port_attribute *unused,
                          char *buf)
{
        struct ib_port_attr attr;
        ssize_t ret;

        ret = ib_query_port(p->ibdev, p->port_num, &attr);
        if (ret)
                return ret;

        return sprintf(buf, "%d\n", attr.sm_sl);
}

static ssize_t cap_mask_show(struct ib_port *p, struct port_attribute *unused,
                             char *buf)
{
        struct ib_port_attr attr;
        ssize_t ret;

        ret = ib_query_port(p->ibdev, p->port_num, &attr);
        if (ret)
                return ret;

        return sprintf(buf, "0x%08x\n", attr.port_cap_flags);
}

static ssize_t rate_show(struct ib_port *p, struct port_attribute *unused,
                         char *buf)
{
        struct ib_port_attr attr;
        char *speed = "";
        int rate;               /* in deci-Gb/sec */
        ssize_t ret;

        ret = ib_query_port(p->ibdev, p->port_num, &attr);
        if (ret)
                return ret;

        switch (attr.active_speed) {
        case IB_SPEED_DDR:
                speed = " DDR";
                rate = 50;
                break;
        case IB_SPEED_QDR:
                speed = " QDR";
                rate = 100;
                break;
        case IB_SPEED_FDR10:
                speed = " FDR10";
                rate = 100;
                break;
        case IB_SPEED_FDR:
                speed = " FDR";
                rate = 140;
                break;
        case IB_SPEED_EDR:
                speed = " EDR";
                rate = 250;
                break;
        case IB_SPEED_SDR:
        default:                /* default to SDR for invalid rates */
                rate = 25;
                break;
        }

        rate *= ib_width_enum_to_int(attr.active_width);
        if (rate < 0)
                return -EINVAL;

        return sprintf(buf, "%d%s Gb/sec (%dX%s)\n",
                       rate / 10, rate % 10 ? ".5" : "",
                       ib_width_enum_to_int(attr.active_width), speed);
}

static ssize_t phys_state_show(struct ib_port *p, struct port_attribute *unused,
                               char *buf)
{
        struct ib_port_attr attr;
        ssize_t ret;

        ret = ib_query_port(p->ibdev, p->port_num, &attr);
        if (ret)
                return ret;

        switch (attr.phys_state) {
        case 1:  return sprintf(buf, "1: Sleep\n");
        case 2:  return sprintf(buf, "2: Polling\n");
        case 3:  return sprintf(buf, "3: Disabled\n");
        case 4:  return sprintf(buf, "4: PortConfigurationTraining\n");
        case 5:  return sprintf(buf, "5: LinkUp\n");
        case 6:  return sprintf(buf, "6: LinkErrorRecovery\n");
        case 7:  return sprintf(buf, "7: Phy Test\n");
        default: return sprintf(buf, "%d: <unknown>\n", attr.phys_state);
        }
}

static ssize_t link_layer_show(struct ib_port *p, struct port_attribute *unused,
                               char *buf)
{
        switch (rdma_port_get_link_layer(p->ibdev, p->port_num)) {
        case IB_LINK_LAYER_INFINIBAND:
                return sprintf(buf, "%s\n", "InfiniBand");
        case IB_LINK_LAYER_ETHERNET:
                return sprintf(buf, "%s\n", "Ethernet");
        default:
                return sprintf(buf, "%s\n", "Unknown");
        }
}

static PORT_ATTR_RO(state);
static PORT_ATTR_RO(lid);
static PORT_ATTR_RO(lid_mask_count);
static PORT_ATTR_RO(sm_lid);
static PORT_ATTR_RO(sm_sl);
static PORT_ATTR_RO(cap_mask);
static PORT_ATTR_RO(rate);
static PORT_ATTR_RO(phys_state);
static PORT_ATTR_RO(link_layer);

static struct attribute *port_default_attrs[] = {
        &port_attr_state.attr,
        &port_attr_lid.attr,
        &port_attr_lid_mask_count.attr,
        &port_attr_sm_lid.attr,
        &port_attr_sm_sl.attr,
        &port_attr_cap_mask.attr,
        &port_attr_rate.attr,
        &port_attr_phys_state.attr,
        &port_attr_link_layer.attr,
        NULL
};

static size_t print_ndev(struct ib_gid_attr *gid_attr, char *buf)
{
        if (!gid_attr->ndev)
                return -EINVAL;

        return sprintf(buf, "%s\n", gid_attr->ndev->name);
}

static size_t print_gid_type(struct ib_gid_attr *gid_attr, char *buf)
{
        return sprintf(buf, "%s\n", ib_cache_gid_type_str(gid_attr->gid_type));
}

static ssize_t _show_port_gid_attr(struct ib_port *p,
                                   struct port_attribute *attr,
                                   char *buf,
                                   size_t (*print)(struct ib_gid_attr *gid_attr,
                                                   char *buf))
{
        struct port_table_attribute *tab_attr =
                container_of(attr, struct port_table_attribute, attr);
        union ib_gid gid;
        struct ib_gid_attr gid_attr = {};
        ssize_t ret;

        ret = ib_query_gid(p->ibdev, p->port_num, tab_attr->index, &gid,
                           &gid_attr);
        if (ret)
                goto err;

        ret = print(&gid_attr, buf);

err:
        if (gid_attr.ndev)
                dev_put(gid_attr.ndev);
        return ret;
}

static ssize_t show_port_gid(struct ib_port *p, struct port_attribute *attr,
                             char *buf)
{
        struct port_table_attribute *tab_attr =
                container_of(attr, struct port_table_attribute, attr);
        union ib_gid gid;
        ssize_t ret;

        ret = ib_query_gid(p->ibdev, p->port_num, tab_attr->index, &gid, NULL);
        if (ret)
                return ret;

        return sprintf(buf, "%pI6\n", gid.raw);
}

static ssize_t show_port_gid_attr_ndev(struct ib_port *p,
                                       struct port_attribute *attr, char *buf)
{
        return _show_port_gid_attr(p, attr, buf, print_ndev);
}

static ssize_t show_port_gid_attr_gid_type(struct ib_port *p,
                                           struct port_attribute *attr,
                                           char *buf)
{
        return _show_port_gid_attr(p, attr, buf, print_gid_type);
}

static ssize_t show_port_pkey(struct ib_port *p, struct port_attribute *attr,
                              char *buf)
{
        struct port_table_attribute *tab_attr =
                container_of(attr, struct port_table_attribute, attr);
        u16 pkey;
        ssize_t ret;

        ret = ib_query_pkey(p->ibdev, p->port_num, tab_attr->index, &pkey);
        if (ret)
                return ret;

        return sprintf(buf, "0x%04x\n", pkey);
}

#define PORT_PMA_ATTR(_name, _counter, _width, _offset)                 \
struct port_table_attribute port_pma_attr_##_name = {                   \
        .attr  = __ATTR(_name, S_IRUGO, show_pma_counter, NULL),        \
        .index = (_offset) | ((_width) << 16) | ((_counter) << 24),     \
        .attr_id = IB_PMA_PORT_COUNTERS ,                               \
}

#define PORT_PMA_ATTR_EXT(_name, _width, _offset)                       \
struct port_table_attribute port_pma_attr_ext_##_name = {               \
        .attr  = __ATTR(_name, S_IRUGO, show_pma_counter, NULL),        \
        .index = (_offset) | ((_width) << 16),                          \
        .attr_id = IB_PMA_PORT_COUNTERS_EXT ,                           \
}
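
/*
 * The .index field packs three values that show_pma_counter() unpacks again:
 *   bits  0..15  offset of the counter within the attribute, in bits,
 *   bits 16..23  counter width in bits (4, 8, 16, 32 or 64),
 *   bits 24..31  counter number (not used by the read path).
 * E.g. PORT_PMA_ATTR(symbol_error, 0, 16, 32) describes a 16-bit counter
 * starting 32 bits into the PortCounters attribute.
 */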

/*
 * Get a Perfmgmt MAD block of data.
 * Returns error code or the number of bytes retrieved.
 */
static int get_perf_mad(struct ib_device *dev, int port_num, __be16 attr,
                        void *data, int offset, size_t size)
{
        struct ib_mad *in_mad;
        struct ib_mad *out_mad;
        size_t mad_size = sizeof(*out_mad);
        u16 out_mad_pkey_index = 0;
        ssize_t ret;

        if (!dev->process_mad)
                return -ENOSYS;

        in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
        if (!in_mad || !out_mad) {
                ret = -ENOMEM;
                goto out;
        }

        in_mad->mad_hdr.base_version  = 1;
        in_mad->mad_hdr.mgmt_class    = IB_MGMT_CLASS_PERF_MGMT;
        in_mad->mad_hdr.class_version = 1;
        in_mad->mad_hdr.method        = IB_MGMT_METHOD_GET;
        in_mad->mad_hdr.attr_id       = attr;

        if (attr != IB_PMA_CLASS_PORT_INFO)
                in_mad->data[41] = port_num;    /* PortSelect field */

        if ((dev->process_mad(dev, IB_MAD_IGNORE_MKEY,
                 port_num, NULL, NULL,
                 (const struct ib_mad_hdr *)in_mad, mad_size,
                 (struct ib_mad_hdr *)out_mad, &mad_size,
                 &out_mad_pkey_index) &
             (IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY)) !=
            (IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY)) {
                ret = -EINVAL;
                goto out;
        }
        memcpy(data, out_mad->data + offset, size);
        ret = size;
out:
        kfree(in_mad);
        kfree(out_mad);
        return ret;
}
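
/*
 * Layout note (as used by the callers below): the PerfMgmt attribute payload
 * starts 40 bytes into out_mad->data[], so show_pma_counter() passes
 * "40 + offset / 8" as the byte offset while the PORT_PMA_ATTR offsets above
 * are expressed in bits relative to the start of the attribute.
 */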

static ssize_t show_pma_counter(struct ib_port *p, struct port_attribute *attr,
                                char *buf)
{
        struct port_table_attribute *tab_attr =
                container_of(attr, struct port_table_attribute, attr);
        int offset = tab_attr->index & 0xffff;
        int width  = (tab_attr->index >> 16) & 0xff;
        ssize_t ret;
        u8 data[8];

        ret = get_perf_mad(p->ibdev, p->port_num, tab_attr->attr_id, &data,
                        40 + offset / 8, sizeof(data));
        if (ret < 0)
                return sprintf(buf, "N/A (no PMA)\n");

        switch (width) {
        case 4:
                ret = sprintf(buf, "%u\n", (*data >>
                                            (4 - (offset % 8))) & 0xf);
                break;
        case 8:
                ret = sprintf(buf, "%u\n", *data);
                break;
        case 16:
                ret = sprintf(buf, "%u\n",
                              be16_to_cpup((__be16 *)data));
                break;
        case 32:
                ret = sprintf(buf, "%u\n",
                              be32_to_cpup((__be32 *)data));
                break;
        case 64:
                ret = sprintf(buf, "%llu\n",
                              be64_to_cpup((__be64 *)data));
                break;
        default:
                ret = 0;
        }

        return ret;
}
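
/*
 * Example read path (sysfs path assumed for illustration): reading
 * /sys/class/infiniband/<device>/ports/<N>/counters/port_xmit_data ends up in
 * show_pma_counter() -> get_perf_mad(), which issues an IB_MGMT_METHOD_GET
 * PerfMgmt MAD against the local port and formats the counter it gets back
 * according to its width; "N/A (no PMA)" simply means that query failed.
 */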

static PORT_PMA_ATTR(symbol_error, 0, 16, 32);
static PORT_PMA_ATTR(link_error_recovery, 1, 8, 48);
static PORT_PMA_ATTR(link_downed, 2, 8, 56);
static PORT_PMA_ATTR(port_rcv_errors, 3, 16, 64);
static PORT_PMA_ATTR(port_rcv_remote_physical_errors, 4, 16, 80);
static PORT_PMA_ATTR(port_rcv_switch_relay_errors, 5, 16, 96);
static PORT_PMA_ATTR(port_xmit_discards, 6, 16, 112);
static PORT_PMA_ATTR(port_xmit_constraint_errors, 7, 8, 128);
static PORT_PMA_ATTR(port_rcv_constraint_errors, 8, 8, 136);
static PORT_PMA_ATTR(local_link_integrity_errors, 9, 4, 152);
static PORT_PMA_ATTR(excessive_buffer_overrun_errors, 10, 4, 156);
static PORT_PMA_ATTR(VL15_dropped, 11, 16, 176);
static PORT_PMA_ATTR(port_xmit_data, 12, 32, 192);
static PORT_PMA_ATTR(port_rcv_data, 13, 32, 224);
static PORT_PMA_ATTR(port_xmit_packets, 14, 32, 256);
static PORT_PMA_ATTR(port_rcv_packets, 15, 32, 288);

/*
 * Counters added by extended set
 */
static PORT_PMA_ATTR_EXT(port_xmit_data, 64, 64);
static PORT_PMA_ATTR_EXT(port_rcv_data, 64, 128);
static PORT_PMA_ATTR_EXT(port_xmit_packets, 64, 192);
static PORT_PMA_ATTR_EXT(port_rcv_packets, 64, 256);
static PORT_PMA_ATTR_EXT(unicast_xmit_packets, 64, 320);
static PORT_PMA_ATTR_EXT(unicast_rcv_packets, 64, 384);
static PORT_PMA_ATTR_EXT(multicast_xmit_packets, 64, 448);
static PORT_PMA_ATTR_EXT(multicast_rcv_packets, 64, 512);

static struct attribute *pma_attrs[] = {
        &port_pma_attr_symbol_error.attr.attr,
        &port_pma_attr_link_error_recovery.attr.attr,
        &port_pma_attr_link_downed.attr.attr,
        &port_pma_attr_port_rcv_errors.attr.attr,
        &port_pma_attr_port_rcv_remote_physical_errors.attr.attr,
        &port_pma_attr_port_rcv_switch_relay_errors.attr.attr,
        &port_pma_attr_port_xmit_discards.attr.attr,
        &port_pma_attr_port_xmit_constraint_errors.attr.attr,
        &port_pma_attr_port_rcv_constraint_errors.attr.attr,
        &port_pma_attr_local_link_integrity_errors.attr.attr,
        &port_pma_attr_excessive_buffer_overrun_errors.attr.attr,
        &port_pma_attr_VL15_dropped.attr.attr,
        &port_pma_attr_port_xmit_data.attr.attr,
        &port_pma_attr_port_rcv_data.attr.attr,
        &port_pma_attr_port_xmit_packets.attr.attr,
        &port_pma_attr_port_rcv_packets.attr.attr,
        NULL
};

static struct attribute *pma_attrs_ext[] = {
        &port_pma_attr_symbol_error.attr.attr,
        &port_pma_attr_link_error_recovery.attr.attr,
        &port_pma_attr_link_downed.attr.attr,
        &port_pma_attr_port_rcv_errors.attr.attr,
        &port_pma_attr_port_rcv_remote_physical_errors.attr.attr,
        &port_pma_attr_port_rcv_switch_relay_errors.attr.attr,
        &port_pma_attr_port_xmit_discards.attr.attr,
        &port_pma_attr_port_xmit_constraint_errors.attr.attr,
        &port_pma_attr_port_rcv_constraint_errors.attr.attr,
        &port_pma_attr_local_link_integrity_errors.attr.attr,
        &port_pma_attr_excessive_buffer_overrun_errors.attr.attr,
        &port_pma_attr_VL15_dropped.attr.attr,
        &port_pma_attr_ext_port_xmit_data.attr.attr,
        &port_pma_attr_ext_port_rcv_data.attr.attr,
        &port_pma_attr_ext_port_xmit_packets.attr.attr,
        &port_pma_attr_ext_port_rcv_packets.attr.attr,
        &port_pma_attr_ext_unicast_rcv_packets.attr.attr,
        &port_pma_attr_ext_unicast_xmit_packets.attr.attr,
        &port_pma_attr_ext_multicast_rcv_packets.attr.attr,
        &port_pma_attr_ext_multicast_xmit_packets.attr.attr,
        NULL
};

static struct attribute *pma_attrs_noietf[] = {
        &port_pma_attr_symbol_error.attr.attr,
        &port_pma_attr_link_error_recovery.attr.attr,
        &port_pma_attr_link_downed.attr.attr,
        &port_pma_attr_port_rcv_errors.attr.attr,
        &port_pma_attr_port_rcv_remote_physical_errors.attr.attr,
        &port_pma_attr_port_rcv_switch_relay_errors.attr.attr,
        &port_pma_attr_port_xmit_discards.attr.attr,
        &port_pma_attr_port_xmit_constraint_errors.attr.attr,
        &port_pma_attr_port_rcv_constraint_errors.attr.attr,
        &port_pma_attr_local_link_integrity_errors.attr.attr,
        &port_pma_attr_excessive_buffer_overrun_errors.attr.attr,
        &port_pma_attr_VL15_dropped.attr.attr,
        &port_pma_attr_ext_port_xmit_data.attr.attr,
        &port_pma_attr_ext_port_rcv_data.attr.attr,
        &port_pma_attr_ext_port_xmit_packets.attr.attr,
        &port_pma_attr_ext_port_rcv_packets.attr.attr,
        NULL
};

static struct attribute_group pma_group = {
        .name  = "counters",
        .attrs = pma_attrs
};

static struct attribute_group pma_group_ext = {
        .name  = "counters",
        .attrs = pma_attrs_ext
};

static struct attribute_group pma_group_noietf = {
        .name  = "counters",
        .attrs = pma_attrs_noietf
};

static void ib_port_release(struct kobject *kobj)
{
        struct ib_port *p = container_of(kobj, struct ib_port, kobj);
        struct attribute *a;
        int i;

        if (p->gid_group.attrs) {
                for (i = 0; (a = p->gid_group.attrs[i]); ++i)
                        kfree(a);

                kfree(p->gid_group.attrs);
        }

        if (p->pkey_group.attrs) {
                for (i = 0; (a = p->pkey_group.attrs[i]); ++i)
                        kfree(a);

                kfree(p->pkey_group.attrs);
        }

        kfree(p);
}

static void ib_port_gid_attr_release(struct kobject *kobj)
{
        struct gid_attr_group *g = container_of(kobj, struct gid_attr_group,
                                                kobj);
        struct attribute *a;
        int i;

        if (g->ndev.attrs) {
                for (i = 0; (a = g->ndev.attrs[i]); ++i)
                        kfree(a);

                kfree(g->ndev.attrs);
        }

        if (g->type.attrs) {
                for (i = 0; (a = g->type.attrs[i]); ++i)
                        kfree(a);

                kfree(g->type.attrs);
        }

        kfree(g);
}

static struct kobj_type port_type = {
        .release       = ib_port_release,
        .sysfs_ops     = &port_sysfs_ops,
        .default_attrs = port_default_attrs
};

static struct kobj_type gid_attr_type = {
        .sysfs_ops = &gid_attr_sysfs_ops,
        .release   = ib_port_gid_attr_release
};

static struct attribute **
alloc_group_attrs(ssize_t (*show)(struct ib_port *,
                                  struct port_attribute *, char *buf),
                  int len)
{
        struct attribute **tab_attr;
        struct port_table_attribute *element;
        int i;

        tab_attr = kcalloc(1 + len, sizeof(struct attribute *), GFP_KERNEL);
        if (!tab_attr)
                return NULL;

        for (i = 0; i < len; i++) {
                element = kzalloc(sizeof(struct port_table_attribute),
                                  GFP_KERNEL);
                if (!element)
                        goto err;

                if (snprintf(element->name, sizeof(element->name),
                             "%d", i) >= sizeof(element->name)) {
                        kfree(element);
                        goto err;
                }

                element->attr.attr.name  = element->name;
                element->attr.attr.mode  = S_IRUGO;
                element->attr.show       = show;
                element->index           = i;
                sysfs_attr_init(&element->attr.attr);

                tab_attr[i] = &element->attr.attr;
        }

        return tab_attr;

err:
        while (--i >= 0)
                kfree(tab_attr[i]);
        kfree(tab_attr);
        return NULL;
}
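
/*
 * alloc_group_attrs() builds the NULL-terminated attribute array behind the
 * per-port "gids", "pkeys", "ndevs" and "types" directories: entry i becomes
 * a read-only file literally named "0", "1", ..., whose ->show callback is
 * the table-specific handler and whose ->index selects the table row.
 */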

/*
 * Figure out which counter table to use depending on
 * the device capabilities.
 */
static struct attribute_group *get_counter_table(struct ib_device *dev,
                                                 int port_num)
{
        struct ib_class_port_info cpi;

        if (get_perf_mad(dev, port_num, IB_PMA_CLASS_PORT_INFO,
                                &cpi, 40, sizeof(cpi)) >= 0) {
                if (cpi.capability_mask & IB_PMA_CLASS_CAP_EXT_WIDTH)
                        /* We have extended counters */
                        return &pma_group_ext;

                if (cpi.capability_mask & IB_PMA_CLASS_CAP_EXT_WIDTH_NOIETF)
                        /* But not the IETF ones */
                        return &pma_group_noietf;
        }

        /* Fall back to normal counters */
        return &pma_group;
}
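
/*
 * In other words: a PMA advertising IB_PMA_CLASS_CAP_EXT_WIDTH gets the full
 * 64-bit extended counter files (including the unicast/multicast packet
 * counters), one advertising only the NOIETF variant gets the extended
 * data/packet counters without those, and anything else falls back to the
 * classic 32-bit PortCounters set.
 */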

static int add_port(struct ib_device *device, int port_num,
                    int (*port_callback)(struct ib_device *,
                                         u8, struct kobject *))
{
        struct ib_port *p;
        struct ib_port_attr attr;
        int i;
        int ret;

        ret = ib_query_port(device, port_num, &attr);
        if (ret)
                return ret;

        p = kzalloc(sizeof *p, GFP_KERNEL);
        if (!p)
                return -ENOMEM;

        p->ibdev    = device;
        p->port_num = port_num;

        ret = kobject_init_and_add(&p->kobj, &port_type,
                                   device->ports_parent,
                                   "%d", port_num);
        if (ret) {
                kfree(p);
                return ret;
        }

        p->gid_attr_group = kzalloc(sizeof(*p->gid_attr_group), GFP_KERNEL);
        if (!p->gid_attr_group) {
                ret = -ENOMEM;
                goto err_put;
        }

        p->gid_attr_group->port = p;
        ret = kobject_init_and_add(&p->gid_attr_group->kobj, &gid_attr_type,
                                   &p->kobj, "gid_attrs");
        if (ret) {
                kfree(p->gid_attr_group);
                goto err_put;
        }

        p->pma_table = get_counter_table(device, port_num);
        ret = sysfs_create_group(&p->kobj, p->pma_table);
        if (ret)
                goto err_put_gid_attrs;

        p->gid_group.name  = "gids";
        p->gid_group.attrs = alloc_group_attrs(show_port_gid, attr.gid_tbl_len);
        if (!p->gid_group.attrs) {
                ret = -ENOMEM;
                goto err_remove_pma;
        }

        ret = sysfs_create_group(&p->kobj, &p->gid_group);
        if (ret)
                goto err_free_gid;

        p->gid_attr_group->ndev.name  = "ndevs";
        p->gid_attr_group->ndev.attrs = alloc_group_attrs(show_port_gid_attr_ndev,
                                                          attr.gid_tbl_len);
        if (!p->gid_attr_group->ndev.attrs) {
                ret = -ENOMEM;
                goto err_remove_gid;
        }

        ret = sysfs_create_group(&p->gid_attr_group->kobj,
                                 &p->gid_attr_group->ndev);
        if (ret)
                goto err_free_gid_ndev;

        p->gid_attr_group->type.name  = "types";
        p->gid_attr_group->type.attrs = alloc_group_attrs(show_port_gid_attr_gid_type,
                                                          attr.gid_tbl_len);
        if (!p->gid_attr_group->type.attrs) {
                ret = -ENOMEM;
                goto err_remove_gid_ndev;
        }

        ret = sysfs_create_group(&p->gid_attr_group->kobj,
                                 &p->gid_attr_group->type);
        if (ret)
                goto err_free_gid_type;

        p->pkey_group.name  = "pkeys";
        p->pkey_group.attrs = alloc_group_attrs(show_port_pkey,
                                                attr.pkey_tbl_len);
        if (!p->pkey_group.attrs) {
                ret = -ENOMEM;
                goto err_remove_gid_type;
        }

        ret = sysfs_create_group(&p->kobj, &p->pkey_group);
        if (ret)
                goto err_free_pkey;

        if (port_callback) {
                ret = port_callback(device, port_num, &p->kobj);
                if (ret)
                        goto err_remove_pkey;
        }

        list_add_tail(&p->kobj.entry, &device->port_list);

        kobject_uevent(&p->kobj, KOBJ_ADD);
        return 0;

err_remove_pkey:
        sysfs_remove_group(&p->kobj, &p->pkey_group);

err_free_pkey:
        for (i = 0; i < attr.pkey_tbl_len; ++i)
                kfree(p->pkey_group.attrs[i]);

        kfree(p->pkey_group.attrs);
        p->pkey_group.attrs = NULL;

err_remove_gid_type:
        sysfs_remove_group(&p->gid_attr_group->kobj,
                           &p->gid_attr_group->type);

err_free_gid_type:
        for (i = 0; i < attr.gid_tbl_len; ++i)
                kfree(p->gid_attr_group->type.attrs[i]);

        kfree(p->gid_attr_group->type.attrs);
        p->gid_attr_group->type.attrs = NULL;

err_remove_gid_ndev:
        sysfs_remove_group(&p->gid_attr_group->kobj,
                           &p->gid_attr_group->ndev);

err_free_gid_ndev:
        for (i = 0; i < attr.gid_tbl_len; ++i)
                kfree(p->gid_attr_group->ndev.attrs[i]);

        kfree(p->gid_attr_group->ndev.attrs);
        p->gid_attr_group->ndev.attrs = NULL;

err_remove_gid:
        sysfs_remove_group(&p->kobj, &p->gid_group);

err_free_gid:
        for (i = 0; i < attr.gid_tbl_len; ++i)
                kfree(p->gid_group.attrs[i]);

        kfree(p->gid_group.attrs);
        p->gid_group.attrs = NULL;

err_remove_pma:
        sysfs_remove_group(&p->kobj, p->pma_table);

err_put_gid_attrs:
        kobject_put(&p->gid_attr_group->kobj);

err_put:
        kobject_put(&p->kobj);
        return ret;
}
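
/*
 * Resulting layout for one port (device name and table sizes assumed for
 * illustration):
 *
 *   /sys/class/infiniband/mlx4_0/ports/1/
 *       state  lid  lid_mask_count  sm_lid  sm_sl  cap_mask  rate
 *       phys_state  link_layer
 *       counters/...            (group chosen by get_counter_table())
 *       gids/0..N  pkeys/0..M
 *       gid_attrs/ndevs/0..N  gid_attrs/types/0..N
 *
 * plus whatever the driver's port_callback() adds under the port kobject.
 */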

static ssize_t show_node_type(struct device *device,
                              struct device_attribute *attr, char *buf)
{
        struct ib_device *dev = container_of(device, struct ib_device, dev);

        switch (dev->node_type) {
        case RDMA_NODE_IB_CA:     return sprintf(buf, "%d: CA\n", dev->node_type);
        case RDMA_NODE_RNIC:      return sprintf(buf, "%d: RNIC\n", dev->node_type);
        case RDMA_NODE_USNIC:     return sprintf(buf, "%d: usNIC\n", dev->node_type);
        case RDMA_NODE_USNIC_UDP: return sprintf(buf, "%d: usNIC UDP\n", dev->node_type);
        case RDMA_NODE_IB_SWITCH: return sprintf(buf, "%d: switch\n", dev->node_type);
        case RDMA_NODE_IB_ROUTER: return sprintf(buf, "%d: router\n", dev->node_type);
        default:                  return sprintf(buf, "%d: <unknown>\n", dev->node_type);
        }
}

static ssize_t show_sys_image_guid(struct device *device,
                                   struct device_attribute *dev_attr, char *buf)
{
        struct ib_device *dev = container_of(device, struct ib_device, dev);

        return sprintf(buf, "%04x:%04x:%04x:%04x\n",
                       be16_to_cpu(((__be16 *) &dev->attrs.sys_image_guid)[0]),
                       be16_to_cpu(((__be16 *) &dev->attrs.sys_image_guid)[1]),
                       be16_to_cpu(((__be16 *) &dev->attrs.sys_image_guid)[2]),
                       be16_to_cpu(((__be16 *) &dev->attrs.sys_image_guid)[3]));
}

static ssize_t show_node_guid(struct device *device,
                              struct device_attribute *attr, char *buf)
{
        struct ib_device *dev = container_of(device, struct ib_device, dev);

        return sprintf(buf, "%04x:%04x:%04x:%04x\n",
                       be16_to_cpu(((__be16 *) &dev->node_guid)[0]),
                       be16_to_cpu(((__be16 *) &dev->node_guid)[1]),
                       be16_to_cpu(((__be16 *) &dev->node_guid)[2]),
                       be16_to_cpu(((__be16 *) &dev->node_guid)[3]));
}

static ssize_t show_node_desc(struct device *device,
                              struct device_attribute *attr, char *buf)
{
        struct ib_device *dev = container_of(device, struct ib_device, dev);

        return sprintf(buf, "%.64s\n", dev->node_desc);
}

static ssize_t set_node_desc(struct device *device,
                             struct device_attribute *attr,
                             const char *buf, size_t count)
{
        struct ib_device *dev = container_of(device, struct ib_device, dev);
        struct ib_device_modify desc = {};
        int ret;

        if (!dev->modify_device)
                return -EIO;

        memcpy(desc.node_desc, buf, min_t(int, count, 64));
        ret = ib_modify_device(dev, IB_DEVICE_MODIFY_NODE_DESC, &desc);
        if (ret)
                return ret;

        return count;
}

static DEVICE_ATTR(node_type, S_IRUGO, show_node_type, NULL);
static DEVICE_ATTR(sys_image_guid, S_IRUGO, show_sys_image_guid, NULL);
static DEVICE_ATTR(node_guid, S_IRUGO, show_node_guid, NULL);
static DEVICE_ATTR(node_desc, S_IRUGO | S_IWUSR, show_node_desc, set_node_desc);

static struct device_attribute *ib_class_attributes[] = {
        &dev_attr_node_type,
        &dev_attr_sys_image_guid,
        &dev_attr_node_guid,
        &dev_attr_node_desc
};

/* Show a given attribute in the statistics group */
static ssize_t show_protocol_stat(const struct device *device,
                                  struct device_attribute *attr, char *buf,
                                  unsigned offset)
{
        struct ib_device *dev = container_of(device, struct ib_device, dev);
        union rdma_protocol_stats stats;
        ssize_t ret;

        ret = dev->get_protocol_stats(dev, &stats);
        if (ret)
                return ret;

        return sprintf(buf, "%llu\n",
                       (unsigned long long) ((u64 *) &stats)[offset]);
}

/* generate a read-only iwarp statistics attribute */
#define IW_STATS_ENTRY(name)                                            \
static ssize_t show_##name(struct device *device,                       \
                           struct device_attribute *attr, char *buf)    \
{                                                                       \
        return show_protocol_stat(device, attr, buf,                    \
                                  offsetof(struct iw_protocol_stats, name) / \
                                  sizeof(u64));                         \
}                                                                       \
static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
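
/*
 * offsetof(struct iw_protocol_stats, name) / sizeof(u64) turns the member's
 * byte offset into an index into the union viewed as an array of u64, which
 * is exactly how show_protocol_stat() reads it back: if ipInHdrErrors were
 * the second u64 member, its byte offset would be 8 and the attribute would
 * read ((u64 *)&stats)[1].  This relies on every member of
 * struct iw_protocol_stats being a u64.
 */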

IW_STATS_ENTRY(ipInReceives);
IW_STATS_ENTRY(ipInHdrErrors);
IW_STATS_ENTRY(ipInTooBigErrors);
IW_STATS_ENTRY(ipInNoRoutes);
IW_STATS_ENTRY(ipInAddrErrors);
IW_STATS_ENTRY(ipInUnknownProtos);
IW_STATS_ENTRY(ipInTruncatedPkts);
IW_STATS_ENTRY(ipInDiscards);
IW_STATS_ENTRY(ipInDelivers);
IW_STATS_ENTRY(ipOutForwDatagrams);
IW_STATS_ENTRY(ipOutRequests);
IW_STATS_ENTRY(ipOutDiscards);
IW_STATS_ENTRY(ipOutNoRoutes);
IW_STATS_ENTRY(ipReasmTimeout);
IW_STATS_ENTRY(ipReasmReqds);
IW_STATS_ENTRY(ipReasmOKs);
IW_STATS_ENTRY(ipReasmFails);
IW_STATS_ENTRY(ipFragOKs);
IW_STATS_ENTRY(ipFragFails);
IW_STATS_ENTRY(ipFragCreates);
IW_STATS_ENTRY(ipInMcastPkts);
IW_STATS_ENTRY(ipOutMcastPkts);
IW_STATS_ENTRY(ipInBcastPkts);
IW_STATS_ENTRY(ipOutBcastPkts);
IW_STATS_ENTRY(tcpRtoAlgorithm);
IW_STATS_ENTRY(tcpRtoMin);
IW_STATS_ENTRY(tcpRtoMax);
IW_STATS_ENTRY(tcpMaxConn);
IW_STATS_ENTRY(tcpActiveOpens);
IW_STATS_ENTRY(tcpPassiveOpens);
IW_STATS_ENTRY(tcpAttemptFails);
IW_STATS_ENTRY(tcpEstabResets);
IW_STATS_ENTRY(tcpCurrEstab);
IW_STATS_ENTRY(tcpInSegs);
IW_STATS_ENTRY(tcpOutSegs);
IW_STATS_ENTRY(tcpRetransSegs);
IW_STATS_ENTRY(tcpInErrs);
IW_STATS_ENTRY(tcpOutRsts);

static struct attribute *iw_proto_stats_attrs[] = {
        &dev_attr_ipInReceives.attr,
        &dev_attr_ipInHdrErrors.attr,
        &dev_attr_ipInTooBigErrors.attr,
        &dev_attr_ipInNoRoutes.attr,
        &dev_attr_ipInAddrErrors.attr,
        &dev_attr_ipInUnknownProtos.attr,
        &dev_attr_ipInTruncatedPkts.attr,
        &dev_attr_ipInDiscards.attr,
        &dev_attr_ipInDelivers.attr,
        &dev_attr_ipOutForwDatagrams.attr,
        &dev_attr_ipOutRequests.attr,
        &dev_attr_ipOutDiscards.attr,
        &dev_attr_ipOutNoRoutes.attr,
        &dev_attr_ipReasmTimeout.attr,
        &dev_attr_ipReasmReqds.attr,
        &dev_attr_ipReasmOKs.attr,
        &dev_attr_ipReasmFails.attr,
        &dev_attr_ipFragOKs.attr,
        &dev_attr_ipFragFails.attr,
        &dev_attr_ipFragCreates.attr,
        &dev_attr_ipInMcastPkts.attr,
        &dev_attr_ipOutMcastPkts.attr,
        &dev_attr_ipInBcastPkts.attr,
        &dev_attr_ipOutBcastPkts.attr,
        &dev_attr_tcpRtoAlgorithm.attr,
        &dev_attr_tcpRtoMin.attr,
        &dev_attr_tcpRtoMax.attr,
        &dev_attr_tcpMaxConn.attr,
        &dev_attr_tcpActiveOpens.attr,
        &dev_attr_tcpPassiveOpens.attr,
        &dev_attr_tcpAttemptFails.attr,
        &dev_attr_tcpEstabResets.attr,
        &dev_attr_tcpCurrEstab.attr,
        &dev_attr_tcpInSegs.attr,
        &dev_attr_tcpOutSegs.attr,
        &dev_attr_tcpRetransSegs.attr,
        &dev_attr_tcpInErrs.attr,
        &dev_attr_tcpOutRsts.attr,
        NULL
};

static struct attribute_group iw_stats_group = {
        .name  = "proto_stats",
        .attrs = iw_proto_stats_attrs,
};

static void free_port_list_attributes(struct ib_device *device)
{
        struct kobject *p, *t;

        list_for_each_entry_safe(p, t, &device->port_list, entry) {
                struct ib_port *port = container_of(p, struct ib_port, kobj);
                list_del(&p->entry);
                sysfs_remove_group(p, port->pma_table);
                sysfs_remove_group(p, &port->pkey_group);
                sysfs_remove_group(p, &port->gid_group);
                sysfs_remove_group(&port->gid_attr_group->kobj,
                                   &port->gid_attr_group->ndev);
                sysfs_remove_group(&port->gid_attr_group->kobj,
                                   &port->gid_attr_group->type);
                kobject_put(&port->gid_attr_group->kobj);
                kobject_put(p);
        }

        kobject_put(device->ports_parent);
}

int ib_device_register_sysfs(struct ib_device *device,
                             int (*port_callback)(struct ib_device *,
                                                  u8, struct kobject *))
{
        struct device *class_dev = &device->dev;
        int ret;
        int i;

        device->dev.parent = device->dma_device;
        ret = dev_set_name(class_dev, "%s", device->name);
        if (ret)
                return ret;

        ret = device_add(class_dev);
        if (ret)
                goto err;

        for (i = 0; i < ARRAY_SIZE(ib_class_attributes); ++i) {
                ret = device_create_file(class_dev, ib_class_attributes[i]);
                if (ret)
                        goto err_unregister;
        }

        device->ports_parent = kobject_create_and_add("ports",
                                                      &class_dev->kobj);
        if (!device->ports_parent) {
                ret = -ENOMEM;
                goto err_put;
        }

        if (rdma_cap_ib_switch(device)) {
                ret = add_port(device, 0, port_callback);
                if (ret)
                        goto err_put;
        } else {
                for (i = 1; i <= device->phys_port_cnt; ++i) {
                        ret = add_port(device, i, port_callback);
                        if (ret)
                                goto err_put;
                }
        }

        if (device->node_type == RDMA_NODE_RNIC && device->get_protocol_stats) {
                ret = sysfs_create_group(&class_dev->kobj, &iw_stats_group);
                if (ret)
                        goto err_put;
        }

        return 0;

err_put:
        free_port_list_attributes(device);

err_unregister:
        device_unregister(class_dev);

err:
        return ret;
}
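
/*
 * Usage note: this is called during device registration with an optional
 * port_callback, which lets the hardware driver hang extra, driver-specific
 * attributes off each ports/<N>/ kobject after the common files above have
 * been created; passing NULL simply skips that step.
 */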

void ib_device_unregister_sysfs(struct ib_device *device)
{
        /* Hold kobject until ib_dealloc_device() */
        struct kobject *kobj_dev = kobject_get(&device->dev.kobj);
        int i;

        if (device->node_type == RDMA_NODE_RNIC && device->get_protocol_stats)
                sysfs_remove_group(kobj_dev, &iw_stats_group);

        free_port_list_attributes(device);

        for (i = 0; i < ARRAY_SIZE(ib_class_attributes); ++i)
                device_remove_file(&device->dev, ib_class_attributes[i]);

        device_unregister(&device->dev);
}