/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: sysfs.c 1349 2004-12-16 21:09:43Z roland $
 */

#include "core_priv.h"

#include <linux/slab.h>
#include <linux/string.h>

#include <rdma/ib_mad.h>
struct ib_port {
        struct kobject         kobj;
        struct ib_device      *ibdev;
        struct attribute_group gid_group;
        struct attribute_group pkey_group;
        u8                     port_num;
};

struct port_attribute {
        struct attribute attr;
        ssize_t (*show)(struct ib_port *, struct port_attribute *, char *buf);
        ssize_t (*store)(struct ib_port *, struct port_attribute *,
                         const char *buf, size_t count);
};

#define PORT_ATTR(_name, _mode, _show, _store) \
struct port_attribute port_attr_##_name = __ATTR(_name, _mode, _show, _store)

#define PORT_ATTR_RO(_name) \
struct port_attribute port_attr_##_name = __ATTR_RO(_name)

struct port_table_attribute {
        struct port_attribute attr;
        char                  name[8];
        int                   index;
};

static inline int ibdev_is_alive(const struct ib_device *dev)
{
        return dev->reg_state == IB_DEV_REGISTERED;
}
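
/*
 * sysfs show() dispatcher for per-port attributes: recover the
 * port_attribute wrapper and refuse to touch a device that is no
 * longer registered.
 */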
static ssize_t port_attr_show(struct kobject *kobj,
                              struct attribute *attr, char *buf)
{
        struct port_attribute *port_attr =
                container_of(attr, struct port_attribute, attr);
        struct ib_port *p = container_of(kobj, struct ib_port, kobj);

        if (!port_attr->show)
                return -EIO;
        if (!ibdev_is_alive(p->ibdev))
                return -ENODEV;

        return port_attr->show(p, port_attr, buf);
}

static struct sysfs_ops port_sysfs_ops = {
        .show = port_attr_show
};

static ssize_t state_show(struct ib_port *p, struct port_attribute *unused,
                          char *buf)
{
        struct ib_port_attr attr;
        ssize_t ret;

        static const char *state_name[] = {
                [IB_PORT_NOP]          = "NOP",
                [IB_PORT_DOWN]         = "DOWN",
                [IB_PORT_INIT]         = "INIT",
                [IB_PORT_ARMED]        = "ARMED",
                [IB_PORT_ACTIVE]       = "ACTIVE",
                [IB_PORT_ACTIVE_DEFER] = "ACTIVE_DEFER"
        };

        ret = ib_query_port(p->ibdev, p->port_num, &attr);
        if (ret)
                return ret;

        return sprintf(buf, "%d: %s\n", attr.state,
                       attr.state >= 0 && attr.state < ARRAY_SIZE(state_name) ?
                       state_name[attr.state] : "UNKNOWN");
}

static ssize_t lid_show(struct ib_port *p, struct port_attribute *unused,
                        char *buf)
{
        struct ib_port_attr attr;
        ssize_t ret;

        ret = ib_query_port(p->ibdev, p->port_num, &attr);
        if (ret)
                return ret;

        return sprintf(buf, "0x%x\n", attr.lid);
}

static ssize_t lid_mask_count_show(struct ib_port *p,
                                   struct port_attribute *unused,
                                   char *buf)
{
        struct ib_port_attr attr;
        ssize_t ret;

        ret = ib_query_port(p->ibdev, p->port_num, &attr);
        if (ret)
                return ret;

        return sprintf(buf, "%d\n", attr.lmc);
}

static ssize_t sm_lid_show(struct ib_port *p, struct port_attribute *unused,
                           char *buf)
{
        struct ib_port_attr attr;
        ssize_t ret;

        ret = ib_query_port(p->ibdev, p->port_num, &attr);
        if (ret)
                return ret;

        return sprintf(buf, "0x%x\n", attr.sm_lid);
}

static ssize_t sm_sl_show(struct ib_port *p, struct port_attribute *unused,
                          char *buf)
{
        struct ib_port_attr attr;
        ssize_t ret;

        ret = ib_query_port(p->ibdev, p->port_num, &attr);
        if (ret)
                return ret;

        return sprintf(buf, "%d\n", attr.sm_sl);
}

static ssize_t cap_mask_show(struct ib_port *p, struct port_attribute *unused,
                             char *buf)
{
        struct ib_port_attr attr;
        ssize_t ret;

        ret = ib_query_port(p->ibdev, p->port_num, &attr);
        if (ret)
                return ret;

        return sprintf(buf, "0x%08x\n", attr.port_cap_flags);
}
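
/*
 * Link rate in units of 0.1 Gb/sec: 2.5 Gb/sec per lane (SDR), scaled
 * by the active link width and the active speed multiplier
 * (1 = SDR, 2 = DDR, 4 = QDR).
 */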
static ssize_t rate_show(struct ib_port *p, struct port_attribute *unused,
                         char *buf)
{
        struct ib_port_attr attr;
        char *speed = "";
        int rate;
        ssize_t ret;

        ret = ib_query_port(p->ibdev, p->port_num, &attr);
        if (ret)
                return ret;

        switch (attr.active_speed) {
        case 2: speed = " DDR"; break;
        case 4: speed = " QDR"; break;
        }

        rate = 25 * ib_width_enum_to_int(attr.active_width) * attr.active_speed;
        if (rate < 0)
                return -EINVAL;

        return sprintf(buf, "%d%s Gb/sec (%dX%s)\n",
                       rate / 10, rate % 10 ? ".5" : "",
                       ib_width_enum_to_int(attr.active_width), speed);
}

static ssize_t phys_state_show(struct ib_port *p, struct port_attribute *unused,
                               char *buf)
{
        struct ib_port_attr attr;
        ssize_t ret;

        ret = ib_query_port(p->ibdev, p->port_num, &attr);
        if (ret)
                return ret;

        switch (attr.phys_state) {
        case 1:  return sprintf(buf, "1: Sleep\n");
        case 2:  return sprintf(buf, "2: Polling\n");
        case 3:  return sprintf(buf, "3: Disabled\n");
        case 4:  return sprintf(buf, "4: PortConfigurationTraining\n");
        case 5:  return sprintf(buf, "5: LinkUp\n");
        case 6:  return sprintf(buf, "6: LinkErrorRecovery\n");
        case 7:  return sprintf(buf, "7: Phy Test\n");
        default: return sprintf(buf, "%d: <unknown>\n", attr.phys_state);
        }
}

static PORT_ATTR_RO(state);
static PORT_ATTR_RO(lid);
static PORT_ATTR_RO(lid_mask_count);
static PORT_ATTR_RO(sm_lid);
static PORT_ATTR_RO(sm_sl);
static PORT_ATTR_RO(cap_mask);
static PORT_ATTR_RO(rate);
static PORT_ATTR_RO(phys_state);

static struct attribute *port_default_attrs[] = {
        &port_attr_state.attr,
        &port_attr_lid.attr,
        &port_attr_lid_mask_count.attr,
        &port_attr_sm_lid.attr,
        &port_attr_sm_sl.attr,
        &port_attr_cap_mask.attr,
        &port_attr_rate.attr,
        &port_attr_phys_state.attr,
        NULL
};

static ssize_t show_port_gid(struct ib_port *p, struct port_attribute *attr,
                             char *buf)
{
        struct port_table_attribute *tab_attr =
                container_of(attr, struct port_table_attribute, attr);
        union ib_gid gid;
        ssize_t ret;

        ret = ib_query_gid(p->ibdev, p->port_num, tab_attr->index, &gid);
        if (ret)
                return ret;

        return sprintf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
                       be16_to_cpu(((__be16 *) gid.raw)[0]),
                       be16_to_cpu(((__be16 *) gid.raw)[1]),
                       be16_to_cpu(((__be16 *) gid.raw)[2]),
                       be16_to_cpu(((__be16 *) gid.raw)[3]),
                       be16_to_cpu(((__be16 *) gid.raw)[4]),
                       be16_to_cpu(((__be16 *) gid.raw)[5]),
                       be16_to_cpu(((__be16 *) gid.raw)[6]),
                       be16_to_cpu(((__be16 *) gid.raw)[7]));
}

static ssize_t show_port_pkey(struct ib_port *p, struct port_attribute *attr,
                              char *buf)
{
        struct port_table_attribute *tab_attr =
                container_of(attr, struct port_table_attribute, attr);
        u16 pkey;
        ssize_t ret;

        ret = ib_query_pkey(p->ibdev, p->port_num, tab_attr->index, &pkey);
        if (ret)
                return ret;

        return sprintf(buf, "0x%04x\n", pkey);
}
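
/*
 * PORT_PMA_ATTR packs the PortCounters field description into the
 * table attribute's index: bits 0-15 hold the bit offset of the field
 * within the counter block, bits 16-23 its width in bits, and bits
 * 24-31 the counter number.
 */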
#define PORT_PMA_ATTR(_name, _counter, _width, _offset)                \
struct port_table_attribute port_pma_attr_##_name = {                  \
        .attr  = __ATTR(_name, S_IRUGO, show_pma_counter, NULL),       \
        .index = (_offset) | ((_width) << 16) | ((_counter) << 24)     \
}
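
/*
 * Read one PMA counter by sending a Performance Management PortCounters
 * GET MAD through the driver's process_mad hook and extracting the
 * requested field from the reply.
 */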
static ssize_t show_pma_counter(struct ib_port *p, struct port_attribute *attr,
                                char *buf)
{
        struct port_table_attribute *tab_attr =
                container_of(attr, struct port_table_attribute, attr);
        int offset = tab_attr->index & 0xffff;
        int width  = (tab_attr->index >> 16) & 0xff;
        struct ib_mad *in_mad  = NULL;
        struct ib_mad *out_mad = NULL;
        ssize_t ret;

        if (!p->ibdev->process_mad)
                return sprintf(buf, "N/A (no PMA)\n");

        in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
        if (!in_mad || !out_mad) {
                ret = -ENOMEM;
                goto out;
        }

        in_mad->mad_hdr.base_version  = 1;
        in_mad->mad_hdr.mgmt_class    = IB_MGMT_CLASS_PERF_MGMT;
        in_mad->mad_hdr.class_version = 1;
        in_mad->mad_hdr.method        = IB_MGMT_METHOD_GET;
        in_mad->mad_hdr.attr_id       = cpu_to_be16(0x12); /* PortCounters */

        in_mad->data[41] = p->port_num; /* PortSelect field */

        if ((p->ibdev->process_mad(p->ibdev, IB_MAD_IGNORE_MKEY,
                 p->port_num, NULL, NULL, in_mad, out_mad) &
             (IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY)) !=
            (IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY)) {
                ret = -EINVAL;
                goto out;
        }

        switch (width) {
        case 4:
                ret = sprintf(buf, "%u\n", (out_mad->data[40 + offset / 8] >>
                                            (4 - (offset % 8))) & 0xf);
                break;
        case 8:
                ret = sprintf(buf, "%u\n", out_mad->data[40 + offset / 8]);
                break;
        case 16:
                ret = sprintf(buf, "%u\n",
                              be16_to_cpup((__be16 *)(out_mad->data + 40 + offset / 8)));
                break;
        case 32:
                ret = sprintf(buf, "%u\n",
                              be32_to_cpup((__be32 *)(out_mad->data + 40 + offset / 8)));
                break;
        default:
                ret = 0;
        }

out:
        kfree(in_mad);
        kfree(out_mad);

        return ret;
}
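
/* Counter name, counter number, field width in bits, bit offset in PortCounters */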
static PORT_PMA_ATTR(symbol_error                    ,  0, 16,  32);
static PORT_PMA_ATTR(link_error_recovery             ,  1,  8,  48);
static PORT_PMA_ATTR(link_downed                     ,  2,  8,  56);
static PORT_PMA_ATTR(port_rcv_errors                 ,  3, 16,  64);
static PORT_PMA_ATTR(port_rcv_remote_physical_errors ,  4, 16,  80);
static PORT_PMA_ATTR(port_rcv_switch_relay_errors    ,  5, 16,  96);
static PORT_PMA_ATTR(port_xmit_discards              ,  6, 16, 112);
static PORT_PMA_ATTR(port_xmit_constraint_errors     ,  7,  8, 128);
static PORT_PMA_ATTR(port_rcv_constraint_errors      ,  8,  8, 136);
static PORT_PMA_ATTR(local_link_integrity_errors     ,  9,  4, 152);
static PORT_PMA_ATTR(excessive_buffer_overrun_errors , 10,  4, 156);
static PORT_PMA_ATTR(VL15_dropped                    , 11, 16, 176);
static PORT_PMA_ATTR(port_xmit_data                  , 12, 32, 192);
static PORT_PMA_ATTR(port_rcv_data                   , 13, 32, 224);
static PORT_PMA_ATTR(port_xmit_packets               , 14, 32, 256);
static PORT_PMA_ATTR(port_rcv_packets                , 15, 32, 288);

static struct attribute *pma_attrs[] = {
        &port_pma_attr_symbol_error.attr.attr,
        &port_pma_attr_link_error_recovery.attr.attr,
        &port_pma_attr_link_downed.attr.attr,
        &port_pma_attr_port_rcv_errors.attr.attr,
        &port_pma_attr_port_rcv_remote_physical_errors.attr.attr,
        &port_pma_attr_port_rcv_switch_relay_errors.attr.attr,
        &port_pma_attr_port_xmit_discards.attr.attr,
        &port_pma_attr_port_xmit_constraint_errors.attr.attr,
        &port_pma_attr_port_rcv_constraint_errors.attr.attr,
        &port_pma_attr_local_link_integrity_errors.attr.attr,
        &port_pma_attr_excessive_buffer_overrun_errors.attr.attr,
        &port_pma_attr_VL15_dropped.attr.attr,
        &port_pma_attr_port_xmit_data.attr.attr,
        &port_pma_attr_port_rcv_data.attr.attr,
        &port_pma_attr_port_xmit_packets.attr.attr,
        &port_pma_attr_port_rcv_packets.attr.attr,
        NULL
};

static struct attribute_group pma_group = {
        .name  = "counters",
        .attrs = pma_attrs
};
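
/*
 * Release a per-port kobject: the gid and pkey attribute arrays (and
 * each attribute in them) were allocated dynamically in add_port(), so
 * free them here before freeing the port itself.
 */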
static void ib_port_release(struct kobject *kobj)
{
        struct ib_port *p = container_of(kobj, struct ib_port, kobj);
        struct attribute *a;
        int i;

        for (i = 0; (a = p->gid_group.attrs[i]); ++i)
                kfree(a);

        kfree(p->gid_group.attrs);

        for (i = 0; (a = p->pkey_group.attrs[i]); ++i)
                kfree(a);

        kfree(p->pkey_group.attrs);

        kfree(p);
}

static struct kobj_type port_type = {
        .release       = ib_port_release,
        .sysfs_ops     = &port_sysfs_ops,
        .default_attrs = port_default_attrs
};

static void ib_device_release(struct class_device *cdev)
{
        struct ib_device *dev = container_of(cdev, struct ib_device, class_dev);

        kfree(dev);
}

static int ib_device_uevent(struct class_device *cdev,
                            struct kobj_uevent_env *env)
{
        struct ib_device *dev = container_of(cdev, struct ib_device, class_dev);

        if (add_uevent_var(env, "NAME=%s", dev->name))
                return -ENOMEM;

        /*
         * It would be nice to pass the node GUID with the event...
         */

        return 0;
}
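
/*
 * Build a NULL-terminated array of per-index table attributes ("0",
 * "1", ...), all sharing the same show() routine; used for the gids
 * and pkeys groups.
 */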
static struct attribute **
alloc_group_attrs(ssize_t (*show)(struct ib_port *,
                                  struct port_attribute *, char *buf),
                  int len)
{
        struct attribute **tab_attr;
        struct port_table_attribute *element;
        int i;

        tab_attr = kcalloc(1 + len, sizeof(struct attribute *), GFP_KERNEL);
        if (!tab_attr)
                return NULL;

        for (i = 0; i < len; i++) {
                element = kzalloc(sizeof(struct port_table_attribute),
                                  GFP_KERNEL);
                if (!element)
                        goto err;

                if (snprintf(element->name, sizeof(element->name),
                             "%d", i) >= sizeof(element->name)) {
                        kfree(element);
                        goto err;
                }

                element->attr.attr.name = element->name;
                element->attr.attr.mode = S_IRUGO;
                element->attr.show      = show;
                element->index          = i;

                tab_attr[i] = &element->attr.attr;
        }

        return tab_attr;

err:
        while (--i >= 0)
                kfree(tab_attr[i]);
        kfree(tab_attr);
        return NULL;
}
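
/*
 * Create the sysfs tree for one port: a kobject under the device's
 * "ports" directory carrying the "counters", "gids" and "pkeys"
 * attribute groups, sized from the port's GID and P_Key table lengths.
 */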
static int add_port(struct ib_device *device, int port_num)
{
        struct ib_port *p;
        struct ib_port_attr attr;
        int i;
        int ret;

        ret = ib_query_port(device, port_num, &attr);
        if (ret)
                return ret;

        p = kzalloc(sizeof *p, GFP_KERNEL);
        if (!p)
                return -ENOMEM;

        p->ibdev    = device;
        p->port_num = port_num;

        ret = kobject_init_and_add(&p->kobj, &port_type,
                                   kobject_get(device->ports_parent),
                                   "%d", port_num);
        if (ret)
                goto err_put;

        ret = sysfs_create_group(&p->kobj, &pma_group);
        if (ret)
                goto err_put;

        p->gid_group.name  = "gids";
        p->gid_group.attrs = alloc_group_attrs(show_port_gid, attr.gid_tbl_len);
        if (!p->gid_group.attrs) {
                ret = -ENOMEM;
                goto err_remove_pma;
        }

        ret = sysfs_create_group(&p->kobj, &p->gid_group);
        if (ret)
                goto err_free_gid;

        p->pkey_group.name  = "pkeys";
        p->pkey_group.attrs = alloc_group_attrs(show_port_pkey,
                                                attr.pkey_tbl_len);
        if (!p->pkey_group.attrs) {
                ret = -ENOMEM;
                goto err_remove_gid;
        }

        ret = sysfs_create_group(&p->kobj, &p->pkey_group);
        if (ret)
                goto err_free_pkey;

        list_add_tail(&p->kobj.entry, &device->port_list);

        kobject_uevent(&p->kobj, KOBJ_ADD);
        return 0;

err_free_pkey:
        for (i = 0; i < attr.pkey_tbl_len; ++i)
                kfree(p->pkey_group.attrs[i]);

        kfree(p->pkey_group.attrs);

err_remove_gid:
        sysfs_remove_group(&p->kobj, &p->gid_group);

err_free_gid:
        for (i = 0; i < attr.gid_tbl_len; ++i)
                kfree(p->gid_group.attrs[i]);

        kfree(p->gid_group.attrs);

err_remove_pma:
        sysfs_remove_group(&p->kobj, &pma_group);

err_put:
        kobject_put(device->ports_parent);
        kfree(p);
        return ret;
}
static ssize_t show_node_type(struct class_device *cdev, char *buf)
{
        struct ib_device *dev = container_of(cdev, struct ib_device, class_dev);

        if (!ibdev_is_alive(dev))
                return -ENODEV;

        switch (dev->node_type) {
        case RDMA_NODE_IB_CA:     return sprintf(buf, "%d: CA\n", dev->node_type);
        case RDMA_NODE_RNIC:      return sprintf(buf, "%d: RNIC\n", dev->node_type);
        case RDMA_NODE_IB_SWITCH: return sprintf(buf, "%d: switch\n", dev->node_type);
        case RDMA_NODE_IB_ROUTER: return sprintf(buf, "%d: router\n", dev->node_type);
        default:                  return sprintf(buf, "%d: <unknown>\n", dev->node_type);
        }
}

static ssize_t show_sys_image_guid(struct class_device *cdev, char *buf)
{
        struct ib_device *dev = container_of(cdev, struct ib_device, class_dev);
        struct ib_device_attr attr;
        ssize_t ret;

        if (!ibdev_is_alive(dev))
                return -ENODEV;

        ret = ib_query_device(dev, &attr);
        if (ret)
                return ret;

        return sprintf(buf, "%04x:%04x:%04x:%04x\n",
                       be16_to_cpu(((__be16 *) &attr.sys_image_guid)[0]),
                       be16_to_cpu(((__be16 *) &attr.sys_image_guid)[1]),
                       be16_to_cpu(((__be16 *) &attr.sys_image_guid)[2]),
                       be16_to_cpu(((__be16 *) &attr.sys_image_guid)[3]));
}

static ssize_t show_node_guid(struct class_device *cdev, char *buf)
{
        struct ib_device *dev = container_of(cdev, struct ib_device, class_dev);

        if (!ibdev_is_alive(dev))
                return -ENODEV;

        return sprintf(buf, "%04x:%04x:%04x:%04x\n",
                       be16_to_cpu(((__be16 *) &dev->node_guid)[0]),
                       be16_to_cpu(((__be16 *) &dev->node_guid)[1]),
                       be16_to_cpu(((__be16 *) &dev->node_guid)[2]),
                       be16_to_cpu(((__be16 *) &dev->node_guid)[3]));
}

static ssize_t show_node_desc(struct class_device *cdev, char *buf)
{
        struct ib_device *dev = container_of(cdev, struct ib_device, class_dev);

        return sprintf(buf, "%.64s\n", dev->node_desc);
}

static ssize_t set_node_desc(struct class_device *cdev, const char *buf,
                             size_t count)
{
        struct ib_device *dev = container_of(cdev, struct ib_device, class_dev);
        struct ib_device_modify desc = {};
        int ret;

        if (!dev->modify_device)
                return -EIO;

        memcpy(desc.node_desc, buf, min_t(int, count, 64));
        ret = ib_modify_device(dev, IB_DEVICE_MODIFY_NODE_DESC, &desc);
        if (ret)
                return ret;

        return count;
}

static CLASS_DEVICE_ATTR(node_type, S_IRUGO, show_node_type, NULL);
static CLASS_DEVICE_ATTR(sys_image_guid, S_IRUGO, show_sys_image_guid, NULL);
static CLASS_DEVICE_ATTR(node_guid, S_IRUGO, show_node_guid, NULL);
static CLASS_DEVICE_ATTR(node_desc, S_IRUGO | S_IWUSR, show_node_desc,
                         set_node_desc);

static struct class_device_attribute *ib_class_attributes[] = {
        &class_device_attr_node_type,
        &class_device_attr_sys_image_guid,
        &class_device_attr_node_guid,
        &class_device_attr_node_desc
};

static struct class ib_class = {
        .name    = "infiniband",
        .release = ib_device_release,
        .uevent  = ib_device_uevent,
};
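
/*
 * Register a device's class_device and create its "ports" directory.
 * Switches expose only port 0; other node types expose ports
 * 1..phys_port_cnt.
 */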
int ib_device_register_sysfs(struct ib_device *device)
{
        struct class_device *class_dev = &device->class_dev;
        int ret;
        int i;

        class_dev->class      = &ib_class;
        class_dev->class_data = device;
        class_dev->dev        = device->dma_device;
        strlcpy(class_dev->class_id, device->name, BUS_ID_SIZE);

        INIT_LIST_HEAD(&device->port_list);

        ret = class_device_register(class_dev);
        if (ret)
                goto err;

        for (i = 0; i < ARRAY_SIZE(ib_class_attributes); ++i) {
                ret = class_device_create_file(class_dev, ib_class_attributes[i]);
                if (ret)
                        goto err_unregister;
        }

        device->ports_parent = kobject_create_and_add("ports",
                                        kobject_get(&class_dev->kobj));
        if (!device->ports_parent) {
                ret = -ENOMEM;
                goto err_put;
        }

        if (device->node_type == RDMA_NODE_IB_SWITCH) {
                ret = add_port(device, 0);
                if (ret)
                        goto err_put;
        } else {
                for (i = 1; i <= device->phys_port_cnt; ++i) {
                        ret = add_port(device, i);
                        if (ret)
                                goto err_put;
                }
        }

        return 0;

err_put:
        {
                struct kobject *p, *t;
                struct ib_port *port;

                list_for_each_entry_safe(p, t, &device->port_list, entry) {
                        list_del(&p->entry);
                        port = container_of(p, struct ib_port, kobj);
                        sysfs_remove_group(p, &pma_group);
                        sysfs_remove_group(p, &port->pkey_group);
                        sysfs_remove_group(p, &port->gid_group);
                        kobject_put(p);
                }
        }

        kobject_put(&class_dev->kobj);

err_unregister:
        class_device_unregister(class_dev);

err:
        return ret;
}

void ib_device_unregister_sysfs(struct ib_device *device)
{
        struct kobject *p, *t;
        struct ib_port *port;

        list_for_each_entry_safe(p, t, &device->port_list, entry) {
                list_del(&p->entry);
                port = container_of(p, struct ib_port, kobj);
                sysfs_remove_group(p, &pma_group);
                sysfs_remove_group(p, &port->pkey_group);
                sysfs_remove_group(p, &port->gid_group);
                kobject_put(p);
        }

        kobject_put(device->ports_parent);
        class_device_unregister(&device->class_dev);
}

int ib_sysfs_setup(void)
{
        return class_register(&ib_class);
}

void ib_sysfs_cleanup(void)
{
        class_unregister(&ib_class);
}