/*
 * Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2018 Vadim Pasternak <vadimp@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_data/mlxreg.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/regmap.h>
#include <linux/workqueue.h>

/* Offset of event and mask registers from status register. */
#define MLXREG_HOTPLUG_EVENT_OFF	1
#define MLXREG_HOTPLUG_MASK_OFF		2
#define MLXREG_HOTPLUG_AGGR_MASK_OFF	1

/* ASIC health parameters. */
#define MLXREG_HOTPLUG_HEALTH_MASK	0x02
#define MLXREG_HOTPLUG_RST_CNTR		3

#define MLXREG_HOTPLUG_ATTRS_MAX	24
#define MLXREG_HOTPLUG_NOT_ASSERT	3

/**
 * struct mlxreg_hotplug_priv_data - platform private data:
 * @irq: platform device interrupt number;
 * @dev: basic device;
 * @pdev: platform device;
 * @plat: platform data;
 * @regmap: register map handle;
 * @dwork_irq: delayed work template;
 * @lock: spin lock;
 * @hwmon: hwmon device;
 * @mlxreg_hotplug_attr: sysfs attributes array;
 * @mlxreg_hotplug_dev_attr: sysfs sensor device attribute array;
 * @group: sysfs attribute group;
 * @groups: list of sysfs attribute group for hwmon registration;
 * @cell: location of top aggregation interrupt register;
 * @mask: top aggregation interrupt common mask;
 * @aggr_cache: last value of aggregation register status;
 * @after_probe: flag indicating probing completion;
 * @not_asserted: number of entries in workqueue with no signal assertion;
 */
struct mlxreg_hotplug_priv_data {
        int irq;
        struct device *dev;
        struct platform_device *pdev;
        struct mlxreg_hotplug_platform_data *plat;
        struct regmap *regmap;
        struct delayed_work dwork_irq;
        spinlock_t lock; /* sync with interrupt */
        struct device *hwmon;
        struct attribute *mlxreg_hotplug_attr[MLXREG_HOTPLUG_ATTRS_MAX + 1];
        struct sensor_device_attribute_2
                        mlxreg_hotplug_dev_attr[MLXREG_HOTPLUG_ATTRS_MAX];
        struct attribute_group group;
        const struct attribute_group *groups[2];
        u32 cell;
        u32 mask;
        u32 aggr_cache;
        bool after_probe;
        u8 not_asserted;
};

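/*
 * Create the I2C device for a hot-plugged component: take a reference to
 * the adapter the component sits behind and instantiate the client from
 * the board info provided through platform data.
 */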
static int mlxreg_hotplug_device_create(struct mlxreg_hotplug_priv_data *priv,
                                        struct mlxreg_core_data *data)
{
        struct mlxreg_core_hotplug_platform_data *pdata;

        /*
         * Return if adapter number is negative. It could be in case hotplug
         * event is not associated with hotplug device.
         */
        if (data->hpdev.nr < 0)
                return 0;

        pdata = dev_get_platdata(&priv->pdev->dev);
        data->hpdev.adapter = i2c_get_adapter(data->hpdev.nr +
                                              pdata->shift_nr);
        if (!data->hpdev.adapter) {
                dev_err(priv->dev, "Failed to get adapter for bus %d\n",
                        data->hpdev.nr + pdata->shift_nr);
                return -EFAULT;
        }

        data->hpdev.client = i2c_new_device(data->hpdev.adapter,
                                            data->hpdev.brdinfo);
        if (!data->hpdev.client) {
                dev_err(priv->dev, "Failed to create client %s at bus %d at addr 0x%02x\n",
                        data->hpdev.brdinfo->type, data->hpdev.nr +
                        pdata->shift_nr, data->hpdev.brdinfo->addr);

                i2c_put_adapter(data->hpdev.adapter);
                data->hpdev.adapter = NULL;
                return -EFAULT;
        }

        return 0;
}

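/* Unregister the I2C client and drop the adapter reference, if attached. */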
static void mlxreg_hotplug_device_destroy(struct mlxreg_core_data *data)
{
        if (data->hpdev.client) {
                i2c_unregister_device(data->hpdev.client);
                data->hpdev.client = NULL;
        }

        if (data->hpdev.adapter) {
                i2c_put_adapter(data->hpdev.adapter);
                data->hpdev.adapter = NULL;
        }
}

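/*
 * sysfs show callback: read the status register behind the attribute and
 * report either the raw health bits or the presence state, taking the
 * per-item signal polarity into account.
 */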
static ssize_t mlxreg_hotplug_attr_show(struct device *dev,
                                        struct device_attribute *attr,
                                        char *buf)
{
        struct mlxreg_hotplug_priv_data *priv = dev_get_drvdata(dev);
        struct mlxreg_core_hotplug_platform_data *pdata;
        int index = to_sensor_dev_attr_2(attr)->index;
        int nr = to_sensor_dev_attr_2(attr)->nr;
        struct mlxreg_core_item *item;
        struct mlxreg_core_data *data;
        u32 regval;
        int ret;

        pdata = dev_get_platdata(&priv->pdev->dev);
        item = pdata->items + nr;
        data = item->data + index;

        ret = regmap_read(priv->regmap, data->reg, &regval);
        if (ret)
                return ret;

        if (item->health) {
                regval &= data->mask;
        } else {
                /* Bit = 0 : functional if item->inversed is true. */
                if (item->inversed)
                        regval = !(regval & data->mask);
                else
                        regval = !!(regval & data->mask);
        }

        return sprintf(buf, "%u\n", regval);
}

#define PRIV_ATTR(i) priv->mlxreg_hotplug_attr[i]
#define PRIV_DEV_ATTR(i) priv->mlxreg_hotplug_dev_attr[i]

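/*
 * Build the sysfs attribute and attribute-group arrays for all units
 * described in the platform data, so they can be registered with hwmon.
 */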
static int mlxreg_hotplug_attr_init(struct mlxreg_hotplug_priv_data *priv)
{
        struct mlxreg_core_hotplug_platform_data *pdata;
        struct mlxreg_core_item *item;
        struct mlxreg_core_data *data;
        int num_attrs = 0, id = 0, i, j;

        pdata = dev_get_platdata(&priv->pdev->dev);
        item = pdata->items;

        /* Go over all kinds of items - psu, pwr, fan. */
        for (i = 0; i < pdata->counter; i++, item++) {
                num_attrs += item->count;
                data = item->data;
                /* Go over all units within the item. */
                for (j = 0; j < item->count; j++, data++, id++) {
                        PRIV_ATTR(id) = &PRIV_DEV_ATTR(id).dev_attr.attr;
                        PRIV_ATTR(id)->name = devm_kasprintf(&priv->pdev->dev,
                                                             GFP_KERNEL,
                                                             data->label);

                        if (!PRIV_ATTR(id)->name) {
                                dev_err(priv->dev, "Memory allocation failed for attr %d.\n",
                                        id);
                                return -ENOMEM;
                        }

                        PRIV_DEV_ATTR(id).dev_attr.attr.name =
                                                        PRIV_ATTR(id)->name;
                        PRIV_DEV_ATTR(id).dev_attr.attr.mode = 0444;
                        PRIV_DEV_ATTR(id).dev_attr.show =
                                                mlxreg_hotplug_attr_show;
                        PRIV_DEV_ATTR(id).nr = i;
                        PRIV_DEV_ATTR(id).index = j;
                        sysfs_attr_init(&PRIV_DEV_ATTR(id).dev_attr.attr);
                }
        }

        priv->group.attrs = devm_kcalloc(&priv->pdev->dev,
                                         num_attrs,
                                         sizeof(struct attribute *),
                                         GFP_KERNEL);
        if (!priv->group.attrs)
                return -ENOMEM;

        priv->group.attrs = priv->mlxreg_hotplug_attr;
        priv->groups[0] = &priv->group;
        priv->groups[1] = NULL;

        return 0;
}

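/*
 * Handle a single signal group (psu, pwr or fan): mask the group event,
 * read the current status, create or destroy devices for every bit that
 * toggled since the last run, then acknowledge and unmask the event.
 */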
static void
mlxreg_hotplug_work_helper(struct mlxreg_hotplug_priv_data *priv,
                           struct mlxreg_core_item *item)
{
        struct mlxreg_core_data *data;
        u32 asserted, regval, bit;
        int ret;

        /*
         * Validate if item related to received signal type is valid.
         * It should never happen, except when some piece of hardware is
         * broken. In such a situation just produce an error message and
         * return. The caller must continue to handle the signals from
         * other devices, if any.
         */
        if (unlikely(!item)) {
                dev_err(priv->dev, "False signal is received.\n");
                return;
        }

        /* Mask event. */
        ret = regmap_write(priv->regmap, item->reg + MLXREG_HOTPLUG_MASK_OFF,
                           0);
        if (ret)
                goto out;

        /* Read status. */
        ret = regmap_read(priv->regmap, item->reg, &regval);
        if (ret)
                goto out;

        /* Set asserted bits and save last status. */
        regval &= item->mask;
        asserted = item->cache ^ regval;
        item->cache = regval;

        for_each_set_bit(bit, (unsigned long *)&asserted, 8) {
                data = item->data + bit;
                if (regval & BIT(bit)) {
                        if (item->inversed)
                                mlxreg_hotplug_device_destroy(data);
                        else
                                mlxreg_hotplug_device_create(priv, data);
                } else {
                        if (item->inversed)
                                mlxreg_hotplug_device_create(priv, data);
                        else
                                mlxreg_hotplug_device_destroy(data);
                }
        }

        /* Acknowledge event. */
        ret = regmap_write(priv->regmap, item->reg + MLXREG_HOTPLUG_EVENT_OFF,
                           0);
        if (ret)
                goto out;

        /* Unmask event. */
        ret = regmap_write(priv->regmap, item->reg + MLXREG_HOTPLUG_MASK_OFF,
                           item->mask);

out:
        if (ret)
                dev_err(priv->dev, "Failed to complete workqueue.\n");
}

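/*
 * Handle ASIC health signals: attach the health device once the "good"
 * value is reported (debounced by a counter after probe) and detach it
 * again when health is lost.
 */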
static void
mlxreg_hotplug_health_work_helper(struct mlxreg_hotplug_priv_data *priv,
                                  struct mlxreg_core_item *item)
{
        struct mlxreg_core_data *data = item->data;
        u32 regval;
        int i, ret = 0;

        for (i = 0; i < item->count; i++, data++) {
                /* Mask event. */
                ret = regmap_write(priv->regmap, data->reg +
                                   MLXREG_HOTPLUG_MASK_OFF, 0);
                if (ret)
                        goto out;

                /* Read status. */
                ret = regmap_read(priv->regmap, data->reg, &regval);
                if (ret)
                        goto out;

                regval &= data->mask;
                item->cache = regval;
                if (regval == MLXREG_HOTPLUG_HEALTH_MASK) {
                        if ((data->health_cntr++ == MLXREG_HOTPLUG_RST_CNTR) ||
                            !priv->after_probe) {
                                mlxreg_hotplug_device_create(priv, data);
                                data->attached = true;
                        }
                } else {
                        if (data->attached) {
                                mlxreg_hotplug_device_destroy(data);
                                data->attached = false;
                                data->health_cntr = 0;
                        }
                }

                /* Acknowledge event. */
                ret = regmap_write(priv->regmap, data->reg +
                                   MLXREG_HOTPLUG_EVENT_OFF, 0);
                if (ret)
                        goto out;

                /* Unmask event. */
                ret = regmap_write(priv->regmap, data->reg +
                                   MLXREG_HOTPLUG_MASK_OFF, data->mask);
                if (ret)
                        goto out;
        }

out:
        if (ret)
                dev_err(priv->dev, "Failed to complete workqueue.\n");
}

/*
 * mlxreg_hotplug_work_handler - performs traversing of device interrupt
 * registers according to the below hierarchy schema:
 *
 *                              Aggregation registers (status/mask)
 * PSU registers:               *---*
 * *-----------------*          |   |
 * |status/event/mask|----->    | * |
 * *-----------------*          |   |
 * Power registers:             |   |
 * *-----------------*          |   |
 * |status/event/mask|----->    | * |
 * *-----------------*          |   |
 * FAN registers:               |   |--> CPU
 * *-----------------*          |   |
 * |status/event/mask|----->    | * |
 * *-----------------*          |   |
 * ASIC registers:              |   |
 * *-----------------*          |   |
 * |status/event/mask|----->    | * |
 * *-----------------*          |   |
 *                              *---*
 *
 * In case some system changes are detected: FAN in/out, PSU in/out, power
 * cable attached/detached, ASIC health good/bad, the relevant device is
 * created or destroyed.
 */
static void mlxreg_hotplug_work_handler(struct work_struct *work)
{
        struct mlxreg_core_hotplug_platform_data *pdata;
        struct mlxreg_hotplug_priv_data *priv;
        struct mlxreg_core_item *item;
        u32 regval, aggr_asserted;
        unsigned long flags;
        int i, ret;

        priv = container_of(work, struct mlxreg_hotplug_priv_data,
                            dwork_irq.work);
        pdata = dev_get_platdata(&priv->pdev->dev);
        item = pdata->items;

        /* Mask aggregation event. */
        ret = regmap_write(priv->regmap, pdata->cell +
                           MLXREG_HOTPLUG_AGGR_MASK_OFF, 0);
        if (ret < 0)
                goto out;

        /* Read aggregation status. */
        ret = regmap_read(priv->regmap, pdata->cell, &regval);
        if (ret)
                goto out;

        regval &= pdata->mask;
        aggr_asserted = priv->aggr_cache ^ regval;
        priv->aggr_cache = regval;

        /*
         * Handler is invoked, but no assertion is detected at top aggregation
         * status level. Set aggr_asserted to the mask value to allow the
         * handler an extra run over all relevant signals to recover any
         * missed signal.
         */
        if (priv->not_asserted == MLXREG_HOTPLUG_NOT_ASSERT) {
                priv->not_asserted = 0;
                aggr_asserted = pdata->mask;
        }
        if (!aggr_asserted)
                goto unmask_event;

        /* Handle topology and health configuration changes. */
        for (i = 0; i < pdata->counter; i++, item++) {
                if (aggr_asserted & item->aggr_mask) {
                        if (item->health)
                                mlxreg_hotplug_health_work_helper(priv, item);
                        else
                                mlxreg_hotplug_work_helper(priv, item);
                }
        }

        spin_lock_irqsave(&priv->lock, flags);

        /*
         * It is possible that some signals have been inserted while the
         * interrupt has been masked by mlxreg_hotplug_work_handler. In this
         * case such signals will be missed. In order to handle them, the
         * delayed work is canceled and the work task is re-scheduled for
         * immediate execution, which allows missed signals, if any, to be
         * handled. Otherwise the work handler just validates that no new
         * signals have been received during masking.
         */
        cancel_delayed_work(&priv->dwork_irq);
        schedule_delayed_work(&priv->dwork_irq, 0);

        spin_unlock_irqrestore(&priv->lock, flags);

        return;

unmask_event:
        priv->not_asserted++;
        /* Unmask aggregation event (no need to acknowledge). */
        ret = regmap_write(priv->regmap, pdata->cell +
                           MLXREG_HOTPLUG_AGGR_MASK_OFF, pdata->mask);

out:
        if (ret)
                dev_err(priv->dev, "Failed to complete workqueue.\n");
}

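/*
 * Perform the initial interrupt setup: clear stale events, unmask group
 * and aggregation events, invoke the work handler once to pick up devices
 * that are already present, then enable the IRQ line.
 */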
static int mlxreg_hotplug_set_irq(struct mlxreg_hotplug_priv_data *priv)
{
        struct mlxreg_core_hotplug_platform_data *pdata;
        struct mlxreg_core_item *item;
        int i, ret;

        pdata = dev_get_platdata(&priv->pdev->dev);
        item = pdata->items;

        for (i = 0; i < pdata->counter; i++, item++) {
                /* Clear group presence event. */
                ret = regmap_write(priv->regmap, item->reg +
                                   MLXREG_HOTPLUG_EVENT_OFF, 0);
                if (ret)
                        goto out;

                /* Set group initial status as mask and unmask group event. */
                if (item->inversed) {
                        item->cache = item->mask;
                        ret = regmap_write(priv->regmap, item->reg +
                                           MLXREG_HOTPLUG_MASK_OFF,
                                           item->mask);
                        if (ret)
                                goto out;
                }
        }

        /* Keep aggregation initial status as zero and unmask events. */
        ret = regmap_write(priv->regmap, pdata->cell +
                           MLXREG_HOTPLUG_AGGR_MASK_OFF, pdata->mask);
        if (ret)
                goto out;

        /* Keep low aggregation initial status as zero and unmask events. */
        if (pdata->cell_low) {
                ret = regmap_write(priv->regmap, pdata->cell_low +
                                   MLXREG_HOTPLUG_AGGR_MASK_OFF,
                                   pdata->mask_low);
                if (ret)
                        goto out;
        }

        /* Invoke work handler for initializing hot plug devices setting. */
        mlxreg_hotplug_work_handler(&priv->dwork_irq.work);

out:
        if (ret)
                dev_err(priv->dev, "Failed to set interrupts.\n");
        enable_irq(priv->irq);
        return ret;
}

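/*
 * Tear down the interrupt setup: disable the IRQ, cancel pending work,
 * mask all aggregation and group events and destroy the attached devices.
 */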
static void mlxreg_hotplug_unset_irq(struct mlxreg_hotplug_priv_data *priv)
{
        struct mlxreg_core_hotplug_platform_data *pdata;
        struct mlxreg_core_item *item;
        struct mlxreg_core_data *data;
        int count, i, j;

        pdata = dev_get_platdata(&priv->pdev->dev);
        item = pdata->items;
        disable_irq(priv->irq);
        cancel_delayed_work_sync(&priv->dwork_irq);

        /* Mask low aggregation event, if defined. */
        if (pdata->cell_low)
                regmap_write(priv->regmap, pdata->cell_low +
                             MLXREG_HOTPLUG_AGGR_MASK_OFF, 0);

        /* Mask aggregation event. */
        regmap_write(priv->regmap, pdata->cell + MLXREG_HOTPLUG_AGGR_MASK_OFF,
                     0);

        /* Clear topology configurations. */
        for (i = 0; i < pdata->counter; i++, item++) {
                data = item->data;
                /* Mask group presence event. */
                regmap_write(priv->regmap, data->reg + MLXREG_HOTPLUG_MASK_OFF,
                             0);
                /* Clear group presence event. */
                regmap_write(priv->regmap, data->reg +
                             MLXREG_HOTPLUG_EVENT_OFF, 0);

                /* Remove all the attached devices in group. */
                count = item->count;
                for (j = 0; j < count; j++, data++)
                        mlxreg_hotplug_device_destroy(data);
        }
}

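/* Top-half interrupt handler: just schedule the work task to run immediately. */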
static irqreturn_t mlxreg_hotplug_irq_handler(int irq, void *dev)
{
        struct mlxreg_hotplug_priv_data *priv;

        priv = (struct mlxreg_hotplug_priv_data *)dev;

        /* Schedule work task for immediate execution. */
        schedule_delayed_work(&priv->dwork_irq, 0);

        return IRQ_HANDLED;
}

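/*
 * Driver probe: validate platform data, defer until the required I2C
 * adapter is available, request the shared falling-edge IRQ (kept disabled
 * until the initial setup completes) and register the hwmon device.
 */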
static int mlxreg_hotplug_probe(struct platform_device *pdev)
{
        struct mlxreg_core_hotplug_platform_data *pdata;
        struct mlxreg_hotplug_priv_data *priv;
        struct i2c_adapter *deferred_adap;
        int err;

        pdata = dev_get_platdata(&pdev->dev);
        if (!pdata) {
                dev_err(&pdev->dev, "Failed to get platform data.\n");
                return -EINVAL;
        }

        /* Defer probing if the necessary adapter is not configured yet. */
        deferred_adap = i2c_get_adapter(pdata->deferred_nr);
        if (!deferred_adap)
                return -EPROBE_DEFER;
        i2c_put_adapter(deferred_adap);

        priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        if (pdata->irq) {
                priv->irq = pdata->irq;
        } else {
                priv->irq = platform_get_irq(pdev, 0);
                if (priv->irq < 0) {
                        dev_err(&pdev->dev, "Failed to get platform irq: %d\n",
                                priv->irq);
                        return priv->irq;
                }
        }

        priv->regmap = pdata->regmap;
        priv->dev = pdev->dev.parent;
        priv->pdev = pdev;

        err = devm_request_irq(&pdev->dev, priv->irq,
                               mlxreg_hotplug_irq_handler, IRQF_TRIGGER_FALLING
                               | IRQF_SHARED, "mlxreg-hotplug", priv);
        if (err) {
                dev_err(&pdev->dev, "Failed to request irq: %d\n", err);
                return err;
        }

        disable_irq(priv->irq);
        spin_lock_init(&priv->lock);
        INIT_DELAYED_WORK(&priv->dwork_irq, mlxreg_hotplug_work_handler);
        /* Perform initial interrupts setup. */
        mlxreg_hotplug_set_irq(priv);

        priv->after_probe = true;
        dev_set_drvdata(&pdev->dev, priv);

        err = mlxreg_hotplug_attr_init(priv);
        if (err) {
                dev_err(&pdev->dev, "Failed to allocate attributes: %d\n",
                        err);
                return err;
        }

        priv->hwmon = devm_hwmon_device_register_with_groups(&pdev->dev,
                                        "mlxreg_hotplug", priv, priv->groups);
        if (IS_ERR(priv->hwmon)) {
                dev_err(&pdev->dev, "Failed to register hwmon device %ld\n",
                        PTR_ERR(priv->hwmon));
                return PTR_ERR(priv->hwmon);
        }

        return 0;
}

static int mlxreg_hotplug_remove(struct platform_device *pdev)
{
        struct mlxreg_hotplug_priv_data *priv = dev_get_drvdata(&pdev->dev);

        /* Clean interrupts setup. */
        mlxreg_hotplug_unset_irq(priv);

        return 0;
}

static struct platform_driver mlxreg_hotplug_driver = {
        .driver = {
                .name = "mlxreg-hotplug",
        },
        .probe = mlxreg_hotplug_probe,
        .remove = mlxreg_hotplug_remove,
};

module_platform_driver(mlxreg_hotplug_driver);

MODULE_AUTHOR("Vadim Pasternak <vadimp@mellanox.com>");
MODULE_DESCRIPTION("Mellanox regmap hotplug platform driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("platform:mlxreg-hotplug");