// SPDX-License-Identifier: GPL-2.0
/*
 * platform.c - platform 'pseudo' bus for legacy devices
 *
 * Copyright (c) 2002-3 Patrick Mochel
 * Copyright (c) 2002-3 Open Source Development Labs
 *
 * Please see Documentation/driver-api/driver-model/platform.rst for more
 * information.
 */
12 #include <linux/string.h>
13 #include <linux/platform_device.h>
14 #include <linux/of_device.h>
15 #include <linux/of_irq.h>
16 #include <linux/module.h>
17 #include <linux/init.h>
18 #include <linux/interrupt.h>
19 #include <linux/ioport.h>
20 #include <linux/dma-mapping.h>
21 #include <linux/memblock.h>
22 #include <linux/err.h>
23 #include <linux/slab.h>
24 #include <linux/pm_runtime.h>
25 #include <linux/pm_domain.h>
26 #include <linux/idr.h>
27 #include <linux/acpi.h>
28 #include <linux/clk/clk-conf.h>
29 #include <linux/limits.h>
30 #include <linux/property.h>
31 #include <linux/kmemleak.h>
32 #include <linux/types.h>
35 #include "power/power.h"
/* For automatically allocated device IDs (PLATFORM_DEVID_AUTO) */
static DEFINE_IDA(platform_devid_ida);
40 struct device platform_bus
= {
41 .init_name
= "platform",
43 EXPORT_SYMBOL_GPL(platform_bus
);
46 * platform_get_resource - get a resource for a device
47 * @dev: platform device
48 * @type: resource type
49 * @num: resource index
51 * Return: a pointer to the resource or NULL on failure.
53 struct resource
*platform_get_resource(struct platform_device
*dev
,
54 unsigned int type
, unsigned int num
)
58 for (i
= 0; i
< dev
->num_resources
; i
++) {
59 struct resource
*r
= &dev
->resource
[i
];
61 if (type
== resource_type(r
) && num
-- == 0)
66 EXPORT_SYMBOL_GPL(platform_get_resource
);
68 struct resource
*platform_get_mem_or_io(struct platform_device
*dev
,
73 for (i
= 0; i
< dev
->num_resources
; i
++) {
74 struct resource
*r
= &dev
->resource
[i
];
76 if ((resource_type(r
) & (IORESOURCE_MEM
|IORESOURCE_IO
)) && num
-- == 0)
81 EXPORT_SYMBOL_GPL(platform_get_mem_or_io
);
83 #ifdef CONFIG_HAS_IOMEM
85 * devm_platform_get_and_ioremap_resource - call devm_ioremap_resource() for a
86 * platform device and get resource
88 * @pdev: platform device to use both for memory resource lookup as well as
90 * @index: resource index
91 * @res: optional output parameter to store a pointer to the obtained resource.
93 * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code
97 devm_platform_get_and_ioremap_resource(struct platform_device
*pdev
,
98 unsigned int index
, struct resource
**res
)
102 r
= platform_get_resource(pdev
, IORESOURCE_MEM
, index
);
105 return devm_ioremap_resource(&pdev
->dev
, r
);
107 EXPORT_SYMBOL_GPL(devm_platform_get_and_ioremap_resource
);
110 * devm_platform_ioremap_resource - call devm_ioremap_resource() for a platform
113 * @pdev: platform device to use both for memory resource lookup as well as
114 * resource management
115 * @index: resource index
117 * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code
120 void __iomem
*devm_platform_ioremap_resource(struct platform_device
*pdev
,
123 return devm_platform_get_and_ioremap_resource(pdev
, index
, NULL
);
125 EXPORT_SYMBOL_GPL(devm_platform_ioremap_resource
);
128 * devm_platform_ioremap_resource_wc - write-combined variant of
129 * devm_platform_ioremap_resource()
131 * @pdev: platform device to use both for memory resource lookup as well as
132 * resource management
133 * @index: resource index
135 * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code
138 void __iomem
*devm_platform_ioremap_resource_wc(struct platform_device
*pdev
,
141 struct resource
*res
;
143 res
= platform_get_resource(pdev
, IORESOURCE_MEM
, index
);
144 return devm_ioremap_resource_wc(&pdev
->dev
, res
);
148 * devm_platform_ioremap_resource_byname - call devm_ioremap_resource for
149 * a platform device, retrieve the
152 * @pdev: platform device to use both for memory resource lookup as well as
153 * resource management
154 * @name: name of the resource
156 * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code
160 devm_platform_ioremap_resource_byname(struct platform_device
*pdev
,
163 struct resource
*res
;
165 res
= platform_get_resource_byname(pdev
, IORESOURCE_MEM
, name
);
166 return devm_ioremap_resource(&pdev
->dev
, res
);
168 EXPORT_SYMBOL_GPL(devm_platform_ioremap_resource_byname
);
169 #endif /* CONFIG_HAS_IOMEM */
172 * platform_get_irq_optional - get an optional IRQ for a device
173 * @dev: platform device
174 * @num: IRQ number index
176 * Gets an IRQ for a platform device. Device drivers should check the return
177 * value for errors so as to not pass a negative integer value to the
178 * request_irq() APIs. This is the same as platform_get_irq(), except that it
179 * does not print an error message if an IRQ can not be obtained.
183 * int irq = platform_get_irq_optional(pdev, 0);
187 * Return: non-zero IRQ number on success, negative error number on failure.
189 int platform_get_irq_optional(struct platform_device
*dev
, unsigned int num
)
193 /* sparc does not have irqs represented as IORESOURCE_IRQ resources */
194 if (!dev
|| num
>= dev
->archdata
.num_irqs
)
196 ret
= dev
->archdata
.irqs
[num
];
201 if (IS_ENABLED(CONFIG_OF_IRQ
) && dev
->dev
.of_node
) {
202 ret
= of_irq_get(dev
->dev
.of_node
, num
);
203 if (ret
> 0 || ret
== -EPROBE_DEFER
)
207 r
= platform_get_resource(dev
, IORESOURCE_IRQ
, num
);
208 if (has_acpi_companion(&dev
->dev
)) {
209 if (r
&& r
->flags
& IORESOURCE_DISABLED
) {
210 ret
= acpi_irq_get(ACPI_HANDLE(&dev
->dev
), num
, r
);
217 * The resources may pass trigger flags to the irqs that need
218 * to be set up. It so happens that the trigger flags for
219 * IORESOURCE_BITS correspond 1-to-1 to the IRQF_TRIGGER*
222 if (r
&& r
->flags
& IORESOURCE_BITS
) {
223 struct irq_data
*irqd
;
225 irqd
= irq_get_irq_data(r
->start
);
230 irqd_set_trigger_type(irqd
, r
->flags
& IORESOURCE_BITS
);
239 * For the index 0 interrupt, allow falling back to GpioInt
240 * resources. While a device could have both Interrupt and GpioInt
241 * resources, making this fallback ambiguous, in many common cases
242 * the device will only expose one IRQ, and this fallback
243 * allows a common code path across either kind of resource.
245 if (num
== 0 && has_acpi_companion(&dev
->dev
)) {
246 ret
= acpi_dev_gpio_irq_get(ACPI_COMPANION(&dev
->dev
), num
);
247 /* Our callers expect -ENXIO for missing IRQs. */
248 if (ret
>= 0 || ret
== -EPROBE_DEFER
)
255 WARN(ret
== 0, "0 is an invalid IRQ number\n");
258 EXPORT_SYMBOL_GPL(platform_get_irq_optional
);
261 * platform_get_irq - get an IRQ for a device
262 * @dev: platform device
263 * @num: IRQ number index
265 * Gets an IRQ for a platform device and prints an error message if finding the
266 * IRQ fails. Device drivers should check the return value for errors so as to
267 * not pass a negative integer value to the request_irq() APIs.
271 * int irq = platform_get_irq(pdev, 0);
275 * Return: non-zero IRQ number on success, negative error number on failure.
277 int platform_get_irq(struct platform_device
*dev
, unsigned int num
)
281 ret
= platform_get_irq_optional(dev
, num
);
282 if (ret
< 0 && ret
!= -EPROBE_DEFER
)
283 dev_err(&dev
->dev
, "IRQ index %u not found\n", num
);
287 EXPORT_SYMBOL_GPL(platform_get_irq
);
290 * platform_irq_count - Count the number of IRQs a platform device uses
291 * @dev: platform device
293 * Return: Number of IRQs a platform device uses or EPROBE_DEFER
295 int platform_irq_count(struct platform_device
*dev
)
299 while ((ret
= platform_get_irq_optional(dev
, nr
)) >= 0)
302 if (ret
== -EPROBE_DEFER
)
307 EXPORT_SYMBOL_GPL(platform_irq_count
);
/* Devres payload tracking the IRQs handed out by
 * devm_platform_get_irqs_affinity() so they can be disposed on release. */
struct irq_affinity_devres {
	unsigned int count;	/* number of valid entries in irq[] */
	int irq[];		/* flexible array of Linux IRQ numbers */
};
314 static void platform_disable_acpi_irq(struct platform_device
*pdev
, int index
)
318 r
= platform_get_resource(pdev
, IORESOURCE_IRQ
, index
);
320 irqresource_disabled(r
, 0);
323 static void devm_platform_get_irqs_affinity_release(struct device
*dev
,
326 struct irq_affinity_devres
*ptr
= res
;
329 for (i
= 0; i
< ptr
->count
; i
++) {
330 irq_dispose_mapping(ptr
->irq
[i
]);
332 if (has_acpi_companion(dev
))
333 platform_disable_acpi_irq(to_platform_device(dev
), i
);
338 * devm_platform_get_irqs_affinity - devm method to get a set of IRQs for a
339 * device using an interrupt affinity descriptor
340 * @dev: platform device pointer
341 * @affd: affinity descriptor
342 * @minvec: minimum count of interrupt vectors
343 * @maxvec: maximum count of interrupt vectors
344 * @irqs: pointer holder for IRQ numbers
346 * Gets a set of IRQs for a platform device, and updates IRQ afffinty according
347 * to the passed affinity descriptor
349 * Return: Number of vectors on success, negative error number on failure.
351 int devm_platform_get_irqs_affinity(struct platform_device
*dev
,
352 struct irq_affinity
*affd
,
357 struct irq_affinity_devres
*ptr
;
358 struct irq_affinity_desc
*desc
;
368 nvec
= platform_irq_count(dev
);
373 nvec
= irq_calc_affinity_vectors(minvec
, nvec
, affd
);
380 size
= sizeof(*ptr
) + sizeof(unsigned int) * nvec
;
381 ptr
= devres_alloc(devm_platform_get_irqs_affinity_release
, size
,
388 for (i
= 0; i
< nvec
; i
++) {
389 int irq
= platform_get_irq(dev
, i
);
392 goto err_free_devres
;
397 desc
= irq_create_affinity_masks(nvec
, affd
);
400 goto err_free_devres
;
403 for (i
= 0; i
< nvec
; i
++) {
404 ret
= irq_update_affinity_desc(ptr
->irq
[i
], &desc
[i
]);
406 dev_err(&dev
->dev
, "failed to update irq%d affinity descriptor (%d)\n",
412 devres_add(&dev
->dev
, ptr
);
426 EXPORT_SYMBOL_GPL(devm_platform_get_irqs_affinity
);
429 * platform_get_resource_byname - get a resource for a device by name
430 * @dev: platform device
431 * @type: resource type
432 * @name: resource name
434 struct resource
*platform_get_resource_byname(struct platform_device
*dev
,
440 for (i
= 0; i
< dev
->num_resources
; i
++) {
441 struct resource
*r
= &dev
->resource
[i
];
443 if (unlikely(!r
->name
))
446 if (type
== resource_type(r
) && !strcmp(r
->name
, name
))
451 EXPORT_SYMBOL_GPL(platform_get_resource_byname
);
453 static int __platform_get_irq_byname(struct platform_device
*dev
,
459 if (IS_ENABLED(CONFIG_OF_IRQ
) && dev
->dev
.of_node
) {
460 ret
= of_irq_get_byname(dev
->dev
.of_node
, name
);
461 if (ret
> 0 || ret
== -EPROBE_DEFER
)
465 r
= platform_get_resource_byname(dev
, IORESOURCE_IRQ
, name
);
467 WARN(r
->start
== 0, "0 is an invalid IRQ number\n");
475 * platform_get_irq_byname - get an IRQ for a device by name
476 * @dev: platform device
479 * Get an IRQ like platform_get_irq(), but then by name rather then by index.
481 * Return: non-zero IRQ number on success, negative error number on failure.
483 int platform_get_irq_byname(struct platform_device
*dev
, const char *name
)
487 ret
= __platform_get_irq_byname(dev
, name
);
488 if (ret
< 0 && ret
!= -EPROBE_DEFER
)
489 dev_err(&dev
->dev
, "IRQ %s not found\n", name
);
493 EXPORT_SYMBOL_GPL(platform_get_irq_byname
);
/**
 * platform_get_irq_byname_optional - get an optional IRQ for a device by name
 * @dev: platform device
 * @name: IRQ name
 *
 * Get an optional IRQ by name like platform_get_irq_byname(). Except that it
 * does not print an error message if an IRQ can not be obtained.
 *
 * Return: non-zero IRQ number on success, negative error number on failure.
 */
int platform_get_irq_byname_optional(struct platform_device *dev,
				     const char *name)
{
	return __platform_get_irq_byname(dev, name);
}
EXPORT_SYMBOL_GPL(platform_get_irq_byname_optional);
/**
 * platform_add_devices - add a numbers of platform devices
 * @devs: array of platform devices to add
 * @num: number of platform devices in array
 *
 * Return: 0 on success, negative error number on failure.
 */
int platform_add_devices(struct platform_device **devs, int num)
{
	int i, ret = 0;

	for (i = 0; i < num; i++) {
		ret = platform_device_register(devs[i]);
		if (ret) {
			/* unwind the devices registered so far */
			while (--i >= 0)
				platform_device_unregister(devs[i]);
			break;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(platform_add_devices);
534 struct platform_object
{
535 struct platform_device pdev
;
540 * Set up default DMA mask for platform devices if the they weren't
541 * previously set by the architecture / DT.
543 static void setup_pdev_dma_masks(struct platform_device
*pdev
)
545 pdev
->dev
.dma_parms
= &pdev
->dma_parms
;
547 if (!pdev
->dev
.coherent_dma_mask
)
548 pdev
->dev
.coherent_dma_mask
= DMA_BIT_MASK(32);
549 if (!pdev
->dev
.dma_mask
) {
550 pdev
->platform_dma_mask
= DMA_BIT_MASK(32);
551 pdev
->dev
.dma_mask
= &pdev
->platform_dma_mask
;
556 * platform_device_put - destroy a platform device
557 * @pdev: platform device to free
559 * Free all memory associated with a platform device. This function must
560 * _only_ be externally called in error cases. All other usage is a bug.
562 void platform_device_put(struct platform_device
*pdev
)
564 if (!IS_ERR_OR_NULL(pdev
))
565 put_device(&pdev
->dev
);
567 EXPORT_SYMBOL_GPL(platform_device_put
);
569 static void platform_device_release(struct device
*dev
)
571 struct platform_object
*pa
= container_of(dev
, struct platform_object
,
574 of_device_node_put(&pa
->pdev
.dev
);
575 kfree(pa
->pdev
.dev
.platform_data
);
576 kfree(pa
->pdev
.mfd_cell
);
577 kfree(pa
->pdev
.resource
);
578 kfree(pa
->pdev
.driver_override
);
583 * platform_device_alloc - create a platform device
584 * @name: base name of the device we're adding
587 * Create a platform device object which can have other objects attached
588 * to it, and which will have attached objects freed when it is released.
590 struct platform_device
*platform_device_alloc(const char *name
, int id
)
592 struct platform_object
*pa
;
594 pa
= kzalloc(sizeof(*pa
) + strlen(name
) + 1, GFP_KERNEL
);
596 strcpy(pa
->name
, name
);
597 pa
->pdev
.name
= pa
->name
;
599 device_initialize(&pa
->pdev
.dev
);
600 pa
->pdev
.dev
.release
= platform_device_release
;
601 setup_pdev_dma_masks(&pa
->pdev
);
604 return pa
? &pa
->pdev
: NULL
;
606 EXPORT_SYMBOL_GPL(platform_device_alloc
);
609 * platform_device_add_resources - add resources to a platform device
610 * @pdev: platform device allocated by platform_device_alloc to add resources to
611 * @res: set of resources that needs to be allocated for the device
612 * @num: number of resources
614 * Add a copy of the resources to the platform device. The memory
615 * associated with the resources will be freed when the platform device is
618 int platform_device_add_resources(struct platform_device
*pdev
,
619 const struct resource
*res
, unsigned int num
)
621 struct resource
*r
= NULL
;
624 r
= kmemdup(res
, sizeof(struct resource
) * num
, GFP_KERNEL
);
629 kfree(pdev
->resource
);
631 pdev
->num_resources
= num
;
634 EXPORT_SYMBOL_GPL(platform_device_add_resources
);
637 * platform_device_add_data - add platform-specific data to a platform device
638 * @pdev: platform device allocated by platform_device_alloc to add resources to
639 * @data: platform specific data for this platform device
640 * @size: size of platform specific data
642 * Add a copy of platform specific data to the platform device's
643 * platform_data pointer. The memory associated with the platform data
644 * will be freed when the platform device is released.
646 int platform_device_add_data(struct platform_device
*pdev
, const void *data
,
652 d
= kmemdup(data
, size
, GFP_KERNEL
);
657 kfree(pdev
->dev
.platform_data
);
658 pdev
->dev
.platform_data
= d
;
661 EXPORT_SYMBOL_GPL(platform_device_add_data
);
664 * platform_device_add_properties - add built-in properties to a platform device
665 * @pdev: platform device to add properties to
666 * @properties: null terminated array of properties to add
668 * The function will take deep copy of @properties and attach the copy to the
669 * platform device. The memory associated with properties will be freed when the
670 * platform device is released.
672 int platform_device_add_properties(struct platform_device
*pdev
,
673 const struct property_entry
*properties
)
675 return device_add_properties(&pdev
->dev
, properties
);
677 EXPORT_SYMBOL_GPL(platform_device_add_properties
);
680 * platform_device_add - add a platform device to device hierarchy
681 * @pdev: platform device we're adding
683 * This is part 2 of platform_device_register(), though may be called
684 * separately _iff_ pdev was allocated by platform_device_alloc().
686 int platform_device_add(struct platform_device
*pdev
)
694 if (!pdev
->dev
.parent
)
695 pdev
->dev
.parent
= &platform_bus
;
697 pdev
->dev
.bus
= &platform_bus_type
;
701 dev_set_name(&pdev
->dev
, "%s.%d", pdev
->name
, pdev
->id
);
703 case PLATFORM_DEVID_NONE
:
704 dev_set_name(&pdev
->dev
, "%s", pdev
->name
);
706 case PLATFORM_DEVID_AUTO
:
708 * Automatically allocated device ID. We mark it as such so
709 * that we remember it must be freed, and we append a suffix
710 * to avoid namespace collision with explicit IDs.
712 ret
= ida_alloc(&platform_devid_ida
, GFP_KERNEL
);
716 pdev
->id_auto
= true;
717 dev_set_name(&pdev
->dev
, "%s.%d.auto", pdev
->name
, pdev
->id
);
721 for (i
= 0; i
< pdev
->num_resources
; i
++) {
722 struct resource
*p
, *r
= &pdev
->resource
[i
];
725 r
->name
= dev_name(&pdev
->dev
);
729 if (resource_type(r
) == IORESOURCE_MEM
)
731 else if (resource_type(r
) == IORESOURCE_IO
)
732 p
= &ioport_resource
;
736 ret
= insert_resource(p
, r
);
738 dev_err(&pdev
->dev
, "failed to claim resource %d: %pR\n", i
, r
);
744 pr_debug("Registering platform device '%s'. Parent at %s\n",
745 dev_name(&pdev
->dev
), dev_name(pdev
->dev
.parent
));
747 ret
= device_add(&pdev
->dev
);
753 ida_free(&platform_devid_ida
, pdev
->id
);
754 pdev
->id
= PLATFORM_DEVID_AUTO
;
758 struct resource
*r
= &pdev
->resource
[i
];
766 EXPORT_SYMBOL_GPL(platform_device_add
);
769 * platform_device_del - remove a platform-level device
770 * @pdev: platform device we're removing
772 * Note that this function will also release all memory- and port-based
773 * resources owned by the device (@dev->resource). This function must
774 * _only_ be externally called in error cases. All other usage is a bug.
776 void platform_device_del(struct platform_device
*pdev
)
780 if (!IS_ERR_OR_NULL(pdev
)) {
781 device_del(&pdev
->dev
);
784 ida_free(&platform_devid_ida
, pdev
->id
);
785 pdev
->id
= PLATFORM_DEVID_AUTO
;
788 for (i
= 0; i
< pdev
->num_resources
; i
++) {
789 struct resource
*r
= &pdev
->resource
[i
];
795 EXPORT_SYMBOL_GPL(platform_device_del
);
798 * platform_device_register - add a platform-level device
799 * @pdev: platform device we're adding
801 int platform_device_register(struct platform_device
*pdev
)
803 device_initialize(&pdev
->dev
);
804 setup_pdev_dma_masks(pdev
);
805 return platform_device_add(pdev
);
807 EXPORT_SYMBOL_GPL(platform_device_register
);
/**
 * platform_device_unregister - unregister a platform-level device
 * @pdev: platform device we're unregistering
 *
 * Unregistration is done in 2 steps. First we release all resources
 * and remove it from the subsystem, then we drop reference count by
 * calling platform_device_put().
 */
void platform_device_unregister(struct platform_device *pdev)
{
	platform_device_del(pdev);
	platform_device_put(pdev);
}
EXPORT_SYMBOL_GPL(platform_device_unregister);
825 * platform_device_register_full - add a platform-level device with
826 * resources and platform-specific data
828 * @pdevinfo: data used to create device
830 * Returns &struct platform_device pointer on success, or ERR_PTR() on error.
832 struct platform_device
*platform_device_register_full(
833 const struct platform_device_info
*pdevinfo
)
836 struct platform_device
*pdev
;
838 pdev
= platform_device_alloc(pdevinfo
->name
, pdevinfo
->id
);
840 return ERR_PTR(-ENOMEM
);
842 pdev
->dev
.parent
= pdevinfo
->parent
;
843 pdev
->dev
.fwnode
= pdevinfo
->fwnode
;
844 pdev
->dev
.of_node
= of_node_get(to_of_node(pdev
->dev
.fwnode
));
845 pdev
->dev
.of_node_reused
= pdevinfo
->of_node_reused
;
847 if (pdevinfo
->dma_mask
) {
848 pdev
->platform_dma_mask
= pdevinfo
->dma_mask
;
849 pdev
->dev
.dma_mask
= &pdev
->platform_dma_mask
;
850 pdev
->dev
.coherent_dma_mask
= pdevinfo
->dma_mask
;
853 ret
= platform_device_add_resources(pdev
,
854 pdevinfo
->res
, pdevinfo
->num_res
);
858 ret
= platform_device_add_data(pdev
,
859 pdevinfo
->data
, pdevinfo
->size_data
);
863 if (pdevinfo
->properties
) {
864 ret
= platform_device_add_properties(pdev
,
865 pdevinfo
->properties
);
870 ret
= platform_device_add(pdev
);
873 ACPI_COMPANION_SET(&pdev
->dev
, NULL
);
874 platform_device_put(pdev
);
880 EXPORT_SYMBOL_GPL(platform_device_register_full
);
883 * __platform_driver_register - register a driver for platform-level devices
884 * @drv: platform driver structure
885 * @owner: owning module/driver
887 int __platform_driver_register(struct platform_driver
*drv
,
888 struct module
*owner
)
890 drv
->driver
.owner
= owner
;
891 drv
->driver
.bus
= &platform_bus_type
;
893 return driver_register(&drv
->driver
);
895 EXPORT_SYMBOL_GPL(__platform_driver_register
);
898 * platform_driver_unregister - unregister a driver for platform-level devices
899 * @drv: platform driver structure
901 void platform_driver_unregister(struct platform_driver
*drv
)
903 driver_unregister(&drv
->driver
);
905 EXPORT_SYMBOL_GPL(platform_driver_unregister
);
907 static int platform_probe_fail(struct platform_device
*pdev
)
913 * __platform_driver_probe - register driver for non-hotpluggable device
914 * @drv: platform driver structure
915 * @probe: the driver probe routine, probably from an __init section
916 * @module: module which will be the owner of the driver
918 * Use this instead of platform_driver_register() when you know the device
919 * is not hotpluggable and has already been registered, and you want to
920 * remove its run-once probe() infrastructure from memory after the driver
921 * has bound to the device.
923 * One typical use for this would be with drivers for controllers integrated
924 * into system-on-chip processors, where the controller devices have been
925 * configured as part of board setup.
927 * Note that this is incompatible with deferred probing.
929 * Returns zero if the driver registered and bound to a device, else returns
930 * a negative error code and with the driver not registered.
932 int __init_or_module
__platform_driver_probe(struct platform_driver
*drv
,
933 int (*probe
)(struct platform_device
*), struct module
*module
)
937 if (drv
->driver
.probe_type
== PROBE_PREFER_ASYNCHRONOUS
) {
938 pr_err("%s: drivers registered with %s can not be probed asynchronously\n",
939 drv
->driver
.name
, __func__
);
944 * We have to run our probes synchronously because we check if
945 * we find any devices to bind to and exit with error if there
948 drv
->driver
.probe_type
= PROBE_FORCE_SYNCHRONOUS
;
951 * Prevent driver from requesting probe deferral to avoid further
952 * futile probe attempts.
954 drv
->prevent_deferred_probe
= true;
956 /* make sure driver won't have bind/unbind attributes */
957 drv
->driver
.suppress_bind_attrs
= true;
959 /* temporary section violation during probe() */
961 retval
= code
= __platform_driver_register(drv
, module
);
966 * Fixup that section violation, being paranoid about code scanning
967 * the list of drivers in order to probe new devices. Check to see
968 * if the probe was successful, and make sure any forced probes of
971 spin_lock(&drv
->driver
.bus
->p
->klist_drivers
.k_lock
);
972 drv
->probe
= platform_probe_fail
;
973 if (code
== 0 && list_empty(&drv
->driver
.p
->klist_devices
.k_list
))
975 spin_unlock(&drv
->driver
.bus
->p
->klist_drivers
.k_lock
);
978 platform_driver_unregister(drv
);
981 EXPORT_SYMBOL_GPL(__platform_driver_probe
);
984 * __platform_create_bundle - register driver and create corresponding device
985 * @driver: platform driver structure
986 * @probe: the driver probe routine, probably from an __init section
987 * @res: set of resources that needs to be allocated for the device
988 * @n_res: number of resources
989 * @data: platform specific data for this platform device
990 * @size: size of platform specific data
991 * @module: module which will be the owner of the driver
993 * Use this in legacy-style modules that probe hardware directly and
994 * register a single platform device and corresponding platform driver.
996 * Returns &struct platform_device pointer on success, or ERR_PTR() on error.
998 struct platform_device
* __init_or_module
__platform_create_bundle(
999 struct platform_driver
*driver
,
1000 int (*probe
)(struct platform_device
*),
1001 struct resource
*res
, unsigned int n_res
,
1002 const void *data
, size_t size
, struct module
*module
)
1004 struct platform_device
*pdev
;
1007 pdev
= platform_device_alloc(driver
->driver
.name
, -1);
1013 error
= platform_device_add_resources(pdev
, res
, n_res
);
1017 error
= platform_device_add_data(pdev
, data
, size
);
1021 error
= platform_device_add(pdev
);
1025 error
= __platform_driver_probe(driver
, probe
, module
);
1032 platform_device_del(pdev
);
1034 platform_device_put(pdev
);
1036 return ERR_PTR(error
);
1038 EXPORT_SYMBOL_GPL(__platform_create_bundle
);
/**
 * __platform_register_drivers - register an array of platform drivers
 * @drivers: an array of drivers to register
 * @count: the number of drivers to register
 * @owner: module owning the drivers
 *
 * Registers platform drivers specified by an array. On failure to register a
 * driver, all previously registered drivers will be unregistered. Callers of
 * this API should use platform_unregister_drivers() to unregister drivers in
 * the reverse order.
 *
 * Returns: 0 on success or a negative error code on failure.
 */
int __platform_register_drivers(struct platform_driver * const *drivers,
				unsigned int count, struct module *owner)
{
	unsigned int i;
	int err;

	for (i = 0; i < count; i++) {
		pr_debug("registering platform driver %ps\n", drivers[i]);

		err = __platform_driver_register(drivers[i], owner);
		if (err < 0) {
			pr_err("failed to register platform driver %ps: %d\n",
			       drivers[i], err);
			goto error;
		}
	}

	return 0;

error:
	/* unwind in reverse order of registration */
	while (i--) {
		pr_debug("unregistering platform driver %ps\n", drivers[i]);
		platform_driver_unregister(drivers[i]);
	}

	return err;
}
EXPORT_SYMBOL_GPL(__platform_register_drivers);
/**
 * platform_unregister_drivers - unregister an array of platform drivers
 * @drivers: an array of drivers to unregister
 * @count: the number of drivers to unregister
 *
 * Unregisters platform drivers specified by an array. This is typically used
 * to complement an earlier call to platform_register_drivers(). Drivers are
 * unregistered in the reverse order in which they were registered.
 */
void platform_unregister_drivers(struct platform_driver * const *drivers,
				 unsigned int count)
{
	while (count--) {
		pr_debug("unregistering platform driver %ps\n", drivers[count]);
		platform_driver_unregister(drivers[count]);
	}
}
EXPORT_SYMBOL_GPL(platform_unregister_drivers);
1101 static const struct platform_device_id
*platform_match_id(
1102 const struct platform_device_id
*id
,
1103 struct platform_device
*pdev
)
1105 while (id
->name
[0]) {
1106 if (strcmp(pdev
->name
, id
->name
) == 0) {
1107 pdev
->id_entry
= id
;
1115 #ifdef CONFIG_PM_SLEEP
1117 static int platform_legacy_suspend(struct device
*dev
, pm_message_t mesg
)
1119 struct platform_driver
*pdrv
= to_platform_driver(dev
->driver
);
1120 struct platform_device
*pdev
= to_platform_device(dev
);
1123 if (dev
->driver
&& pdrv
->suspend
)
1124 ret
= pdrv
->suspend(pdev
, mesg
);
1129 static int platform_legacy_resume(struct device
*dev
)
1131 struct platform_driver
*pdrv
= to_platform_driver(dev
->driver
);
1132 struct platform_device
*pdev
= to_platform_device(dev
);
1135 if (dev
->driver
&& pdrv
->resume
)
1136 ret
= pdrv
->resume(pdev
);
1141 #endif /* CONFIG_PM_SLEEP */
1143 #ifdef CONFIG_SUSPEND
1145 int platform_pm_suspend(struct device
*dev
)
1147 struct device_driver
*drv
= dev
->driver
;
1154 if (drv
->pm
->suspend
)
1155 ret
= drv
->pm
->suspend(dev
);
1157 ret
= platform_legacy_suspend(dev
, PMSG_SUSPEND
);
1163 int platform_pm_resume(struct device
*dev
)
1165 struct device_driver
*drv
= dev
->driver
;
1172 if (drv
->pm
->resume
)
1173 ret
= drv
->pm
->resume(dev
);
1175 ret
= platform_legacy_resume(dev
);
1181 #endif /* CONFIG_SUSPEND */
1183 #ifdef CONFIG_HIBERNATE_CALLBACKS
1185 int platform_pm_freeze(struct device
*dev
)
1187 struct device_driver
*drv
= dev
->driver
;
1194 if (drv
->pm
->freeze
)
1195 ret
= drv
->pm
->freeze(dev
);
1197 ret
= platform_legacy_suspend(dev
, PMSG_FREEZE
);
1203 int platform_pm_thaw(struct device
*dev
)
1205 struct device_driver
*drv
= dev
->driver
;
1213 ret
= drv
->pm
->thaw(dev
);
1215 ret
= platform_legacy_resume(dev
);
1221 int platform_pm_poweroff(struct device
*dev
)
1223 struct device_driver
*drv
= dev
->driver
;
1230 if (drv
->pm
->poweroff
)
1231 ret
= drv
->pm
->poweroff(dev
);
1233 ret
= platform_legacy_suspend(dev
, PMSG_HIBERNATE
);
1239 int platform_pm_restore(struct device
*dev
)
1241 struct device_driver
*drv
= dev
->driver
;
1248 if (drv
->pm
->restore
)
1249 ret
= drv
->pm
->restore(dev
);
1251 ret
= platform_legacy_resume(dev
);
1257 #endif /* CONFIG_HIBERNATE_CALLBACKS */
1259 /* modalias support enables more hands-off userspace setup:
1260 * (a) environment variable lets new-style hotplug events work once system is
1261 * fully running: "modprobe $MODALIAS"
1262 * (b) sysfs attribute lets new-style coldplug recover from hotplug events
1263 * mishandled before system is fully running: "modprobe $(cat modalias)"
1265 static ssize_t
modalias_show(struct device
*dev
,
1266 struct device_attribute
*attr
, char *buf
)
1268 struct platform_device
*pdev
= to_platform_device(dev
);
1271 len
= of_device_modalias(dev
, buf
, PAGE_SIZE
);
1275 len
= acpi_device_modalias(dev
, buf
, PAGE_SIZE
- 1);
1279 return sysfs_emit(buf
, "platform:%s\n", pdev
->name
);
1281 static DEVICE_ATTR_RO(modalias
);
1283 static ssize_t
numa_node_show(struct device
*dev
,
1284 struct device_attribute
*attr
, char *buf
)
1286 return sysfs_emit(buf
, "%d\n", dev_to_node(dev
));
1288 static DEVICE_ATTR_RO(numa_node
);
1290 static ssize_t
driver_override_show(struct device
*dev
,
1291 struct device_attribute
*attr
, char *buf
)
1293 struct platform_device
*pdev
= to_platform_device(dev
);
1297 len
= sysfs_emit(buf
, "%s\n", pdev
->driver_override
);
1303 static ssize_t
driver_override_store(struct device
*dev
,
1304 struct device_attribute
*attr
,
1305 const char *buf
, size_t count
)
1307 struct platform_device
*pdev
= to_platform_device(dev
);
1308 char *driver_override
, *old
, *cp
;
1310 /* We need to keep extra room for a newline */
1311 if (count
>= (PAGE_SIZE
- 1))
1314 driver_override
= kstrndup(buf
, count
, GFP_KERNEL
);
1315 if (!driver_override
)
1318 cp
= strchr(driver_override
, '\n');
1323 old
= pdev
->driver_override
;
1324 if (strlen(driver_override
)) {
1325 pdev
->driver_override
= driver_override
;
1327 kfree(driver_override
);
1328 pdev
->driver_override
= NULL
;
1336 static DEVICE_ATTR_RW(driver_override
);
1338 static struct attribute
*platform_dev_attrs
[] = {
1339 &dev_attr_modalias
.attr
,
1340 &dev_attr_numa_node
.attr
,
1341 &dev_attr_driver_override
.attr
,
1345 static umode_t
platform_dev_attrs_visible(struct kobject
*kobj
, struct attribute
*a
,
1348 struct device
*dev
= container_of(kobj
, typeof(*dev
), kobj
);
1350 if (a
== &dev_attr_numa_node
.attr
&&
1351 dev_to_node(dev
) == NUMA_NO_NODE
)
1357 static struct attribute_group platform_dev_group
= {
1358 .attrs
= platform_dev_attrs
,
1359 .is_visible
= platform_dev_attrs_visible
,
1361 __ATTRIBUTE_GROUPS(platform_dev
);
/**
 * platform_match - bind platform device to platform driver.
 * @dev: device.
 * @drv: driver.
 *
 * Platform device IDs are assumed to be encoded like this:
 * "<name><instance>", where <name> is a short description of the type of
 * device, like "pci" or "floppy", and <instance> is the enumerated
 * instance of the device, like '0' or '42'.  Driver IDs are simply
 * "<name>".  So, extract the <name> from the platform_device structure,
 * and compare it against the name of the driver. Return whether they match
 * or not.
 */
static int platform_match(struct device *dev, struct device_driver *drv)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct platform_driver *pdrv = to_platform_driver(drv);

	/* When driver_override is set, only bind to the matching driver */
	if (pdev->driver_override)
		return !strcmp(pdev->driver_override, drv->name);

	/* Attempt an OF style match first */
	if (of_driver_match_device(dev, drv))
		return 1;

	/* Then try ACPI style match */
	if (acpi_driver_match_device(dev, drv))
		return 1;

	/* Then try to match against the id table */
	if (pdrv->id_table)
		return platform_match_id(pdrv->id_table, pdev) != NULL;

	/* fall-back to driver name match */
	return (strcmp(pdev->name, drv->name) == 0);
}
/*
 * Emit MODALIAS for a platform device when generating uevents.  Firmware
 * (OF, then ACPI) aliases take priority; -ENODEV from those helpers means
 * "no firmware data", so fall through to the plain platform: alias.
 */
static int platform_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct platform_device	*pdev = to_platform_device(dev);
	int rc;

	/* Some devices have extra OF data and an OF-style MODALIAS */
	rc = of_device_uevent_modalias(dev, env);
	if (rc != -ENODEV)
		return rc;

	rc = acpi_device_uevent_modalias(dev, env);
	if (rc != -ENODEV)
		return rc;

	add_uevent_var(env, "MODALIAS=%s%s", PLATFORM_MODULE_PREFIX,
			pdev->name);
	return 0;
}
/*
 * Bus-level probe: prepare default clocks and PM domain, then invoke the
 * driver's probe callback.  On probe failure the PM domain is detached
 * again so the device is left as it was found.
 */
static int platform_probe(struct device *_dev)
{
	struct platform_driver *drv = to_platform_driver(_dev->driver);
	struct platform_device *dev = to_platform_device(_dev);
	int ret;

	/*
	 * A driver registered using platform_driver_probe() cannot be bound
	 * again later because the probe function usually lives in __init code
	 * and so is gone. For these drivers .probe is set to
	 * platform_probe_fail in __platform_driver_probe(). Don't even
	 * prepare clocks and PM domains for these to match the traditional
	 * behaviour.
	 */
	if (unlikely(drv->probe == platform_probe_fail))
		return -ENXIO;

	ret = of_clk_set_defaults(_dev->of_node, false);
	if (ret < 0)
		return ret;

	ret = dev_pm_domain_attach(_dev, true);
	if (ret)
		goto out;

	if (drv->probe) {
		ret = drv->probe(dev);
		if (ret)
			dev_pm_domain_detach(_dev, true);
	}

out:
	if (drv->prevent_deferred_probe && ret == -EPROBE_DEFER) {
		dev_warn(_dev, "probe deferral not supported\n");
		ret = -ENXIO;	/* turn deferral into a hard failure */
	}

	return ret;
}
/*
 * Bus-level remove: call the driver's remove callback (if any), then
 * detach the PM domain that platform_probe() attached.
 */
static int platform_remove(struct device *_dev)
{
	struct platform_driver *drv = to_platform_driver(_dev->driver);
	struct platform_device *dev = to_platform_device(_dev);
	int ret = 0;

	if (drv->remove)
		ret = drv->remove(dev);
	dev_pm_domain_detach(_dev, true);

	return ret;
}
/*
 * Bus-level shutdown: forward to the driver's shutdown callback.  A
 * device may have no bound driver at shutdown time, so guard against
 * a NULL _dev->driver before dereferencing it.
 */
static void platform_shutdown(struct device *_dev)
{
	struct platform_device *dev = to_platform_device(_dev);
	struct platform_driver *drv;

	if (!_dev->driver)
		return;

	drv = to_platform_driver(_dev->driver);
	if (drv->shutdown)
		drv->shutdown(dev);
}
/*
 * Configure DMA for a platform device from its firmware description:
 * prefer the OF node when present, otherwise fall back to the ACPI
 * companion.  Devices with neither are left unconfigured (returns 0).
 */
int platform_dma_configure(struct device *dev)
{
	enum dev_dma_attr attr;
	int ret = 0;

	if (dev->of_node) {
		ret = of_dma_configure(dev, dev->of_node, true);
	} else if (has_acpi_companion(dev)) {
		attr = acpi_get_dma_attr(to_acpi_device_node(dev->fwnode));
		ret = acpi_dma_configure(dev, attr);
	}

	return ret;
}
/* Generic runtime-PM callbacks plus the standard system-sleep ops. */
static const struct dev_pm_ops platform_dev_pm_ops = {
	.runtime_suspend = pm_generic_runtime_suspend,
	.runtime_resume = pm_generic_runtime_resume,
	USE_PLATFORM_PM_SLEEP_OPS
};
/* The platform 'pseudo' bus itself; ties together the callbacks above. */
struct bus_type platform_bus_type = {
	.name		= "platform",
	.dev_groups	= platform_dev_groups,
	.match		= platform_match,
	.uevent		= platform_uevent,
	.probe		= platform_probe,
	.remove		= platform_remove,
	.shutdown	= platform_shutdown,
	.dma_configure	= platform_dma_configure,
	.pm		= &platform_dev_pm_ops,
};
EXPORT_SYMBOL_GPL(platform_bus_type);
/*
 * Adapter so platform_match() can be used as a bus_find_device()
 * predicate, which passes its match data as a const void pointer.
 */
static inline int __platform_match(struct device *dev, const void *drv)
{
	return platform_match(dev, (struct device_driver *)drv);
}
/**
 * platform_find_device_by_driver - Find a platform device with a given
 * driver.
 * @start: The device to start the search from.
 * @drv: The device driver to look for.
 *
 * Return: the first matching device with its reference count raised
 * (caller must put_device()), or NULL if none is found.
 */
struct device *platform_find_device_by_driver(struct device *start,
					      const struct device_driver *drv)
{
	return bus_find_device(&platform_bus_type, start, drv,
			       __platform_match);
}
EXPORT_SYMBOL_GPL(platform_find_device_by_driver);
/* Weak hook for architectures to tear down early platform-device state. */
void __weak __init early_platform_cleanup(void) { }
1542 int __init
platform_bus_init(void)
1546 early_platform_cleanup();
1548 error
= device_register(&platform_bus
);
1550 put_device(&platform_bus
);
1553 error
= bus_register(&platform_bus_type
);
1555 device_unregister(&platform_bus
);
1556 of_platform_register_reconfig_notifier();