// SPDX-License-Identifier: GPL-2.0
/*
 * platform.c - platform 'pseudo' bus for legacy devices
 *
 * Copyright (c) 2002-3 Patrick Mochel
 * Copyright (c) 2002-3 Open Source Development Labs
 *
 * Please see Documentation/driver-api/driver-model/platform.rst for more
 * information.
 */

#include <linux/string.h>
#include <linux/platform_device.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/idr.h>
#include <linux/acpi.h>
#include <linux/clk/clk-conf.h>
#include <linux/limits.h>
#include <linux/property.h>
#include <linux/kmemleak.h>
#include <linux/types.h>

#include "base.h"
#include "power/power.h"
/* For automatically allocated device IDs */
static DEFINE_IDA(platform_devid_ida);

struct device platform_bus = {
	.init_name	= "platform",
};
EXPORT_SYMBOL_GPL(platform_bus);
/**
 * platform_get_resource - get a resource for a device
 * @dev: platform device
 * @type: resource type
 * @num: resource index
 */
struct resource *platform_get_resource(struct platform_device *dev,
				       unsigned int type, unsigned int num)
{
	u32 i;

	for (i = 0; i < dev->num_resources; i++) {
		struct resource *r = &dev->resource[i];

		if (type == resource_type(r) && num-- == 0)
			return r;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(platform_get_resource);
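
/*
 * Usage sketch (illustrative only; foo_probe() and its resource layout are
 * assumptions, not taken from this file): a driver's probe() typically looks
 * up its first MMIO region with platform_get_resource() before mapping it:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct resource *res;
 *
 *		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *		if (!res)
 *			return -ENODEV;
 *		...
 *	}
 */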
#ifdef CONFIG_HAS_IOMEM
/**
 * devm_platform_ioremap_resource - call devm_ioremap_resource() for a platform
 *				    device
 *
 * @pdev: platform device to use both for memory resource lookup as well as
 *        resource management
 * @index: resource index
 */
void __iomem *devm_platform_ioremap_resource(struct platform_device *pdev,
					     unsigned int index)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, index);
	return devm_ioremap_resource(&pdev->dev, res);
}
EXPORT_SYMBOL_GPL(devm_platform_ioremap_resource);

/**
 * devm_platform_ioremap_resource_wc - write-combined variant of
 *				       devm_platform_ioremap_resource()
 *
 * @pdev: platform device to use both for memory resource lookup as well as
 *        resource management
 * @index: resource index
 */
void __iomem *devm_platform_ioremap_resource_wc(struct platform_device *pdev,
						unsigned int index)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, index);
	return devm_ioremap_resource_wc(&pdev->dev, res);
}

/**
 * devm_platform_ioremap_resource_byname - call devm_ioremap_resource for
 *					   a platform device, retrieve the
 *					   resource by name
 *
 * @pdev: platform device to use both for memory resource lookup as well as
 *        resource management
 * @name: name of the resource
 */
void __iomem *
devm_platform_ioremap_resource_byname(struct platform_device *pdev,
				      const char *name)
{
	struct resource *res;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
	return devm_ioremap_resource(&pdev->dev, res);
}
EXPORT_SYMBOL_GPL(devm_platform_ioremap_resource_byname);
#endif /* CONFIG_HAS_IOMEM */
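
/*
 * Usage sketch (illustrative only; foo_probe() is an assumed driver probe):
 * the devm_* helpers above collapse the usual platform_get_resource() plus
 * devm_ioremap_resource() pair into a single call:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		void __iomem *base;
 *
 *		base = devm_platform_ioremap_resource(pdev, 0);
 *		if (IS_ERR(base))
 *			return PTR_ERR(base);
 *		...
 *	}
 */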
/**
 * platform_get_irq_optional - get an optional IRQ for a device
 * @dev: platform device
 * @num: IRQ number index
 *
 * Gets an IRQ for a platform device. Device drivers should check the return
 * value for errors so as to not pass a negative integer value to the
 * request_irq() APIs. This is the same as platform_get_irq(), except that it
 * does not print an error message if an IRQ can not be obtained.
 *
 * For example::
 *
 *		int irq = platform_get_irq_optional(pdev, 0);
 *		if (irq < 0)
 *			return irq;
 *
 * Return: IRQ number on success, negative error number on failure.
 */
int platform_get_irq_optional(struct platform_device *dev, unsigned int num)
{
#ifdef CONFIG_SPARC
	/* sparc does not have irqs represented as IORESOURCE_IRQ resources */
	if (!dev || num >= dev->archdata.num_irqs)
		return -ENXIO;
	return dev->archdata.irqs[num];
#else
	struct resource *r;

	if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
		int ret;

		ret = of_irq_get(dev->dev.of_node, num);
		if (ret > 0 || ret == -EPROBE_DEFER)
			return ret;
	}

	r = platform_get_resource(dev, IORESOURCE_IRQ, num);
	if (has_acpi_companion(&dev->dev)) {
		if (r && r->flags & IORESOURCE_DISABLED) {
			int ret;

			ret = acpi_irq_get(ACPI_HANDLE(&dev->dev), num, r);
			if (ret)
				return ret;
		}
	}

	/*
	 * The resources may pass trigger flags to the irqs that need
	 * to be set up. It so happens that the trigger flags for
	 * IORESOURCE_BITS correspond 1-to-1 to the IRQF_TRIGGER*
	 * settings.
	 */
	if (r && r->flags & IORESOURCE_BITS) {
		struct irq_data *irqd;

		irqd = irq_get_irq_data(r->start);
		if (!irqd)
			return -ENXIO;
		irqd_set_trigger_type(irqd, r->flags & IORESOURCE_BITS);
	}

	if (r)
		return r->start;

	/*
	 * For the index 0 interrupt, allow falling back to GpioInt
	 * resources. While a device could have both Interrupt and GpioInt
	 * resources, making this fallback ambiguous, in many common cases
	 * the device will only expose one IRQ, and this fallback
	 * allows a common code path across either kind of resource.
	 */
	if (num == 0 && has_acpi_companion(&dev->dev)) {
		int ret;

		ret = acpi_dev_gpio_irq_get(ACPI_COMPANION(&dev->dev), num);
		/* Our callers expect -ENXIO for missing IRQs. */
		if (ret >= 0 || ret == -EPROBE_DEFER)
			return ret;
	}

	return -ENXIO;
#endif
}
EXPORT_SYMBOL_GPL(platform_get_irq_optional);
/**
 * platform_get_irq - get an IRQ for a device
 * @dev: platform device
 * @num: IRQ number index
 *
 * Gets an IRQ for a platform device and prints an error message if finding the
 * IRQ fails. Device drivers should check the return value for errors so as to
 * not pass a negative integer value to the request_irq() APIs.
 *
 * For example::
 *
 *		int irq = platform_get_irq(pdev, 0);
 *		if (irq < 0)
 *			return irq;
 *
 * Return: IRQ number on success, negative error number on failure.
 */
int platform_get_irq(struct platform_device *dev, unsigned int num)
{
	int ret;

	ret = platform_get_irq_optional(dev, num);
	if (ret < 0 && ret != -EPROBE_DEFER)
		dev_err(&dev->dev, "IRQ index %u not found\n", num);

	return ret;
}
EXPORT_SYMBOL_GPL(platform_get_irq);
/**
 * platform_irq_count - Count the number of IRQs a platform device uses
 * @dev: platform device
 *
 * Return: Number of IRQs a platform device uses or EPROBE_DEFER
 */
int platform_irq_count(struct platform_device *dev)
{
	int ret, nr = 0;

	while ((ret = platform_get_irq_optional(dev, nr)) >= 0)
		nr++;

	if (ret == -EPROBE_DEFER)
		return ret;

	return nr;
}
EXPORT_SYMBOL_GPL(platform_irq_count);
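
/*
 * Usage sketch (illustrative only; the surrounding probe context is assumed):
 * a driver that handles a variable number of interrupts can count them first
 * and then request each one by index:
 *
 *	int i, nr_irqs = platform_irq_count(pdev);
 *
 *	if (nr_irqs < 0)
 *		return nr_irqs;
 *	for (i = 0; i < nr_irqs; i++) {
 *		int irq = platform_get_irq(pdev, i);
 *
 *		if (irq < 0)
 *			return irq;
 *		...
 *	}
 */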
/**
 * platform_get_resource_byname - get a resource for a device by name
 * @dev: platform device
 * @type: resource type
 * @name: resource name
 */
struct resource *platform_get_resource_byname(struct platform_device *dev,
					      unsigned int type,
					      const char *name)
{
	u32 i;

	for (i = 0; i < dev->num_resources; i++) {
		struct resource *r = &dev->resource[i];

		if (unlikely(!r->name))
			continue;

		if (type == resource_type(r) && !strcmp(r->name, name))
			return r;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(platform_get_resource_byname);
static int __platform_get_irq_byname(struct platform_device *dev,
				     const char *name)
{
	struct resource *r;

	if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
		int ret;

		ret = of_irq_get_byname(dev->dev.of_node, name);
		if (ret > 0 || ret == -EPROBE_DEFER)
			return ret;
	}

	r = platform_get_resource_byname(dev, IORESOURCE_IRQ, name);
	if (r)
		return r->start;

	return -ENXIO;
}
/**
 * platform_get_irq_byname - get an IRQ for a device by name
 * @dev: platform device
 * @name: IRQ name
 *
 * Get an IRQ like platform_get_irq(), but then by name rather than by index.
 *
 * Return: IRQ number on success, negative error number on failure.
 */
int platform_get_irq_byname(struct platform_device *dev, const char *name)
{
	int ret;

	ret = __platform_get_irq_byname(dev, name);
	if (ret < 0 && ret != -EPROBE_DEFER)
		dev_err(&dev->dev, "IRQ %s not found\n", name);

	return ret;
}
EXPORT_SYMBOL_GPL(platform_get_irq_byname);
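
/*
 * Usage sketch (illustrative only; the "tx" interrupt name is an assumption):
 * looking up an interrupt by the name it carries in the device's
 * interrupt-names property or IORESOURCE_IRQ resource:
 *
 *	int irq = platform_get_irq_byname(pdev, "tx");
 *
 *	if (irq < 0)
 *		return irq;
 */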
/**
 * platform_get_irq_byname_optional - get an optional IRQ for a device by name
 * @dev: platform device
 * @name: IRQ name
 *
 * Get an optional IRQ by name like platform_get_irq_byname(). Except that it
 * does not print an error message if an IRQ can not be obtained.
 *
 * Return: IRQ number on success, negative error number on failure.
 */
int platform_get_irq_byname_optional(struct platform_device *dev,
				     const char *name)
{
	return __platform_get_irq_byname(dev, name);
}
EXPORT_SYMBOL_GPL(platform_get_irq_byname_optional);
/**
 * platform_add_devices - add a number of platform devices
 * @devs: array of platform devices to add
 * @num: number of platform devices in array
 */
int platform_add_devices(struct platform_device **devs, int num)
{
	int i, ret = 0;

	for (i = 0; i < num; i++) {
		ret = platform_device_register(devs[i]);
		if (ret) {
			while (--i >= 0)
				platform_device_unregister(devs[i]);
			break;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(platform_add_devices);
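
/*
 * Usage sketch (illustrative only; the foo_* devices are assumed board-file
 * definitions): registering several statically defined devices in one call,
 * with automatic rollback if any registration fails:
 *
 *	static struct platform_device *foo_devices[] __initdata = {
 *		&foo_uart_device,
 *		&foo_nand_device,
 *	};
 *
 *	platform_add_devices(foo_devices, ARRAY_SIZE(foo_devices));
 */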
struct platform_object {
	struct platform_device pdev;
	char name[];
};

/*
 * Set up default DMA mask for platform devices if they weren't
 * previously set by the architecture / DT.
 */
static void setup_pdev_dma_masks(struct platform_device *pdev)
{
	if (!pdev->dev.coherent_dma_mask)
		pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);

	pdev->dma_mask = DMA_BIT_MASK(32);
	if (!pdev->dev.dma_mask)
		pdev->dev.dma_mask = &pdev->dma_mask;
}
/**
 * platform_device_put - destroy a platform device
 * @pdev: platform device to free
 *
 * Free all memory associated with a platform device. This function must
 * _only_ be externally called in error cases. All other usage is a bug.
 */
void platform_device_put(struct platform_device *pdev)
{
	if (!IS_ERR_OR_NULL(pdev))
		put_device(&pdev->dev);
}
EXPORT_SYMBOL_GPL(platform_device_put);
static void platform_device_release(struct device *dev)
{
	struct platform_object *pa = container_of(dev, struct platform_object,
						  pdev.dev);

	of_device_node_put(&pa->pdev.dev);
	kfree(pa->pdev.dev.platform_data);
	kfree(pa->pdev.mfd_cell);
	kfree(pa->pdev.resource);
	kfree(pa->pdev.driver_override);
	kfree(pa);
}
/**
 * platform_device_alloc - create a platform device
 * @name: base name of the device we're adding
 * @id: instance id
 *
 * Create a platform device object which can have other objects attached
 * to it, and which will have attached objects freed when it is released.
 */
struct platform_device *platform_device_alloc(const char *name, int id)
{
	struct platform_object *pa;

	pa = kzalloc(sizeof(*pa) + strlen(name) + 1, GFP_KERNEL);
	if (pa) {
		strcpy(pa->name, name);
		pa->pdev.name = pa->name;
		pa->pdev.id = id;
		device_initialize(&pa->pdev.dev);
		pa->pdev.dev.release = platform_device_release;
		setup_pdev_dma_masks(&pa->pdev);
	}

	return pa ? &pa->pdev : NULL;
}
EXPORT_SYMBOL_GPL(platform_device_alloc);
/**
 * platform_device_add_resources - add resources to a platform device
 * @pdev: platform device allocated by platform_device_alloc to add resources to
 * @res: set of resources that needs to be allocated for the device
 * @num: number of resources
 *
 * Add a copy of the resources to the platform device. The memory
 * associated with the resources will be freed when the platform device is
 * released.
 */
int platform_device_add_resources(struct platform_device *pdev,
				  const struct resource *res, unsigned int num)
{
	struct resource *r = NULL;

	if (res) {
		r = kmemdup(res, sizeof(struct resource) * num, GFP_KERNEL);
		if (!r)
			return -ENOMEM;
	}

	kfree(pdev->resource);
	pdev->resource = r;
	pdev->num_resources = num;
	return 0;
}
EXPORT_SYMBOL_GPL(platform_device_add_resources);
/**
 * platform_device_add_data - add platform-specific data to a platform device
 * @pdev: platform device allocated by platform_device_alloc to add resources to
 * @data: platform specific data for this platform device
 * @size: size of platform specific data
 *
 * Add a copy of platform specific data to the platform device's
 * platform_data pointer. The memory associated with the platform data
 * will be freed when the platform device is released.
 */
int platform_device_add_data(struct platform_device *pdev, const void *data,
			     size_t size)
{
	void *d = NULL;

	if (data) {
		d = kmemdup(data, size, GFP_KERNEL);
		if (!d)
			return -ENOMEM;
	}

	kfree(pdev->dev.platform_data);
	pdev->dev.platform_data = d;
	return 0;
}
EXPORT_SYMBOL_GPL(platform_device_add_data);
/**
 * platform_device_add_properties - add built-in properties to a platform device
 * @pdev: platform device to add properties to
 * @properties: null terminated array of properties to add
 *
 * The function will take a deep copy of @properties and attach the copy to the
 * platform device. The memory associated with properties will be freed when
 * the platform device is released.
 */
int platform_device_add_properties(struct platform_device *pdev,
				   const struct property_entry *properties)
{
	return device_add_properties(&pdev->dev, properties);
}
EXPORT_SYMBOL_GPL(platform_device_add_properties);
/**
 * platform_device_add - add a platform device to device hierarchy
 * @pdev: platform device we're adding
 *
 * This is part 2 of platform_device_register(), though may be called
 * separately _iff_ pdev was allocated by platform_device_alloc().
 */
int platform_device_add(struct platform_device *pdev)
{
	u32 i;
	int ret;

	if (!pdev)
		return -EINVAL;

	if (!pdev->dev.parent)
		pdev->dev.parent = &platform_bus;

	pdev->dev.bus = &platform_bus_type;

	switch (pdev->id) {
	default:
		dev_set_name(&pdev->dev, "%s.%d", pdev->name, pdev->id);
		break;
	case PLATFORM_DEVID_NONE:
		dev_set_name(&pdev->dev, "%s", pdev->name);
		break;
	case PLATFORM_DEVID_AUTO:
		/*
		 * Automatically allocated device ID. We mark it as such so
		 * that we remember it must be freed, and we append a suffix
		 * to avoid namespace collision with explicit IDs.
		 */
		ret = ida_simple_get(&platform_devid_ida, 0, 0, GFP_KERNEL);
		if (ret < 0)
			goto err_out;
		pdev->id = ret;
		pdev->id_auto = true;
		dev_set_name(&pdev->dev, "%s.%d.auto", pdev->name, pdev->id);
		break;
	}

	for (i = 0; i < pdev->num_resources; i++) {
		struct resource *p, *r = &pdev->resource[i];

		if (r->name == NULL)
			r->name = dev_name(&pdev->dev);

		p = r->parent;
		if (!p) {
			if (resource_type(r) == IORESOURCE_MEM)
				p = &iomem_resource;
			else if (resource_type(r) == IORESOURCE_IO)
				p = &ioport_resource;
		}

		if (p) {
			ret = insert_resource(p, r);
			if (ret) {
				dev_err(&pdev->dev, "failed to claim resource %d: %pR\n", i, r);
				goto failed;
			}
		}
	}

	pr_debug("Registering platform device '%s'. Parent at %s\n",
		 dev_name(&pdev->dev), dev_name(pdev->dev.parent));

	ret = device_add(&pdev->dev);
	if (ret == 0)
		return ret;

 failed:
	if (pdev->id_auto) {
		ida_simple_remove(&platform_devid_ida, pdev->id);
		pdev->id = PLATFORM_DEVID_AUTO;
	}

	while (i--) {
		struct resource *r = &pdev->resource[i];
		if (r->parent)
			release_resource(r);
	}

 err_out:
	return ret;
}
EXPORT_SYMBOL_GPL(platform_device_add);
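
/*
 * Usage sketch (illustrative only; the "foo" name and foo_resources[] array
 * are assumptions): the two-step allocation/registration flow that
 * platform_device_add() enables:
 *
 *	struct platform_device *pdev;
 *	int ret;
 *
 *	pdev = platform_device_alloc("foo", PLATFORM_DEVID_NONE);
 *	if (!pdev)
 *		return -ENOMEM;
 *
 *	ret = platform_device_add_resources(pdev, foo_resources,
 *					    ARRAY_SIZE(foo_resources));
 *	if (!ret)
 *		ret = platform_device_add(pdev);
 *	if (ret)
 *		platform_device_put(pdev);
 */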
/**
 * platform_device_del - remove a platform-level device
 * @pdev: platform device we're removing
 *
 * Note that this function will also release all memory- and port-based
 * resources owned by the device (@dev->resource). This function must
 * _only_ be externally called in error cases. All other usage is a bug.
 */
void platform_device_del(struct platform_device *pdev)
{
	u32 i;

	if (!IS_ERR_OR_NULL(pdev)) {
		device_del(&pdev->dev);

		if (pdev->id_auto) {
			ida_simple_remove(&platform_devid_ida, pdev->id);
			pdev->id = PLATFORM_DEVID_AUTO;
		}

		for (i = 0; i < pdev->num_resources; i++) {
			struct resource *r = &pdev->resource[i];
			if (r->parent)
				release_resource(r);
		}
	}
}
EXPORT_SYMBOL_GPL(platform_device_del);
/**
 * platform_device_register - add a platform-level device
 * @pdev: platform device we're adding
 */
int platform_device_register(struct platform_device *pdev)
{
	device_initialize(&pdev->dev);
	setup_pdev_dma_masks(pdev);
	return platform_device_add(pdev);
}
EXPORT_SYMBOL_GPL(platform_device_register);
/**
 * platform_device_unregister - unregister a platform-level device
 * @pdev: platform device we're unregistering
 *
 * Unregistration is done in 2 steps. First we release all resources
 * and remove it from the subsystem, then we drop reference count by
 * calling platform_device_put().
 */
void platform_device_unregister(struct platform_device *pdev)
{
	platform_device_del(pdev);
	platform_device_put(pdev);
}
EXPORT_SYMBOL_GPL(platform_device_unregister);
/**
 * platform_device_register_full - add a platform-level device with
 * resources and platform-specific data
 *
 * @pdevinfo: data used to create device
 *
 * Returns &struct platform_device pointer on success, or ERR_PTR() on error.
 */
struct platform_device *platform_device_register_full(
		const struct platform_device_info *pdevinfo)
{
	int ret = -ENOMEM;
	struct platform_device *pdev;

	pdev = platform_device_alloc(pdevinfo->name, pdevinfo->id);
	if (!pdev)
		return ERR_PTR(-ENOMEM);

	pdev->dev.parent = pdevinfo->parent;
	pdev->dev.fwnode = pdevinfo->fwnode;
	pdev->dev.of_node = of_node_get(to_of_node(pdev->dev.fwnode));
	pdev->dev.of_node_reused = pdevinfo->of_node_reused;

	if (pdevinfo->dma_mask) {
		/*
		 * This memory isn't freed when the device is put,
		 * I don't have a nice idea for that though. Conceptually
		 * dma_mask in struct device should not be a pointer.
		 * See http://thread.gmane.org/gmane.linux.kernel.pci/9081
		 */
		pdev->dev.dma_mask =
			kmalloc(sizeof(*pdev->dev.dma_mask), GFP_KERNEL);
		if (!pdev->dev.dma_mask)
			goto err;

		kmemleak_ignore(pdev->dev.dma_mask);

		*pdev->dev.dma_mask = pdevinfo->dma_mask;
		pdev->dev.coherent_dma_mask = pdevinfo->dma_mask;
	}

	ret = platform_device_add_resources(pdev,
			pdevinfo->res, pdevinfo->num_res);
	if (ret)
		goto err;

	ret = platform_device_add_data(pdev,
			pdevinfo->data, pdevinfo->size_data);
	if (ret)
		goto err;

	if (pdevinfo->properties) {
		ret = platform_device_add_properties(pdev,
						     pdevinfo->properties);
		if (ret)
			goto err;
	}

	ret = platform_device_add(pdev);
	if (ret) {
err:
		ACPI_COMPANION_SET(&pdev->dev, NULL);
		kfree(pdev->dev.dma_mask);
		platform_device_put(pdev);
		return ERR_PTR(ret);
	}

	return pdev;
}
EXPORT_SYMBOL_GPL(platform_device_register_full);
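
/*
 * Usage sketch (illustrative only; the field values and foo_resources[] are
 * assumptions): building and registering the device in one shot from a
 * platform_device_info:
 *
 *	struct platform_device_info pdevinfo = {
 *		.name		= "foo",
 *		.id		= PLATFORM_DEVID_AUTO,
 *		.res		= foo_resources,
 *		.num_res	= ARRAY_SIZE(foo_resources),
 *		.dma_mask	= DMA_BIT_MASK(32),
 *	};
 *	struct platform_device *pdev;
 *
 *	pdev = platform_device_register_full(&pdevinfo);
 *	if (IS_ERR(pdev))
 *		return PTR_ERR(pdev);
 */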
static int platform_drv_probe(struct device *_dev)
{
	struct platform_driver *drv = to_platform_driver(_dev->driver);
	struct platform_device *dev = to_platform_device(_dev);
	int ret;

	ret = of_clk_set_defaults(_dev->of_node, false);
	if (ret < 0)
		return ret;

	ret = dev_pm_domain_attach(_dev, true);
	if (ret)
		goto out;

	if (drv->probe) {
		ret = drv->probe(dev);
		if (ret)
			dev_pm_domain_detach(_dev, true);
	}

out:
	if (drv->prevent_deferred_probe && ret == -EPROBE_DEFER) {
		dev_warn(_dev, "probe deferral not supported\n");
		ret = -ENXIO;
	}

	return ret;
}

static int platform_drv_probe_fail(struct device *_dev)
{
	return -ENXIO;
}

static int platform_drv_remove(struct device *_dev)
{
	struct platform_driver *drv = to_platform_driver(_dev->driver);
	struct platform_device *dev = to_platform_device(_dev);
	int ret = 0;

	if (drv->remove)
		ret = drv->remove(dev);
	dev_pm_domain_detach(_dev, true);

	return ret;
}

static void platform_drv_shutdown(struct device *_dev)
{
	struct platform_driver *drv = to_platform_driver(_dev->driver);
	struct platform_device *dev = to_platform_device(_dev);

	if (drv->shutdown)
		drv->shutdown(dev);
}
/**
 * __platform_driver_register - register a driver for platform-level devices
 * @drv: platform driver structure
 * @owner: owning module/driver
 */
int __platform_driver_register(struct platform_driver *drv,
				struct module *owner)
{
	drv->driver.owner = owner;
	drv->driver.bus = &platform_bus_type;
	drv->driver.probe = platform_drv_probe;
	drv->driver.remove = platform_drv_remove;
	drv->driver.shutdown = platform_drv_shutdown;

	return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(__platform_driver_register);
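
/*
 * Usage sketch (illustrative only; the foo_* driver, callbacks and match
 * table are assumptions): most drivers never call __platform_driver_register()
 * directly but use the platform_driver_register()/module_platform_driver()
 * wrappers instead:
 *
 *	static struct platform_driver foo_driver = {
 *		.probe	= foo_probe,
 *		.remove	= foo_remove,
 *		.driver	= {
 *			.name		= "foo",
 *			.of_match_table	= foo_of_match,
 *		},
 *	};
 *	module_platform_driver(foo_driver);
 */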
/**
 * platform_driver_unregister - unregister a driver for platform-level devices
 * @drv: platform driver structure
 */
void platform_driver_unregister(struct platform_driver *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(platform_driver_unregister);
/**
 * __platform_driver_probe - register driver for non-hotpluggable device
 * @drv: platform driver structure
 * @probe: the driver probe routine, probably from an __init section
 * @module: module which will be the owner of the driver
 *
 * Use this instead of platform_driver_register() when you know the device
 * is not hotpluggable and has already been registered, and you want to
 * remove its run-once probe() infrastructure from memory after the driver
 * has bound to the device.
 *
 * One typical use for this would be with drivers for controllers integrated
 * into system-on-chip processors, where the controller devices have been
 * configured as part of board setup.
 *
 * Note that this is incompatible with deferred probing.
 *
 * Returns zero if the driver registered and bound to a device, else returns
 * a negative error code and with the driver not registered.
 */
int __init_or_module __platform_driver_probe(struct platform_driver *drv,
		int (*probe)(struct platform_device *), struct module *module)
{
	int retval, code;

	if (drv->driver.probe_type == PROBE_PREFER_ASYNCHRONOUS) {
		pr_err("%s: drivers registered with %s can not be probed asynchronously\n",
			 drv->driver.name, __func__);
		return -EINVAL;
	}

	/*
	 * We have to run our probes synchronously because we check if
	 * we find any devices to bind to and exit with error if there
	 * are any.
	 */
	drv->driver.probe_type = PROBE_FORCE_SYNCHRONOUS;

	/*
	 * Prevent driver from requesting probe deferral to avoid further
	 * futile probe attempts.
	 */
	drv->prevent_deferred_probe = true;

	/* make sure driver won't have bind/unbind attributes */
	drv->driver.suppress_bind_attrs = true;

	/* temporary section violation during probe() */
	drv->probe = probe;
	retval = code = __platform_driver_register(drv, module);
	if (retval)
		return retval;

	/*
	 * Fixup that section violation, being paranoid about code scanning
	 * the list of drivers in order to probe new devices. Check to see
	 * if the probe was successful, and make sure any forced probes of
	 * new devices fail.
	 */
	spin_lock(&drv->driver.bus->p->klist_drivers.k_lock);
	drv->probe = NULL;
	if (code == 0 && list_empty(&drv->driver.p->klist_devices.k_list))
		retval = -ENODEV;
	drv->driver.probe = platform_drv_probe_fail;
	spin_unlock(&drv->driver.bus->p->klist_drivers.k_lock);

	if (code != retval)
		platform_driver_unregister(drv);
	return retval;
}
EXPORT_SYMBOL_GPL(__platform_driver_probe);
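
/*
 * Usage sketch (illustrative only; foo_driver and foo_probe() are assumed
 * to exist, with foo_probe() in an __init section): a non-hotpluggable,
 * board-registered device binding a run-once probe via the
 * platform_driver_probe() wrapper:
 *
 *	static struct platform_driver foo_driver = {
 *		.driver = {
 *			.name = "foo",
 *		},
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		return platform_driver_probe(&foo_driver, foo_probe);
 *	}
 */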
/**
 * __platform_create_bundle - register driver and create corresponding device
 * @driver: platform driver structure
 * @probe: the driver probe routine, probably from an __init section
 * @res: set of resources that needs to be allocated for the device
 * @n_res: number of resources
 * @data: platform specific data for this platform device
 * @size: size of platform specific data
 * @module: module which will be the owner of the driver
 *
 * Use this in legacy-style modules that probe hardware directly and
 * register a single platform device and corresponding platform driver.
 *
 * Returns &struct platform_device pointer on success, or ERR_PTR() on error.
 */
struct platform_device * __init_or_module __platform_create_bundle(
			struct platform_driver *driver,
			int (*probe)(struct platform_device *),
			struct resource *res, unsigned int n_res,
			const void *data, size_t size, struct module *module)
{
	struct platform_device *pdev;
	int error;

	pdev = platform_device_alloc(driver->driver.name, -1);
	if (!pdev) {
		error = -ENOMEM;
		goto err_out;
	}

	error = platform_device_add_resources(pdev, res, n_res);
	if (error)
		goto err_pdev_put;

	error = platform_device_add_data(pdev, data, size);
	if (error)
		goto err_pdev_put;

	error = platform_device_add(pdev);
	if (error)
		goto err_pdev_put;

	error = __platform_driver_probe(driver, probe, module);
	if (error)
		goto err_pdev_del;

	return pdev;

err_pdev_del:
	platform_device_del(pdev);
err_pdev_put:
	platform_device_put(pdev);
err_out:
	return ERR_PTR(error);
}
EXPORT_SYMBOL_GPL(__platform_create_bundle);
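
/*
 * Usage sketch (illustrative only; the foo_* names are assumptions): a
 * legacy module that creates its device and driver together, via the
 * platform_create_bundle() wrapper:
 *
 *	static int __init foo_init(void)
 *	{
 *		struct platform_device *pdev;
 *
 *		pdev = platform_create_bundle(&foo_driver, foo_probe,
 *					      foo_resources,
 *					      ARRAY_SIZE(foo_resources),
 *					      NULL, 0);
 *		return PTR_ERR_OR_ZERO(pdev);
 *	}
 */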
/**
 * __platform_register_drivers - register an array of platform drivers
 * @drivers: an array of drivers to register
 * @count: the number of drivers to register
 * @owner: module owning the drivers
 *
 * Registers platform drivers specified by an array. On failure to register a
 * driver, all previously registered drivers will be unregistered. Callers of
 * this API should use platform_unregister_drivers() to unregister drivers in
 * the reverse order.
 *
 * Returns: 0 on success or a negative error code on failure.
 */
int __platform_register_drivers(struct platform_driver * const *drivers,
				unsigned int count, struct module *owner)
{
	unsigned int i;
	int err;

	for (i = 0; i < count; i++) {
		pr_debug("registering platform driver %ps\n", drivers[i]);

		err = __platform_driver_register(drivers[i], owner);
		if (err < 0) {
			pr_err("failed to register platform driver %ps: %d\n",
			       drivers[i], err);
			goto error;
		}
	}

	return 0;

error:
	while (i--) {
		pr_debug("unregistering platform driver %ps\n", drivers[i]);
		platform_driver_unregister(drivers[i]);
	}

	return err;
}
EXPORT_SYMBOL_GPL(__platform_register_drivers);
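
/*
 * Usage sketch (illustrative only; the foo_* drivers are assumptions):
 * registering a group of related drivers as a unit via the
 * platform_register_drivers() wrapper:
 *
 *	static struct platform_driver * const foo_drivers[] = {
 *		&foo_core_driver,
 *		&foo_spi_driver,
 *	};
 *
 *	return platform_register_drivers(foo_drivers, ARRAY_SIZE(foo_drivers));
 */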
/**
 * platform_unregister_drivers - unregister an array of platform drivers
 * @drivers: an array of drivers to unregister
 * @count: the number of drivers to unregister
 *
 * Unregisters platform drivers specified by an array. This is typically used
 * to complement an earlier call to platform_register_drivers(). Drivers are
 * unregistered in the reverse order in which they were registered.
 */
void platform_unregister_drivers(struct platform_driver * const *drivers,
				 unsigned int count)
{
	while (count--) {
		pr_debug("unregistering platform driver %ps\n", drivers[count]);
		platform_driver_unregister(drivers[count]);
	}
}
EXPORT_SYMBOL_GPL(platform_unregister_drivers);
/* modalias support enables more hands-off userspace setup:
 * (a) environment variable lets new-style hotplug events work once system is
 *     fully running:  "modprobe $MODALIAS"
 * (b) sysfs attribute lets new-style coldplug recover from hotplug events
 *     mishandled before system is fully running:  "modprobe $(cat modalias)"
 */
static ssize_t modalias_show(struct device *dev,
			     struct device_attribute *a, char *buf)
{
	struct platform_device *pdev = to_platform_device(dev);
	int len;

	len = of_device_modalias(dev, buf, PAGE_SIZE);
	if (len != -ENODEV)
		return len;

	len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
	if (len != -ENODEV)
		return len;

	len = snprintf(buf, PAGE_SIZE, "platform:%s\n", pdev->name);

	return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len;
}
static DEVICE_ATTR_RO(modalias);
static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct platform_device *pdev = to_platform_device(dev);
	char *driver_override, *old, *cp;

	/* We need to keep extra room for a newline */
	if (count >= (PAGE_SIZE - 1))
		return -EINVAL;

	driver_override = kstrndup(buf, count, GFP_KERNEL);
	if (!driver_override)
		return -ENOMEM;

	cp = strchr(driver_override, '\n');
	if (cp)
		*cp = '\0';

	device_lock(dev);
	old = pdev->driver_override;
	if (strlen(driver_override)) {
		pdev->driver_override = driver_override;
	} else {
		kfree(driver_override);
		pdev->driver_override = NULL;
	}
	device_unlock(dev);

	kfree(old);

	return count;
}

static ssize_t driver_override_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct platform_device *pdev = to_platform_device(dev);
	ssize_t len;

	device_lock(dev);
	len = sprintf(buf, "%s\n", pdev->driver_override);
	device_unlock(dev);

	return len;
}
static DEVICE_ATTR_RW(driver_override);
static struct attribute *platform_dev_attrs[] = {
	&dev_attr_modalias.attr,
	&dev_attr_driver_override.attr,
	NULL,
};
ATTRIBUTE_GROUPS(platform_dev);
static int platform_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct platform_device *pdev = to_platform_device(dev);
	int rc;

	/* Some devices have extra OF data and an OF-style MODALIAS */
	rc = of_device_uevent_modalias(dev, env);
	if (rc != -ENODEV)
		return rc;

	rc = acpi_device_uevent_modalias(dev, env);
	if (rc != -ENODEV)
		return rc;

	add_uevent_var(env, "MODALIAS=%s%s", PLATFORM_MODULE_PREFIX,
		       pdev->name);
	return 0;
}

static const struct platform_device_id *platform_match_id(
			const struct platform_device_id *id,
			struct platform_device *pdev)
{
	while (id->name[0]) {
		if (strcmp(pdev->name, id->name) == 0) {
			pdev->id_entry = id;
			return id;
		}
		id++;
	}
	return NULL;
}
/**
 * platform_match - bind platform device to platform driver.
 * @dev: device.
 * @drv: driver.
 *
 * Platform device IDs are assumed to be encoded like this:
 * "<name><instance>", where <name> is a short description of the type of
 * device, like "pci" or "floppy", and <instance> is the enumerated
 * instance of the device, like '0' or '42'. Driver IDs are simply
 * "<name>". So, extract the <name> from the platform_device structure,
 * and compare it against the name of the driver. Return whether they match
 * or not.
 */
static int platform_match(struct device *dev, struct device_driver *drv)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct platform_driver *pdrv = to_platform_driver(drv);

	/* When driver_override is set, only bind to the matching driver */
	if (pdev->driver_override)
		return !strcmp(pdev->driver_override, drv->name);

	/* Attempt an OF style match first */
	if (of_driver_match_device(dev, drv))
		return 1;

	/* Then try ACPI style match */
	if (acpi_driver_match_device(dev, drv))
		return 1;

	/* Then try to match against the id table */
	if (pdrv->id_table)
		return platform_match_id(pdrv->id_table, pdev) != NULL;

	/* fall-back to driver name match */
	return (strcmp(pdev->name, drv->name) == 0);
}
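
/*
 * Usage sketch (illustrative only; the foo names and driver_data values are
 * assumptions): an id_table that platform_match() consults when neither OF
 * nor ACPI matching applies:
 *
 *	static const struct platform_device_id foo_id_table[] = {
 *		{ "foo-v1", (kernel_ulong_t)&foo_v1_data },
 *		{ "foo-v2", (kernel_ulong_t)&foo_v2_data },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(platform, foo_id_table);
 */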
#ifdef CONFIG_PM_SLEEP

static int platform_legacy_suspend(struct device *dev, pm_message_t mesg)
{
	struct platform_driver *pdrv = to_platform_driver(dev->driver);
	struct platform_device *pdev = to_platform_device(dev);
	int ret = 0;

	if (dev->driver && pdrv->suspend)
		ret = pdrv->suspend(pdev, mesg);

	return ret;
}

static int platform_legacy_resume(struct device *dev)
{
	struct platform_driver *pdrv = to_platform_driver(dev->driver);
	struct platform_device *pdev = to_platform_device(dev);
	int ret = 0;

	if (dev->driver && pdrv->resume)
		ret = pdrv->resume(pdev);

	return ret;
}

#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_SUSPEND

int platform_pm_suspend(struct device *dev)
{
	struct device_driver *drv = dev->driver;
	int ret = 0;

	if (!drv)
		return 0;

	if (drv->pm) {
		if (drv->pm->suspend)
			ret = drv->pm->suspend(dev);
	} else {
		ret = platform_legacy_suspend(dev, PMSG_SUSPEND);
	}

	return ret;
}

int platform_pm_resume(struct device *dev)
{
	struct device_driver *drv = dev->driver;
	int ret = 0;

	if (!drv)
		return 0;

	if (drv->pm) {
		if (drv->pm->resume)
			ret = drv->pm->resume(dev);
	} else {
		ret = platform_legacy_resume(dev);
	}

	return ret;
}

#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS

int platform_pm_freeze(struct device *dev)
{
	struct device_driver *drv = dev->driver;
	int ret = 0;

	if (!drv)
		return 0;

	if (drv->pm) {
		if (drv->pm->freeze)
			ret = drv->pm->freeze(dev);
	} else {
		ret = platform_legacy_suspend(dev, PMSG_FREEZE);
	}

	return ret;
}

int platform_pm_thaw(struct device *dev)
{
	struct device_driver *drv = dev->driver;
	int ret = 0;

	if (!drv)
		return 0;

	if (drv->pm) {
		if (drv->pm->thaw)
			ret = drv->pm->thaw(dev);
	} else {
		ret = platform_legacy_resume(dev);
	}

	return ret;
}

int platform_pm_poweroff(struct device *dev)
{
	struct device_driver *drv = dev->driver;
	int ret = 0;

	if (!drv)
		return 0;

	if (drv->pm) {
		if (drv->pm->poweroff)
			ret = drv->pm->poweroff(dev);
	} else {
		ret = platform_legacy_suspend(dev, PMSG_HIBERNATE);
	}

	return ret;
}

int platform_pm_restore(struct device *dev)
{
	struct device_driver *drv = dev->driver;
	int ret = 0;

	if (!drv)
		return 0;

	if (drv->pm) {
		if (drv->pm->restore)
			ret = drv->pm->restore(dev);
	} else {
		ret = platform_legacy_resume(dev);
	}

	return ret;
}

#endif /* CONFIG_HIBERNATE_CALLBACKS */
int platform_dma_configure(struct device *dev)
{
	enum dev_dma_attr attr;
	int ret = 0;

	if (dev->of_node) {
		ret = of_dma_configure(dev, dev->of_node, true);
	} else if (has_acpi_companion(dev)) {
		attr = acpi_get_dma_attr(to_acpi_device_node(dev->fwnode));
		ret = acpi_dma_configure(dev, attr);
	}

	return ret;
}
static const struct dev_pm_ops platform_dev_pm_ops = {
	.runtime_suspend = pm_generic_runtime_suspend,
	.runtime_resume = pm_generic_runtime_resume,
	USE_PLATFORM_PM_SLEEP_OPS
};

struct bus_type platform_bus_type = {
	.name		= "platform",
	.dev_groups	= platform_dev_groups,
	.match		= platform_match,
	.uevent		= platform_uevent,
	.dma_configure	= platform_dma_configure,
	.pm		= &platform_dev_pm_ops,
};
EXPORT_SYMBOL_GPL(platform_bus_type);
static inline int __platform_match(struct device *dev, const void *drv)
{
	return platform_match(dev, (struct device_driver *)drv);
}

/**
 * platform_find_device_by_driver - Find a platform device with a given
 * driver.
 * @start: The device to start the search from.
 * @drv: The device driver to look for.
 */
struct device *platform_find_device_by_driver(struct device *start,
					      const struct device_driver *drv)
{
	return bus_find_device(&platform_bus_type, start, drv,
			       __platform_match);
}
EXPORT_SYMBOL_GPL(platform_find_device_by_driver);
void __weak __init early_platform_cleanup(void) { }

int __init platform_bus_init(void)
{
	int error;

	early_platform_cleanup();

	error = device_register(&platform_bus);
	if (error) {
		put_device(&platform_bus);
		return error;
	}
	error = bus_register(&platform_bus_type);
	if (error)
		device_unregister(&platform_bus);
	of_platform_register_reconfig_notifier();
	return error;
}