// SPDX-License-Identifier: GPL-2.0-only
/*
 * drivers/mfd/mfd-core.c
 *
 * core MFD support
 * Copyright (c) 2006 Ian Molton
 * Copyright (c) 2007,2008 Dmitry Baryshkov
 */

#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/acpi.h>
#include <linux/list.h>
#include <linux/property.h>
#include <linux/mfd/core.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/regulator/consumer.h>

static LIST_HEAD(mfd_of_node_list);

struct mfd_of_node_entry {
	struct list_head list;
	struct device *dev;
	struct device_node *np;
};

static struct device_type mfd_dev_type = {
	.name	= "mfd_device",
};

int mfd_cell_enable(struct platform_device *pdev)
{
	const struct mfd_cell *cell = mfd_get_cell(pdev);

	if (!cell->enable) {
		dev_dbg(&pdev->dev, "No .enable() call-back registered\n");
		return 0;
	}

	return cell->enable(pdev);
}
EXPORT_SYMBOL(mfd_cell_enable);

int mfd_cell_disable(struct platform_device *pdev)
{
	const struct mfd_cell *cell = mfd_get_cell(pdev);

	if (!cell->disable) {
		dev_dbg(&pdev->dev, "No .disable() call-back registered\n");
		return 0;
	}

	return cell->disable(pdev);
}
EXPORT_SYMBOL(mfd_cell_disable);

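/*
 * Illustrative sketch only, not part of this core: a hypothetical parent
 * driver could route per-function power control through the call-backs
 * used above, letting a child driver call mfd_cell_enable(pdev) from its
 * probe(). All "example_*" identifiers below are invented for documentation.
 */
static int example_cell_power_up(struct platform_device *pdev)
{
	/* e.g. ungate a clock or set a power-enable bit in the parent */
	return 0;
}

static const struct mfd_cell example_powered_cell __maybe_unused = {
	.name	= "example-function",
	.enable	= example_cell_power_up,
};
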
#if IS_ENABLED(CONFIG_ACPI)
static void mfd_acpi_add_device(const struct mfd_cell *cell,
				struct platform_device *pdev)
{
	const struct mfd_cell_acpi_match *match = cell->acpi_match;
	struct acpi_device *parent, *child;
	struct acpi_device *adev;

	parent = ACPI_COMPANION(pdev->dev.parent);
	if (!parent)
		return;

	/*
	 * The MFD child device gets its ACPI handle either from the ACPI
	 * device directly under the parent that matches its _HID/_CID or
	 * _ADR, or it falls back to the parent's handle if no ID is given.
	 *
	 * Note that use of _ADR is a grey area in the ACPI specification,
	 * though Intel Galileo Gen2 is using it to distinguish its child
	 * devices.
	 */
	adev = parent;
	if (match) {
		if (match->pnpid) {
			struct acpi_device_id ids[2] = {};

			strlcpy(ids[0].id, match->pnpid, sizeof(ids[0].id));
			list_for_each_entry(child, &parent->children, node) {
				if (!acpi_match_device_ids(child, ids)) {
					adev = child;
					break;
				}
			}
		} else {
			unsigned long long adr;
			acpi_status status;

			list_for_each_entry(child, &parent->children, node) {
				status = acpi_evaluate_integer(child->handle,
							       "_ADR", NULL,
							       &adr);
				if (ACPI_SUCCESS(status) && match->adr == adr) {
					adev = child;
					break;
				}
			}
		}
	}

	ACPI_COMPANION_SET(&pdev->dev, adev);
}
#else
static inline void mfd_acpi_add_device(const struct mfd_cell *cell,
				       struct platform_device *pdev)
{
}
#endif

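/*
 * Illustrative sketch only, not part of this core: a hypothetical parent
 * whose children are enumerated in ACPI can tie a cell to one child device
 * by PNP ID (or by _ADR), which is what mfd_acpi_add_device() matches above.
 * The "example" names and the "ABCD0001" ID are invented for documentation.
 */
static const struct mfd_cell_acpi_match example_acpi_match = {
	.pnpid = "ABCD0001",	/* matched against the child's _HID/_CID */
};

static const struct mfd_cell example_acpi_cell __maybe_unused = {
	.name	    = "example-acpi-function",
	.acpi_match = &example_acpi_match,
};
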
static int mfd_match_of_node_to_dev(struct platform_device *pdev,
				    struct device_node *np,
				    const struct mfd_cell *cell)
{
#if IS_ENABLED(CONFIG_OF)
	struct mfd_of_node_entry *of_entry;
	const __be32 *reg;
	u64 of_node_addr;

	/* Skip if OF node has previously been allocated to a device */
	list_for_each_entry(of_entry, &mfd_of_node_list, list)
		if (of_entry->np == np)
			return -EAGAIN;

	if (!cell->use_of_reg)
		/* No of_reg defined - allocate first free compatible match */
		goto allocate_of_node;

	/* We only care about each node's first defined address */
	reg = of_get_address(np, 0, NULL, NULL);
	if (!reg)
		/* OF node does not contain a 'reg' property to match to */
		return -EAGAIN;

	of_node_addr = of_read_number(reg, of_n_addr_cells(np));

	if (cell->of_reg != of_node_addr)
		/* No match */
		return -EAGAIN;

allocate_of_node:
	of_entry = kzalloc(sizeof(*of_entry), GFP_KERNEL);
	if (!of_entry)
		return -ENOMEM;

	of_entry->dev = &pdev->dev;
	of_entry->np = np;
	list_add_tail(&of_entry->list, &mfd_of_node_list);

	pdev->dev.of_node = np;
	pdev->dev.fwnode = &np->fwnode;
#endif
	return 0;
}

static int mfd_add_device(struct device *parent, int id,
			  const struct mfd_cell *cell,
			  struct resource *mem_base,
			  int irq_base, struct irq_domain *domain)
{
	struct resource *res;
	struct platform_device *pdev;
	struct device_node *np = NULL;
	struct mfd_of_node_entry *of_entry, *tmp;
	int ret = -ENOMEM;
	int platform_id;
	int r;

	if (id == PLATFORM_DEVID_AUTO)
		platform_id = id;
	else
		platform_id = id + cell->id;

	pdev = platform_device_alloc(cell->name, platform_id);
	if (!pdev)
		goto fail_alloc;

	pdev->mfd_cell = kmemdup(cell, sizeof(*cell), GFP_KERNEL);
	if (!pdev->mfd_cell)
		goto fail_device;

	res = kcalloc(cell->num_resources, sizeof(*res), GFP_KERNEL);
	if (!res)
		goto fail_device;

	pdev->dev.parent = parent;
	pdev->dev.type = &mfd_dev_type;
	pdev->dev.dma_mask = parent->dma_mask;
	pdev->dev.dma_parms = parent->dma_parms;
	pdev->dev.coherent_dma_mask = parent->coherent_dma_mask;

	ret = regulator_bulk_register_supply_alias(
			&pdev->dev, cell->parent_supplies,
			parent, cell->parent_supplies,
			cell->num_parent_supplies);
	if (ret < 0)
		goto fail_res;

	if (IS_ENABLED(CONFIG_OF) && parent->of_node && cell->of_compatible) {
		for_each_child_of_node(parent->of_node, np) {
			if (of_device_is_compatible(np, cell->of_compatible)) {
				/* Ignore 'disabled' devices error free */
				if (!of_device_is_available(np)) {
					ret = 0;
					goto fail_alias;
				}

				ret = mfd_match_of_node_to_dev(pdev, np, cell);
				if (ret == -EAGAIN)
					continue;
				if (ret)
					goto fail_alias;

				break;
			}
		}

		if (!pdev->dev.of_node)
			pr_warn("%s: Failed to locate of_node [id: %d]\n",
				cell->name, platform_id);
	}

	mfd_acpi_add_device(cell, pdev);

	if (cell->pdata_size) {
		ret = platform_device_add_data(pdev,
					cell->platform_data, cell->pdata_size);
		if (ret)
			goto fail_of_entry;
	}

	if (cell->properties) {
		ret = platform_device_add_properties(pdev, cell->properties);
		if (ret)
			goto fail_of_entry;
	}

	for (r = 0; r < cell->num_resources; r++) {
		res[r].name = cell->resources[r].name;
		res[r].flags = cell->resources[r].flags;

		/* Find out base to use */
		if ((cell->resources[r].flags & IORESOURCE_MEM) && mem_base) {
			res[r].parent = mem_base;
			res[r].start = mem_base->start +
				cell->resources[r].start;
			res[r].end = mem_base->start +
				cell->resources[r].end;
		} else if (cell->resources[r].flags & IORESOURCE_IRQ) {
			if (domain) {
				/* Unable to create mappings for IRQ ranges. */
				WARN_ON(cell->resources[r].start !=
					cell->resources[r].end);
				res[r].start = res[r].end = irq_create_mapping(
					domain, cell->resources[r].start);
			} else {
				res[r].start = irq_base +
					cell->resources[r].start;
				res[r].end = irq_base +
					cell->resources[r].end;
			}
		} else {
			res[r].parent = cell->resources[r].parent;
			res[r].start = cell->resources[r].start;
			res[r].end = cell->resources[r].end;
		}

		if (!cell->ignore_resource_conflicts) {
			if (has_acpi_companion(&pdev->dev)) {
				ret = acpi_check_resource_conflict(&res[r]);
				if (ret)
					goto fail_of_entry;
			}
		}
	}

	ret = platform_device_add_resources(pdev, res, cell->num_resources);
	if (ret)
		goto fail_of_entry;

	ret = platform_device_add(pdev);
	if (ret)
		goto fail_of_entry;

	if (cell->pm_runtime_no_callbacks)
		pm_runtime_no_callbacks(&pdev->dev);

	kfree(res);

	return 0;

fail_of_entry:
	list_for_each_entry_safe(of_entry, tmp, &mfd_of_node_list, list)
		if (of_entry->dev == &pdev->dev) {
			list_del(&of_entry->list);
			kfree(of_entry);
		}
fail_alias:
	regulator_bulk_unregister_supply_alias(&pdev->dev,
					       cell->parent_supplies,
					       cell->num_parent_supplies);
fail_res:
	kfree(res);
fail_device:
	platform_device_put(pdev);
fail_alloc:
	return ret;
}

/**
 * mfd_add_devices - register child devices
 *
 * @parent:	Pointer to parent device.
 * @id:		Can be PLATFORM_DEVID_AUTO to let the Platform API take care
 *		of device numbering, or will be added to a device's cell_id.
 * @cells:	Array of (struct mfd_cell)s describing child devices.
 * @n_devs:	Number of child devices to register.
 * @mem_base:	Parent register range resource for child devices.
 * @irq_base:	Base of the range of virtual interrupt numbers allocated for
 *		this MFD device. Unused if @domain is specified.
 * @domain:	Interrupt domain to create mappings for hardware interrupts.
 */
int mfd_add_devices(struct device *parent, int id,
		    const struct mfd_cell *cells, int n_devs,
		    struct resource *mem_base,
		    int irq_base, struct irq_domain *domain)
{
	int i;
	int ret;

	for (i = 0; i < n_devs; i++) {
		ret = mfd_add_device(parent, id, cells + i, mem_base,
				     irq_base, domain);
		if (ret)
			goto fail;
	}

	return 0;

fail:
	if (i)
		mfd_remove_devices(parent);

	return ret;
}
EXPORT_SYMBOL(mfd_add_devices);

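/*
 * Illustrative sketch only, not used by this core: a hypothetical child
 * description with one register window and one interrupt, both given
 * relative to the parent. mfd_add_device() above offsets the MEM resource
 * against @mem_base and maps the IRQ through @domain (or @irq_base). A
 * parent's probe() might then call, for instance:
 *
 *	mfd_add_devices(dev, 0, &example_child_cell, 1,
 *			parent_mem, 0, parent_irq_domain);
 *
 * Every "example"/"parent_*" identifier here is invented for documentation.
 */
static const struct resource example_child_resources[] __maybe_unused = {
	DEFINE_RES_MEM(0x100, 0x40),	/* offset 0x100 within the parent's window */
	DEFINE_RES_IRQ(3),		/* hwirq 3 in the parent's irq_domain */
};

static const struct mfd_cell example_child_cell __maybe_unused = {
	.name		= "example-child",
	.resources	= example_child_resources,
	.num_resources	= ARRAY_SIZE(example_child_resources),
};
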
static int mfd_remove_devices_fn(struct device *dev, void *data)
{
	struct platform_device *pdev;
	const struct mfd_cell *cell;
	int *level = data;

	if (dev->type != &mfd_dev_type)
		return 0;

	pdev = to_platform_device(dev);
	cell = mfd_get_cell(pdev);

	if (level && cell->level > *level)
		return 0;

	regulator_bulk_unregister_supply_alias(dev, cell->parent_supplies,
					       cell->num_parent_supplies);

	platform_device_unregister(pdev);

	return 0;
}

void mfd_remove_devices_late(struct device *parent)
{
	int level = MFD_DEP_LEVEL_HIGH;

	device_for_each_child_reverse(parent, &level, mfd_remove_devices_fn);
}
EXPORT_SYMBOL(mfd_remove_devices_late);

void mfd_remove_devices(struct device *parent)
{
	int level = MFD_DEP_LEVEL_NORMAL;

	device_for_each_child_reverse(parent, &level, mfd_remove_devices_fn);
}
EXPORT_SYMBOL(mfd_remove_devices);

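/*
 * Illustrative sketch only, not used by this core: a cell that the other
 * children depend on (for example a shared regulator function) can be
 * marked with MFD_DEP_LEVEL_HIGH so that mfd_remove_devices() skips it and
 * only mfd_remove_devices_late() tears it down. "example-*" is invented.
 */
static const struct mfd_cell example_late_cell __maybe_unused = {
	.name	= "example-regulator",
	.level	= MFD_DEP_LEVEL_HIGH,
};
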
static void devm_mfd_dev_release(struct device *dev, void *res)
{
	mfd_remove_devices(dev);
}

/**
 * devm_mfd_add_devices - Resource managed version of mfd_add_devices()
 *
 * Returns 0 on success or an appropriate negative error number on failure.
 * All child devices of the MFD will automatically be removed when the
 * parent device is unbound.
 *
 * @dev:	Pointer to parent device.
 * @id:		Can be PLATFORM_DEVID_AUTO to let the Platform API take care
 *		of device numbering, or will be added to a device's cell_id.
 * @cells:	Array of (struct mfd_cell)s describing child devices.
 * @n_devs:	Number of child devices to register.
 * @mem_base:	Parent register range resource for child devices.
 * @irq_base:	Base of the range of virtual interrupt numbers allocated for
 *		this MFD device. Unused if @domain is specified.
 * @domain:	Interrupt domain to create mappings for hardware interrupts.
 */
int devm_mfd_add_devices(struct device *dev, int id,
			 const struct mfd_cell *cells, int n_devs,
			 struct resource *mem_base,
			 int irq_base, struct irq_domain *domain)
{
	struct device **ptr;
	int ret;

	ptr = devres_alloc(devm_mfd_dev_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ret = mfd_add_devices(dev, id, cells, n_devs, mem_base,
			      irq_base, domain);
	if (ret < 0) {
		devres_free(ptr);
		return ret;
	}

	*ptr = dev;
	devres_add(dev, ptr);

	return ret;
}
EXPORT_SYMBOL(devm_mfd_add_devices);

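/*
 * Illustrative sketch only, not used by this core: a hypothetical parent
 * driver's probe() registering its children through the managed variant,
 * so they are removed automatically when the parent unbinds. All "example"
 * identifiers are invented for documentation.
 */
static int __maybe_unused example_parent_probe(struct platform_device *pdev)
{
	static const struct mfd_cell example_cells[] = {
		{ .name = "example-adc" },
		{ .name = "example-rtc" },
	};

	/* Children inherit DMA settings from &pdev->dev (see mfd_add_device()). */
	return devm_mfd_add_devices(&pdev->dev, PLATFORM_DEVID_AUTO,
				    example_cells, ARRAY_SIZE(example_cells),
				    NULL, 0, NULL);
}
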
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ian Molton, Dmitry Baryshkov");