/*
 * drivers/mfd/mfd-core.c
 *
 * core MFD support
 * Copyright (c) 2006 Ian Molton
 * Copyright (c) 2007,2008 Dmitry Baryshkov
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/acpi.h>
#include <linux/mfd/core.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>

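/*
 * mfd_cell_enable - enable the hardware behind an MFD cell
 *
 * The usage count is shared between the cell and any clones created by
 * mfd_clone_cell(), so the enable hook only runs for the first user.
 */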
int mfd_cell_enable(struct platform_device *pdev)
{
        const struct mfd_cell *cell = mfd_get_cell(pdev);
        int err = 0;

        /* only call enable hook if the cell wasn't previously enabled */
        if (atomic_inc_return(cell->usage_count) == 1)
                err = cell->enable(pdev);

        /* if the enable hook failed, decrement counter to allow retries */
        if (err)
                atomic_dec(cell->usage_count);

        return err;
}
EXPORT_SYMBOL(mfd_cell_enable);

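/*
 * mfd_cell_disable - disable the hardware behind an MFD cell
 *
 * The disable hook only runs once the shared usage count drops to zero,
 * i.e. when the last user (cell or clone) is done with the hardware.
 */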
int mfd_cell_disable(struct platform_device *pdev)
{
        const struct mfd_cell *cell = mfd_get_cell(pdev);
        int err = 0;

        /* only disable if no other clients are using it */
        if (atomic_dec_return(cell->usage_count) == 0)
                err = cell->disable(pdev);

        /* if the disable hook failed, increment to allow retries */
        if (err)
                atomic_inc(cell->usage_count);

        /* sanity check; did someone call disable too many times? */
        WARN_ON(atomic_read(cell->usage_count) < 0);

        return err;
}
EXPORT_SYMBOL(mfd_cell_disable);

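/*
 * Attach a private copy of the cell description to the platform device so
 * that sub-device drivers can retrieve it later via mfd_get_cell().
 */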
static int mfd_platform_add_cell(struct platform_device *pdev,
                                 const struct mfd_cell *cell)
{
        if (!cell)
                return 0;

        pdev->mfd_cell = kmemdup(cell, sizeof(*cell), GFP_KERNEL);
        if (!pdev->mfd_cell)
                return -ENOMEM;

        return 0;
}

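/*
 * Build and register a platform device for a single cell.  MEM resources
 * are translated relative to @mem_base and IRQ resources relative to
 * @irq_base; anything else is passed through unchanged.
 */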
static int mfd_add_device(struct device *parent, int id,
                          const struct mfd_cell *cell,
                          struct resource *mem_base,
                          int irq_base)
{
        struct resource *res;
        struct platform_device *pdev;
        int ret = -ENOMEM;
        int r;

        pdev = platform_device_alloc(cell->name, id + cell->id);
        if (!pdev)
                goto fail_alloc;

        res = kzalloc(sizeof(*res) * cell->num_resources, GFP_KERNEL);
        if (!res)
                goto fail_device;

        pdev->dev.parent = parent;

        if (cell->pdata_size) {
                ret = platform_device_add_data(pdev,
                                        cell->platform_data, cell->pdata_size);
                if (ret)
                        goto fail_res;
        }

        ret = mfd_platform_add_cell(pdev, cell);
        if (ret)
                goto fail_res;

        for (r = 0; r < cell->num_resources; r++) {
                res[r].name = cell->resources[r].name;
                res[r].flags = cell->resources[r].flags;

                /* Find out base to use */
                if ((cell->resources[r].flags & IORESOURCE_MEM) && mem_base) {
                        res[r].parent = mem_base;
                        res[r].start = mem_base->start +
                                cell->resources[r].start;
                        res[r].end = mem_base->start +
                                cell->resources[r].end;
                } else if (cell->resources[r].flags & IORESOURCE_IRQ) {
                        res[r].start = irq_base +
                                cell->resources[r].start;
                        res[r].end = irq_base +
                                cell->resources[r].end;
                } else {
                        res[r].parent = cell->resources[r].parent;
                        res[r].start = cell->resources[r].start;
                        res[r].end = cell->resources[r].end;
                }

                /* check each translated resource for ACPI conflicts */
                if (!cell->ignore_resource_conflicts) {
                        ret = acpi_check_resource_conflict(&res[r]);
                        if (ret)
                                goto fail_res;
                }
        }

        ret = platform_device_add_resources(pdev, res, cell->num_resources);
        if (ret)
                goto fail_res;

        ret = platform_device_add(pdev);
        if (ret)
                goto fail_res;

        if (cell->pm_runtime_no_callbacks)
                pm_runtime_no_callbacks(&pdev->dev);

        kfree(res);

        return 0;

fail_res:
        kfree(res);
fail_device:
        platform_device_put(pdev);
fail_alloc:
        return ret;
}

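/*
 * mfd_add_devices - register an array of MFD cells as child platform devices
 *
 * Every cell gets a shared atomic usage counter (freed again by
 * mfd_remove_devices()).  If adding any cell fails, all devices added so
 * far are removed and the error is returned.
 */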
int mfd_add_devices(struct device *parent, int id,
                    struct mfd_cell *cells, int n_devs,
                    struct resource *mem_base,
                    int irq_base)
{
        int i;
        int ret = 0;
        atomic_t *cnts;

        /* initialize reference counting for all cells */
        cnts = kcalloc(n_devs, sizeof(*cnts), GFP_KERNEL);
        if (!cnts)
                return -ENOMEM;

        for (i = 0; i < n_devs; i++) {
                atomic_set(&cnts[i], 0);
                cells[i].usage_count = &cnts[i];
                ret = mfd_add_device(parent, id, cells + i, mem_base, irq_base);
                if (ret)
                        break;
        }

        if (ret)
                mfd_remove_devices(parent);

        return ret;
}
EXPORT_SYMBOL(mfd_add_devices);

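/*
 * Callback for device_for_each_child(): unregister one child and remember
 * the lowest usage_count pointer seen, which is the base of the counter
 * array allocated in mfd_add_devices() and can therefore be kfree()d.
 */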
static int mfd_remove_devices_fn(struct device *dev, void *c)
{
        struct platform_device *pdev = to_platform_device(dev);
        const struct mfd_cell *cell = mfd_get_cell(pdev);
        atomic_t **usage_count = c;

        /* find the base address of usage_count pointers (for freeing) */
        if (!*usage_count || (cell->usage_count < *usage_count))
                *usage_count = cell->usage_count;

        platform_device_unregister(pdev);
        return 0;
}

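/*
 * mfd_remove_devices - unregister all child devices added by mfd_add_devices
 *
 * Also frees the shared usage counter array once its base address has been
 * recovered from the children.
 */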
void mfd_remove_devices(struct device *parent)
{
        atomic_t *cnts = NULL;

        device_for_each_child(parent, &cnts, mfd_remove_devices_fn);
        kfree(cnts);
}
EXPORT_SYMBOL(mfd_remove_devices);

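/*
 * mfd_clone_cell - create additional platform devices that share a cell
 *
 * The clones reuse the registered cell's description (including its
 * usage_count), so mfd_cell_enable()/mfd_cell_disable() coordinate the
 * hardware between the original device and its clones.
 */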
int mfd_clone_cell(const char *cell, const char **clones, size_t n_clones)
{
        struct mfd_cell cell_entry;
        struct device *dev;
        struct platform_device *pdev;
        int i;

        /* fetch the parent cell's device (should already be registered!) */
        dev = bus_find_device_by_name(&platform_bus_type, NULL, cell);
        if (!dev) {
                printk(KERN_ERR "failed to find device for cell %s\n", cell);
                return -ENODEV;
        }
        pdev = to_platform_device(dev);
        memcpy(&cell_entry, mfd_get_cell(pdev), sizeof(cell_entry));

        WARN_ON(!cell_entry.enable);

        for (i = 0; i < n_clones; i++) {
                cell_entry.name = clones[i];
                /* don't give up if a single call fails; just report error */
                if (mfd_add_device(pdev->dev.parent, -1, &cell_entry, NULL, 0))
                        dev_err(dev, "failed to create platform device '%s'\n",
                                clones[i]);
        }

        return 0;
}
EXPORT_SYMBOL(mfd_clone_cell);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ian Molton, Dmitry Baryshkov");
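
/*
 * Example usage (illustrative sketch only, not part of this file): a parent
 * MFD driver typically describes its sub-functions as an array of mfd_cells
 * and registers them from its probe routine.  All names below (my_chip_*,
 * "my-chip-adc", the resource range) are hypothetical.  With a NULL mem_base,
 * MEM resources are passed through as absolute addresses; pass the parent's
 * own MEM resource instead to have the cells' offsets translated.
 *
 *	static struct resource my_adc_resources[] = {
 *		{
 *			.start	= 0x100,
 *			.end	= 0x1ff,
 *			.flags	= IORESOURCE_MEM,
 *		},
 *	};
 *
 *	static struct mfd_cell my_chip_cells[] = {
 *		{
 *			.name		= "my-chip-adc",
 *			.num_resources	= ARRAY_SIZE(my_adc_resources),
 *			.resources	= my_adc_resources,
 *		},
 *	};
 *
 *	static int my_chip_probe(struct platform_device *pdev)
 *	{
 *		return mfd_add_devices(&pdev->dev, -1,
 *				       my_chip_cells, ARRAY_SIZE(my_chip_cells),
 *				       NULL, 0);
 *	}
 */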