// SPDX-License-Identifier: GPL-2.0-only
/*
 * V4L2 clock service
 *
 * Copyright (C) 2012-2013, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
 */

#include <linux/atomic.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/string.h>

#include <media/v4l2-clk.h>
#include <media/v4l2-subdev.h>

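/*
 * Registry of software clocks provided through v4l2_clk_register():
 * clk_list holds every registered clock and is protected by clk_lock.
 */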
static DEFINE_MUTEX(clk_lock);
static LIST_HEAD(clk_list);

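/* Look up a registered clock by device name; must be called with clk_lock held. */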
static struct v4l2_clk *v4l2_clk_find(const char *dev_id)
{
	struct v4l2_clk *clk;

	list_for_each_entry(clk, &clk_list, list)
		if (!strcmp(dev_id, clk->dev_id))
			return clk;

	return ERR_PTR(-ENODEV);
}

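/*
 * Get a reference to the clock for @dev: prefer a real CCF clock from
 * clk_get(), otherwise fall back to a software clock registered under the
 * device name (or its OF name).
 */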
struct v4l2_clk *v4l2_clk_get(struct device *dev, const char *id)
{
	struct v4l2_clk *clk;
	struct clk *ccf_clk = clk_get(dev, id);
	char clk_name[V4L2_CLK_NAME_SIZE];

	if (PTR_ERR(ccf_clk) == -EPROBE_DEFER)
		return ERR_PTR(-EPROBE_DEFER);

	if (!IS_ERR_OR_NULL(ccf_clk)) {
		clk = kzalloc(sizeof(*clk), GFP_KERNEL);
		if (!clk) {
			clk_put(ccf_clk);
			return ERR_PTR(-ENOMEM);
		}
		clk->clk = ccf_clk;

		return clk;
	}

	mutex_lock(&clk_lock);
	clk = v4l2_clk_find(dev_name(dev));

	/* if dev_name is not found, try to use the OF name to find again */
	if (PTR_ERR(clk) == -ENODEV && dev->of_node) {
		v4l2_clk_name_of(clk_name, sizeof(clk_name), dev->of_node);
		clk = v4l2_clk_find(clk_name);
	}

	if (!IS_ERR(clk))
		atomic_inc(&clk->use_count);
	mutex_unlock(&clk_lock);

	return clk;
}
EXPORT_SYMBOL(v4l2_clk_get);

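/*
 * Consumer-side usage, as a rough sketch only (the device and clock name
 * below are hypothetical, not taken from this file):
 *
 *	struct v4l2_clk *clk = v4l2_clk_get(&client->dev, "mclk");
 *
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 *	ret = v4l2_clk_enable(clk);
 *	...
 *	v4l2_clk_disable(clk);
 *	v4l2_clk_put(clk);
 */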
void v4l2_clk_put(struct v4l2_clk *clk)
{
	struct v4l2_clk *tmp;

	if (IS_ERR(clk))
		return;

	if (clk->clk) {
		clk_put(clk->clk);
		kfree(clk);
		return;
	}

	mutex_lock(&clk_lock);

	list_for_each_entry(tmp, &clk_list, list)
		if (tmp == clk)
			atomic_dec(&clk->use_count);

	mutex_unlock(&clk_lock);
}
EXPORT_SYMBOL(v4l2_clk_put);

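/*
 * Pin the module backing @clk's ops for as long as the clock is in use, so
 * the ops cannot disappear underneath us. Only clocks still on clk_list can
 * be locked.
 */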
static int v4l2_clk_lock_driver(struct v4l2_clk *clk)
{
	struct v4l2_clk *tmp;
	int ret = -ENODEV;

	mutex_lock(&clk_lock);

	list_for_each_entry(tmp, &clk_list, list)
		if (tmp == clk) {
			ret = !try_module_get(clk->ops->owner);
			if (ret)
				ret = -EFAULT;
			break;
		}

	mutex_unlock(&clk_lock);

	return ret;
}

static void v4l2_clk_unlock_driver(struct v4l2_clk *clk)
{
	module_put(clk->ops->owner);
}

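/*
 * Enable the clock. CCF-backed clocks are prepared and enabled directly;
 * software clocks call the provider's .enable() on the first enable only,
 * with the enable count kept under clk->lock.
 */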
int v4l2_clk_enable(struct v4l2_clk *clk)
{
	int ret;

	if (clk->clk)
		return clk_prepare_enable(clk->clk);

	ret = v4l2_clk_lock_driver(clk);
	if (ret < 0)
		return ret;

	mutex_lock(&clk->lock);

	if (++clk->enable == 1 && clk->ops->enable) {
		ret = clk->ops->enable(clk);
		if (ret < 0)
			clk->enable--;
	}

	mutex_unlock(&clk->lock);

	return ret;
}
EXPORT_SYMBOL(v4l2_clk_enable);

/*
 * You might Oops if you try to disable a disabled clock, because then the
 * driver isn't locked and could have been unloaded by now, so don't do that.
 */
void v4l2_clk_disable(struct v4l2_clk *clk)
{
	int enable;

	if (clk->clk)
		return clk_disable_unprepare(clk->clk);

	mutex_lock(&clk->lock);

	enable = --clk->enable;
	if (WARN(enable < 0, "Unbalanced %s() on %s!\n", __func__,
		 clk->dev_id))
		clk->enable++;
	else if (!enable && clk->ops->disable)
		clk->ops->disable(clk);

	mutex_unlock(&clk->lock);

	v4l2_clk_unlock_driver(clk);
}
EXPORT_SYMBOL(v4l2_clk_disable);

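/* Return the clock rate; software clocks may return a negative error code. */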
unsigned long v4l2_clk_get_rate(struct v4l2_clk *clk)
{
	int ret;

	if (clk->clk)
		return clk_get_rate(clk->clk);

	ret = v4l2_clk_lock_driver(clk);
	if (ret < 0)
		return ret;

	mutex_lock(&clk->lock);
	if (!clk->ops->get_rate)
		ret = -ENOSYS;
	else
		ret = clk->ops->get_rate(clk);
	mutex_unlock(&clk->lock);

	v4l2_clk_unlock_driver(clk);

	return ret;
}
EXPORT_SYMBOL(v4l2_clk_get_rate);

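/*
 * Set the clock rate. CCF-backed clocks are rounded to the nearest supported
 * rate first; software clocks defer to the provider's .set_rate(), if any.
 */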
int v4l2_clk_set_rate(struct v4l2_clk *clk, unsigned long rate)
{
	int ret;

	if (clk->clk) {
		long r = clk_round_rate(clk->clk, rate);
		if (r < 0)
			return r;
		return clk_set_rate(clk->clk, r);
	}

	ret = v4l2_clk_lock_driver(clk);

	if (ret < 0)
		return ret;

	mutex_lock(&clk->lock);
	if (!clk->ops->set_rate)
		ret = -ENOSYS;
	else
		ret = clk->ops->set_rate(clk, rate);
	mutex_unlock(&clk->lock);

	v4l2_clk_unlock_driver(clk);

	return ret;
}
EXPORT_SYMBOL(v4l2_clk_set_rate);

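/*
 * Register a software clock for @dev_id. Fails with -EEXIST if a clock with
 * the same name is already on the list.
 */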
struct v4l2_clk *v4l2_clk_register(const struct v4l2_clk_ops *ops,
				   const char *dev_id,
				   void *priv)
{
	struct v4l2_clk *clk;
	int ret;

	if (!ops || !dev_id)
		return ERR_PTR(-EINVAL);

	clk = kzalloc(sizeof(struct v4l2_clk), GFP_KERNEL);
	if (!clk)
		return ERR_PTR(-ENOMEM);

	clk->dev_id = kstrdup(dev_id, GFP_KERNEL);
	if (!clk->dev_id) {
		ret = -ENOMEM;
		goto ealloc;
	}
	clk->ops = ops;
	clk->priv = priv;
	atomic_set(&clk->use_count, 0);
	mutex_init(&clk->lock);

	mutex_lock(&clk_lock);
	if (!IS_ERR(v4l2_clk_find(dev_id))) {
		mutex_unlock(&clk_lock);
		ret = -EEXIST;
		goto eexist;
	}
	list_add_tail(&clk->list, &clk_list);
	mutex_unlock(&clk_lock);

	return clk;

eexist:
ealloc:
	kfree(clk->dev_id);
	kfree(clk);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(v4l2_clk_register);

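/* Remove a software clock from the registry; refuses while it is still in use. */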
void v4l2_clk_unregister(struct v4l2_clk *clk)
{
	if (WARN(atomic_read(&clk->use_count),
		 "%s(): Refusing to unregister ref-counted %s clock!\n",
		 __func__, clk->dev_id))
		return;

	mutex_lock(&clk_lock);
	list_del(&clk->list);
	mutex_unlock(&clk_lock);

	kfree(clk->dev_id);
	kfree(clk);
}
EXPORT_SYMBOL(v4l2_clk_unregister);

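/* Helpers for providers that only need a fixed-rate software clock. */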
struct v4l2_clk_fixed {
	unsigned long rate;
	struct v4l2_clk_ops ops;
};

static unsigned long fixed_get_rate(struct v4l2_clk *clk)
{
	struct v4l2_clk_fixed *priv = clk->priv;
	return priv->rate;
}

struct v4l2_clk *__v4l2_clk_register_fixed(const char *dev_id,
				unsigned long rate, struct module *owner)
{
	struct v4l2_clk *clk;
	struct v4l2_clk_fixed *priv = kzalloc(sizeof(*priv), GFP_KERNEL);

	if (!priv)
		return ERR_PTR(-ENOMEM);

	priv->rate = rate;
	priv->ops.get_rate = fixed_get_rate;
	priv->ops.owner = owner;

	clk = v4l2_clk_register(&priv->ops, dev_id, priv);
	if (IS_ERR(clk))
		kfree(priv);

	return clk;
}
EXPORT_SYMBOL(__v4l2_clk_register_fixed);

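/*
 * Provider-side usage, as a rough sketch only, assuming the
 * v4l2_clk_register_fixed() wrapper from <media/v4l2-clk.h>; the "1-0030"
 * dev_id below is a made-up I2C device name:
 *
 *	clk = v4l2_clk_register_fixed("1-0030", 24000000);
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 *	...
 *	v4l2_clk_unregister_fixed(clk);
 */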
void v4l2_clk_unregister_fixed(struct v4l2_clk *clk)
{
	kfree(clk->priv);
	v4l2_clk_unregister(clk);
}
EXPORT_SYMBOL(v4l2_clk_unregister_fixed);