/* drivers/gpu/host1x/bus.c */
/*
 * Copyright (C) 2012 Avionic Design GmbH
 * Copyright (C) 2012-2013, NVIDIA Corporation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/host1x.h>
#include <linux/of.h>
#include <linux/slab.h>

#include "bus.h"
#include "dev.h"
static DEFINE_MUTEX(clients_lock);
static LIST_HEAD(clients);

static DEFINE_MUTEX(drivers_lock);
static LIST_HEAD(drivers);

static DEFINE_MUTEX(devices_lock);
static LIST_HEAD(devices);
struct host1x_subdev {
        struct host1x_client *client;
        struct device_node *np;
        struct list_head list;
};
/**
 * host1x_subdev_add() - add a new subdevice with an associated device node
 */
static int host1x_subdev_add(struct host1x_device *device,
                             struct device_node *np)
{
        struct host1x_subdev *subdev;

        subdev = kzalloc(sizeof(*subdev), GFP_KERNEL);
        if (!subdev)
                return -ENOMEM;

        INIT_LIST_HEAD(&subdev->list);
        subdev->np = of_node_get(np);

        mutex_lock(&device->subdevs_lock);
        list_add_tail(&subdev->list, &device->subdevs);
        mutex_unlock(&device->subdevs_lock);

        return 0;
}
/**
 * host1x_subdev_del() - remove subdevice
 */
static void host1x_subdev_del(struct host1x_subdev *subdev)
{
        list_del(&subdev->list);
        of_node_put(subdev->np);
        kfree(subdev);
}
/**
 * host1x_device_parse_dt() - scan device tree and add matching subdevices
 */
static int host1x_device_parse_dt(struct host1x_device *device)
{
        struct device_node *np;
        int err;

        for_each_child_of_node(device->dev.parent->of_node, np) {
                if (of_match_node(device->driver->subdevs, np) &&
                    of_device_is_available(np)) {
                        err = host1x_subdev_add(device, np);
                        if (err < 0)
                                return err;
                }
        }

        return 0;
}
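/*
 * Example (not part of the original file): a host1x driver describes the
 * child device nodes it wants bound as subdevices with an of_device_id
 * table; host1x_device_parse_dt() above matches that table against the
 * children of the host1x node and creates one host1x_subdev per available
 * match. The compatible strings below are made up for this sketch.
 *
 *        static const struct of_device_id example_subdevs[] = {
 *                { .compatible = "vendor,example-dc" },
 *                { .compatible = "vendor,example-hdmi" },
 *                { }
 *        };
 */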
static void host1x_subdev_register(struct host1x_device *device,
                                   struct host1x_subdev *subdev,
                                   struct host1x_client *client)
{
        int err;

        /*
         * Move the subdevice to the list of active (registered) subdevices
         * and associate it with a client. At the same time, associate the
         * client with its parent device.
         */
        mutex_lock(&device->subdevs_lock);
        mutex_lock(&device->clients_lock);
        list_move_tail(&client->list, &device->clients);
        list_move_tail(&subdev->list, &device->active);
        client->parent = &device->dev;
        subdev->client = client;
        mutex_unlock(&device->clients_lock);
        mutex_unlock(&device->subdevs_lock);

        /*
         * When all subdevices have been registered, the composite device is
         * ready to be probed.
         */
        if (list_empty(&device->subdevs)) {
                err = device->driver->probe(device);
                if (err < 0)
                        dev_err(&device->dev, "probe failed: %d\n", err);
        }
}
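/*
 * Example (illustrative sketch, not part of the original file): the
 * driver's ->probe() invoked above belongs to the composite logical device
 * and only runs once every subdevice listed in ->subdevs has a registered
 * client. A minimal driver using hypothetical names might look like:
 *
 *        static int example_probe(struct host1x_device *device)
 *        {
 *                return host1x_device_init(device);
 *        }
 *
 *        static int example_remove(struct host1x_device *device)
 *        {
 *                return host1x_device_exit(device);
 *        }
 *
 *        static struct host1x_driver example_driver = {
 *                .name = "example",
 *                .subdevs = example_subdevs,
 *                .probe = example_probe,
 *                .remove = example_remove,
 *        };
 */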
static void __host1x_subdev_unregister(struct host1x_device *device,
                                       struct host1x_subdev *subdev)
{
        struct host1x_client *client = subdev->client;
        int err;

        /*
         * If all subdevices have been activated, we're about to remove the
         * first active subdevice, so unload the driver first.
         */
        if (list_empty(&device->subdevs)) {
                err = device->driver->remove(device);
                if (err < 0)
                        dev_err(&device->dev, "remove failed: %d\n", err);
        }

        /*
         * Move the subdevice back to the list of idle subdevices and remove
         * it from list of clients.
         */
        mutex_lock(&device->clients_lock);
        subdev->client = NULL;
        client->parent = NULL;
        list_move_tail(&subdev->list, &device->subdevs);
        /*
         * XXX: Perhaps don't do this here, but rather explicitly remove it
         * when the device is about to be deleted.
         *
         * This is somewhat complicated by the fact that this function is
         * used to remove the subdevice when a client is unregistered but
         * also when the composite device is about to be removed.
         */
        list_del_init(&client->list);
        mutex_unlock(&device->clients_lock);
}

static void host1x_subdev_unregister(struct host1x_device *device,
                                     struct host1x_subdev *subdev)
{
        mutex_lock(&device->subdevs_lock);
        __host1x_subdev_unregister(device, subdev);
        mutex_unlock(&device->subdevs_lock);
}
int host1x_device_init(struct host1x_device *device)
{
        struct host1x_client *client;
        int err;

        mutex_lock(&device->clients_lock);

        list_for_each_entry(client, &device->clients, list) {
                if (client->ops && client->ops->init) {
                        err = client->ops->init(client);
                        if (err < 0) {
                                dev_err(&device->dev,
                                        "failed to initialize %s: %d\n",
                                        dev_name(client->dev), err);
                                mutex_unlock(&device->clients_lock);
                                return err;
                        }
                }
        }

        mutex_unlock(&device->clients_lock);

        return 0;
}
EXPORT_SYMBOL(host1x_device_init);
int host1x_device_exit(struct host1x_device *device)
{
        struct host1x_client *client;
        int err;

        mutex_lock(&device->clients_lock);

        list_for_each_entry_reverse(client, &device->clients, list) {
                if (client->ops && client->ops->exit) {
                        err = client->ops->exit(client);
                        if (err < 0) {
                                dev_err(&device->dev,
                                        "failed to cleanup %s: %d\n",
                                        dev_name(client->dev), err);
                                mutex_unlock(&device->clients_lock);
                                return err;
                        }
                }
        }

        mutex_unlock(&device->clients_lock);

        return 0;
}
EXPORT_SYMBOL(host1x_device_exit);
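/*
 * Example (illustrative, hypothetical names): host1x_device_init() and
 * host1x_device_exit() simply walk the client list and call each client's
 * optional ->init()/->exit() callbacks. A subdevice driver could provide
 * them like this:
 *
 *        static int example_client_init(struct host1x_client *client)
 *        {
 *                return 0;
 *        }
 *
 *        static int example_client_exit(struct host1x_client *client)
 *        {
 *                return 0;
 *        }
 *
 *        static const struct host1x_client_ops example_client_ops = {
 *                .init = example_client_init,
 *                .exit = example_client_exit,
 *        };
 */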
static int host1x_register_client(struct host1x *host1x,
                                  struct host1x_client *client)
{
        struct host1x_device *device;
        struct host1x_subdev *subdev;

        mutex_lock(&host1x->devices_lock);

        list_for_each_entry(device, &host1x->devices, list) {
                list_for_each_entry(subdev, &device->subdevs, list) {
                        if (subdev->np == client->dev->of_node) {
                                host1x_subdev_register(device, subdev, client);
                                mutex_unlock(&host1x->devices_lock);
                                return 0;
                        }
                }
        }

        mutex_unlock(&host1x->devices_lock);
        return -ENODEV;
}
static int host1x_unregister_client(struct host1x *host1x,
                                    struct host1x_client *client)
{
        struct host1x_device *device, *dt;
        struct host1x_subdev *subdev;

        mutex_lock(&host1x->devices_lock);

        list_for_each_entry_safe(device, dt, &host1x->devices, list) {
                list_for_each_entry(subdev, &device->active, list) {
                        if (subdev->client == client) {
                                host1x_subdev_unregister(device, subdev);
                                mutex_unlock(&host1x->devices_lock);
                                return 0;
                        }
                }
        }

        mutex_unlock(&host1x->devices_lock);
        return -ENODEV;
}
static struct bus_type host1x_bus_type = {
        .name = "host1x",
};

int host1x_bus_init(void)
{
        return bus_register(&host1x_bus_type);
}

void host1x_bus_exit(void)
{
        bus_unregister(&host1x_bus_type);
}
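/*
 * Note (not part of the original file): host1x_bus_init() and
 * host1x_bus_exit() are expected to be called once by the host1x core
 * driver from its own module init/exit paths, roughly:
 *
 *        err = host1x_bus_init();
 *        if (err < 0)
 *                return err;
 */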
static void host1x_device_release(struct device *dev)
{
        struct host1x_device *device = to_host1x_device(dev);

        kfree(device);
}
static int host1x_device_add(struct host1x *host1x,
                             struct host1x_driver *driver)
{
        struct host1x_client *client, *tmp;
        struct host1x_subdev *subdev;
        struct host1x_device *device;
        int err;

        device = kzalloc(sizeof(*device), GFP_KERNEL);
        if (!device)
                return -ENOMEM;

        mutex_init(&device->subdevs_lock);
        INIT_LIST_HEAD(&device->subdevs);
        INIT_LIST_HEAD(&device->active);
        mutex_init(&device->clients_lock);
        INIT_LIST_HEAD(&device->clients);
        INIT_LIST_HEAD(&device->list);
        device->driver = driver;

        device->dev.coherent_dma_mask = host1x->dev->coherent_dma_mask;
        device->dev.dma_mask = &device->dev.coherent_dma_mask;
        device->dev.release = host1x_device_release;
        dev_set_name(&device->dev, "%s", driver->name);
        device->dev.bus = &host1x_bus_type;
        device->dev.parent = host1x->dev;

        err = device_register(&device->dev);
        if (err < 0)
                return err;

        err = host1x_device_parse_dt(device);
        if (err < 0) {
                device_unregister(&device->dev);
                return err;
        }

        mutex_lock(&host1x->devices_lock);
        list_add_tail(&device->list, &host1x->devices);
        mutex_unlock(&host1x->devices_lock);

        mutex_lock(&clients_lock);

        list_for_each_entry_safe(client, tmp, &clients, list) {
                list_for_each_entry(subdev, &device->subdevs, list) {
                        if (subdev->np == client->dev->of_node) {
                                host1x_subdev_register(device, subdev, client);
                                break;
                        }
                }
        }

        mutex_unlock(&clients_lock);

        return 0;
}
/*
 * Removes a device by first unregistering any subdevices and then removing
 * itself from the list of devices.
 *
 * This function must be called with the host1x->devices_lock held.
 */
static void host1x_device_del(struct host1x *host1x,
                              struct host1x_device *device)
{
        struct host1x_subdev *subdev, *sd;
        struct host1x_client *client, *cl;

        mutex_lock(&device->subdevs_lock);

        /* unregister subdevices */
        list_for_each_entry_safe(subdev, sd, &device->active, list) {
                /*
                 * host1x_subdev_unregister() will remove the client from
                 * any lists, so we'll need to manually add it back to the
                 * list of idle clients.
                 *
                 * XXX: Alternatively, perhaps don't remove the client from
                 * any lists in host1x_subdev_unregister() and instead do
                 * that explicitly from host1x_unregister_client()?
                 */
                client = subdev->client;

                __host1x_subdev_unregister(device, subdev);

                /* add the client to the list of idle clients */
                mutex_lock(&clients_lock);
                list_add_tail(&client->list, &clients);
                mutex_unlock(&clients_lock);
        }

        /* remove subdevices */
        list_for_each_entry_safe(subdev, sd, &device->subdevs, list)
                host1x_subdev_del(subdev);

        mutex_unlock(&device->subdevs_lock);

        /* move clients to idle list */
        mutex_lock(&clients_lock);
        mutex_lock(&device->clients_lock);

        list_for_each_entry_safe(client, cl, &device->clients, list)
                list_move_tail(&client->list, &clients);

        mutex_unlock(&device->clients_lock);
        mutex_unlock(&clients_lock);

        /* finally remove the device */
        list_del_init(&device->list);
        device_unregister(&device->dev);
}
static void host1x_attach_driver(struct host1x *host1x,
                                 struct host1x_driver *driver)
{
        struct host1x_device *device;
        int err;

        mutex_lock(&host1x->devices_lock);

        list_for_each_entry(device, &host1x->devices, list) {
                if (device->driver == driver) {
                        mutex_unlock(&host1x->devices_lock);
                        return;
                }
        }

        mutex_unlock(&host1x->devices_lock);

        err = host1x_device_add(host1x, driver);
        if (err < 0)
                dev_err(host1x->dev, "failed to allocate device: %d\n", err);
}
static void host1x_detach_driver(struct host1x *host1x,
                                 struct host1x_driver *driver)
{
        struct host1x_device *device, *tmp;

        mutex_lock(&host1x->devices_lock);

        list_for_each_entry_safe(device, tmp, &host1x->devices, list)
                if (device->driver == driver)
                        host1x_device_del(host1x, device);

        mutex_unlock(&host1x->devices_lock);
}
int host1x_register(struct host1x *host1x)
{
        struct host1x_driver *driver;

        mutex_lock(&devices_lock);
        list_add_tail(&host1x->list, &devices);
        mutex_unlock(&devices_lock);

        mutex_lock(&drivers_lock);

        list_for_each_entry(driver, &drivers, list)
                host1x_attach_driver(host1x, driver);

        mutex_unlock(&drivers_lock);

        return 0;
}

int host1x_unregister(struct host1x *host1x)
{
        struct host1x_driver *driver;

        mutex_lock(&drivers_lock);

        list_for_each_entry(driver, &drivers, list)
                host1x_detach_driver(host1x, driver);

        mutex_unlock(&drivers_lock);

        mutex_lock(&devices_lock);
        list_del_init(&host1x->list);
        mutex_unlock(&devices_lock);

        return 0;
}
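/*
 * Note (illustrative, not part of the original file): host1x_register() and
 * host1x_unregister() are meant to be called by the host1x controller
 * driver once the controller itself has probed or is being removed, e.g.
 * (with "host" being the controller's struct host1x):
 *
 *        err = host1x_register(host);
 *        if (err < 0)
 *                return err;
 */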
int host1x_driver_register(struct host1x_driver *driver)
{
        struct host1x *host1x;

        INIT_LIST_HEAD(&driver->list);

        mutex_lock(&drivers_lock);
        list_add_tail(&driver->list, &drivers);
        mutex_unlock(&drivers_lock);

        mutex_lock(&devices_lock);

        list_for_each_entry(host1x, &devices, list)
                host1x_attach_driver(host1x, driver);

        mutex_unlock(&devices_lock);

        return 0;
}
EXPORT_SYMBOL(host1x_driver_register);

void host1x_driver_unregister(struct host1x_driver *driver)
{
        mutex_lock(&drivers_lock);
        list_del_init(&driver->list);
        mutex_unlock(&drivers_lock);
}
EXPORT_SYMBOL(host1x_driver_unregister);
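/*
 * Example (illustrative, hypothetical names): a logical-device driver
 * registers itself at module load time. If a host1x controller is already
 * registered, a composite device is created for it right away; otherwise
 * the driver is simply remembered until host1x_register() runs.
 *
 *        static int __init example_init(void)
 *        {
 *                return host1x_driver_register(&example_driver);
 *        }
 *        module_init(example_init);
 *
 *        static void __exit example_exit(void)
 *        {
 *                host1x_driver_unregister(&example_driver);
 *        }
 *        module_exit(example_exit);
 */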
int host1x_client_register(struct host1x_client *client)
{
        struct host1x *host1x;
        int err;

        mutex_lock(&devices_lock);

        list_for_each_entry(host1x, &devices, list) {
                err = host1x_register_client(host1x, client);
                if (!err) {
                        mutex_unlock(&devices_lock);
                        return 0;
                }
        }

        mutex_unlock(&devices_lock);

        mutex_lock(&clients_lock);
        list_add_tail(&client->list, &clients);
        mutex_unlock(&clients_lock);

        return 0;
}
EXPORT_SYMBOL(host1x_client_register);
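/*
 * Example (illustrative, hypothetical names): a subdevice driver, typically
 * a platform driver bound to one of the child nodes matched above, fills in
 * a host1x_client and registers it from its probe routine; the client is
 * paired with a pending subdevice by comparing client->dev->of_node with
 * the subdevice's device node.
 *
 *        struct example_subdev {
 *                struct host1x_client client;
 *        };
 *
 *        static int example_subdev_probe(struct platform_device *pdev)
 *        {
 *                struct example_subdev *sub;
 *
 *                sub = devm_kzalloc(&pdev->dev, sizeof(*sub), GFP_KERNEL);
 *                if (!sub)
 *                        return -ENOMEM;
 *
 *                INIT_LIST_HEAD(&sub->client.list);
 *                sub->client.ops = &example_client_ops;
 *                sub->client.dev = &pdev->dev;
 *
 *                return host1x_client_register(&sub->client);
 *        }
 */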
int host1x_client_unregister(struct host1x_client *client)
{
        struct host1x_client *c;
        struct host1x *host1x;
        int err;

        mutex_lock(&devices_lock);

        list_for_each_entry(host1x, &devices, list) {
                err = host1x_unregister_client(host1x, client);
                if (!err) {
                        mutex_unlock(&devices_lock);
                        return 0;
                }
        }

        mutex_unlock(&devices_lock);
        mutex_lock(&clients_lock);

        list_for_each_entry(c, &clients, list) {
                if (c == client) {
                        list_del_init(&c->list);
                        break;
                }
        }

        mutex_unlock(&clients_lock);

        return 0;
}
EXPORT_SYMBOL(host1x_client_unregister);