drivers/staging/greybus/connection.c
/*
 * Greybus connections
 *
 * Copyright 2014 Google Inc.
 * Copyright 2014 Linaro Ltd.
 *
 * Released under the GPLv2 only.
 */

#include <linux/workqueue.h>

#include "greybus.h"
#include "greybus_trace.h"

#define GB_CONNECTION_CPORT_QUIESCE_TIMEOUT	1000

static void gb_connection_kref_release(struct kref *kref);

static DEFINE_SPINLOCK(gb_connections_lock);
static DEFINE_MUTEX(gb_connection_mutex);

/* Caller holds gb_connection_mutex. */
static bool gb_connection_cport_in_use(struct gb_interface *intf, u16 cport_id)
{
	struct gb_host_device *hd = intf->hd;
	struct gb_connection *connection;

	list_for_each_entry(connection, &hd->connections, hd_links) {
		if (connection->intf == intf &&
		    connection->intf_cport_id == cport_id)
			return true;
	}

	return false;
}

static void gb_connection_get(struct gb_connection *connection)
{
	kref_get(&connection->kref);

	trace_gb_connection_get(connection);
}

static void gb_connection_put(struct gb_connection *connection)
{
	trace_gb_connection_put(connection);

	kref_put(&connection->kref, gb_connection_kref_release);
}

/*
 * Returns a reference-counted pointer to the connection if found.
 */
static struct gb_connection *
gb_connection_hd_find(struct gb_host_device *hd, u16 cport_id)
{
	struct gb_connection *connection;
	unsigned long flags;

	spin_lock_irqsave(&gb_connections_lock, flags);
	list_for_each_entry(connection, &hd->connections, hd_links)
		if (connection->hd_cport_id == cport_id) {
			gb_connection_get(connection);
			goto found;
		}
	connection = NULL;
found:
	spin_unlock_irqrestore(&gb_connections_lock, flags);

	return connection;
}

/*
 * Callback from the host driver to let us know that data has been
 * received on the bundle.
 */
void greybus_data_rcvd(struct gb_host_device *hd, u16 cport_id,
		       u8 *data, size_t length)
{
	struct gb_connection *connection;

	trace_gb_hd_in(hd);

	connection = gb_connection_hd_find(hd, cport_id);
	if (!connection) {
		dev_err(&hd->dev,
			"nonexistent connection (%zu bytes dropped)\n", length);
		return;
	}
	gb_connection_recv(connection, data, length);
	gb_connection_put(connection);
}
EXPORT_SYMBOL_GPL(greybus_data_rcvd);

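/*
 * Example (illustrative sketch only; "example_hd_rx_complete" and its
 * context are hypothetical, not part of this file): a host controller
 * driver that has received a message for a given cport hands it to
 * core like this, typically from its receive-completion handler:
 *
 *	static void example_hd_rx_complete(struct gb_host_device *hd,
 *					   u16 cport_id, u8 *buf, size_t len)
 *	{
 *		greybus_data_rcvd(hd, cport_id, buf, len);
 *	}
 */
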
static void gb_connection_kref_release(struct kref *kref)
{
	struct gb_connection *connection;

	connection = container_of(kref, struct gb_connection, kref);

	trace_gb_connection_release(connection);

	kfree(connection);
}

static void gb_connection_init_name(struct gb_connection *connection)
{
	u16 hd_cport_id = connection->hd_cport_id;
	u16 cport_id = 0;
	u8 intf_id = 0;

	if (connection->intf) {
		intf_id = connection->intf->interface_id;
		cport_id = connection->intf_cport_id;
	}

	snprintf(connection->name, sizeof(connection->name),
		 "%u/%u:%u", hd_cport_id, intf_id, cport_id);
}

/*
 * _gb_connection_create() - create a Greybus connection
 * @hd: host device of the connection
 * @hd_cport_id: host-device cport id, or -1 for dynamic allocation
 * @intf: remote interface, or NULL for static connections
 * @bundle: remote-interface bundle (may be NULL)
 * @cport_id: remote-interface cport id, or 0 for static connections
 * @handler: request handler (may be NULL)
 * @flags: connection flags
 *
 * Create a Greybus connection, representing the bidirectional link
 * between a CPort on a (local) Greybus host device and a CPort on
 * another Greybus interface.
 *
 * A connection also maintains the state of operations sent over the
 * connection.
 *
 * Serialised against concurrent create and destroy using the
 * gb_connection_mutex.
 *
 * Return: A pointer to the new connection if successful, or an ERR_PTR
 * otherwise.
 */
static struct gb_connection *
_gb_connection_create(struct gb_host_device *hd, int hd_cport_id,
		      struct gb_interface *intf,
		      struct gb_bundle *bundle, int cport_id,
		      gb_request_handler_t handler,
		      unsigned long flags)
{
	struct gb_connection *connection;
	int ret;

	mutex_lock(&gb_connection_mutex);

	if (intf && gb_connection_cport_in_use(intf, cport_id)) {
		dev_err(&intf->dev, "cport %u already in use\n", cport_id);
		ret = -EBUSY;
		goto err_unlock;
	}

	ret = gb_hd_cport_allocate(hd, hd_cport_id, flags);
	if (ret < 0) {
		dev_err(&hd->dev, "failed to allocate cport: %d\n", ret);
		goto err_unlock;
	}
	hd_cport_id = ret;

	connection = kzalloc(sizeof(*connection), GFP_KERNEL);
	if (!connection) {
		ret = -ENOMEM;
		goto err_hd_cport_release;
	}

	connection->hd_cport_id = hd_cport_id;
	connection->intf_cport_id = cport_id;
	connection->hd = hd;
	connection->intf = intf;
	connection->bundle = bundle;
	connection->handler = handler;
	connection->flags = flags;
	if (intf && (intf->quirks & GB_INTERFACE_QUIRK_NO_CPORT_FEATURES))
		connection->flags |= GB_CONNECTION_FLAG_NO_FLOWCTRL;
	connection->state = GB_CONNECTION_STATE_DISABLED;

	atomic_set(&connection->op_cycle, 0);
	mutex_init(&connection->mutex);
	spin_lock_init(&connection->lock);
	INIT_LIST_HEAD(&connection->operations);

	connection->wq = alloc_workqueue("%s:%d", WQ_UNBOUND, 1,
					 dev_name(&hd->dev), hd_cport_id);
	if (!connection->wq) {
		ret = -ENOMEM;
		goto err_free_connection;
	}

	kref_init(&connection->kref);

	gb_connection_init_name(connection);

	spin_lock_irq(&gb_connections_lock);
	list_add(&connection->hd_links, &hd->connections);

	if (bundle)
		list_add(&connection->bundle_links, &bundle->connections);
	else
		INIT_LIST_HEAD(&connection->bundle_links);

	spin_unlock_irq(&gb_connections_lock);

	mutex_unlock(&gb_connection_mutex);

	trace_gb_connection_create(connection);

	return connection;

err_free_connection:
	kfree(connection);
err_hd_cport_release:
	gb_hd_cport_release(hd, hd_cport_id);
err_unlock:
	mutex_unlock(&gb_connection_mutex);

	return ERR_PTR(ret);
}

struct gb_connection *
gb_connection_create_static(struct gb_host_device *hd, u16 hd_cport_id,
			    gb_request_handler_t handler)
{
	return _gb_connection_create(hd, hd_cport_id, NULL, NULL, 0, handler,
				     GB_CONNECTION_FLAG_HIGH_PRIO);
}

struct gb_connection *
gb_connection_create_control(struct gb_interface *intf)
{
	return _gb_connection_create(intf->hd, -1, intf, NULL, 0, NULL,
				     GB_CONNECTION_FLAG_CONTROL |
				     GB_CONNECTION_FLAG_HIGH_PRIO);
}

struct gb_connection *
gb_connection_create(struct gb_bundle *bundle, u16 cport_id,
		     gb_request_handler_t handler)
{
	struct gb_interface *intf = bundle->intf;

	return _gb_connection_create(intf->hd, -1, intf, bundle, cport_id,
				     handler, 0);
}
EXPORT_SYMBOL_GPL(gb_connection_create);

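/*
 * Example (illustrative sketch only; the handler name is hypothetical):
 * a bundle driver typically creates its connection in probe() from a
 * matching cport descriptor:
 *
 *	connection = gb_connection_create(bundle,
 *					  le16_to_cpu(cport_desc->id),
 *					  example_request_handler);
 *	if (IS_ERR(connection))
 *		return PTR_ERR(connection);
 */
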
struct gb_connection *
gb_connection_create_flags(struct gb_bundle *bundle, u16 cport_id,
			   gb_request_handler_t handler,
			   unsigned long flags)
{
	struct gb_interface *intf = bundle->intf;

	if (WARN_ON_ONCE(flags & GB_CONNECTION_FLAG_CORE_MASK))
		flags &= ~GB_CONNECTION_FLAG_CORE_MASK;

	return _gb_connection_create(intf->hd, -1, intf, bundle, cport_id,
				     handler, flags);
}
EXPORT_SYMBOL_GPL(gb_connection_create_flags);

struct gb_connection *
gb_connection_create_offloaded(struct gb_bundle *bundle, u16 cport_id,
			       unsigned long flags)
{
	flags |= GB_CONNECTION_FLAG_OFFLOADED;

	return gb_connection_create_flags(bundle, cport_id, NULL, flags);
}
EXPORT_SYMBOL_GPL(gb_connection_create_offloaded);

static int gb_connection_hd_cport_enable(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->cport_enable)
		return 0;

	ret = hd->driver->cport_enable(hd, connection->hd_cport_id,
				       connection->flags);
	if (ret) {
		dev_err(&hd->dev, "%s: failed to enable host cport: %d\n",
			connection->name, ret);
		return ret;
	}

	return 0;
}

static void gb_connection_hd_cport_disable(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->cport_disable)
		return;

	ret = hd->driver->cport_disable(hd, connection->hd_cport_id);
	if (ret) {
		dev_err(&hd->dev, "%s: failed to disable host cport: %d\n",
			connection->name, ret);
	}
}

static int gb_connection_hd_cport_connected(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->cport_connected)
		return 0;

	ret = hd->driver->cport_connected(hd, connection->hd_cport_id);
	if (ret) {
		dev_err(&hd->dev, "%s: failed to set connected state: %d\n",
			connection->name, ret);
		return ret;
	}

	return 0;
}

static int gb_connection_hd_cport_flush(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->cport_flush)
		return 0;

	ret = hd->driver->cport_flush(hd, connection->hd_cport_id);
	if (ret) {
		dev_err(&hd->dev, "%s: failed to flush host cport: %d\n",
			connection->name, ret);
		return ret;
	}

	return 0;
}

static int gb_connection_hd_cport_quiesce(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	size_t peer_space;
	int ret;

	if (!hd->driver->cport_quiesce)
		return 0;

	peer_space = sizeof(struct gb_operation_msg_hdr) +
			sizeof(struct gb_cport_shutdown_request);

	if (connection->mode_switch)
		peer_space += sizeof(struct gb_operation_msg_hdr);

	ret = hd->driver->cport_quiesce(hd, connection->hd_cport_id,
					peer_space,
					GB_CONNECTION_CPORT_QUIESCE_TIMEOUT);
	if (ret) {
		dev_err(&hd->dev, "%s: failed to quiesce host cport: %d\n",
			connection->name, ret);
		return ret;
	}

	return 0;
}

static int gb_connection_hd_cport_clear(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->cport_clear)
		return 0;

	ret = hd->driver->cport_clear(hd, connection->hd_cport_id);
	if (ret) {
		dev_err(&hd->dev, "%s: failed to clear host cport: %d\n",
			connection->name, ret);
		return ret;
	}

	return 0;
}

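/*
 * All of the cport_*() host-driver callbacks used above are optional:
 * each helper returns early when its callback is NULL. Illustrative
 * sketch of a hypothetical host driver providing only what its
 * hardware needs; the "example_*" names are placeholders:
 *
 *	static struct gb_hd_driver example_hd_driver = {
 *		.message_send	= example_message_send,
 *		.message_cancel	= example_message_cancel,
 *		.cport_enable	= example_cport_enable,
 *		.cport_quiesce	= example_cport_quiesce,
 *		.cport_clear	= example_cport_clear,
 *	};
 */
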
/*
 * Request the SVC to create a connection from AP's cport to interface's
 * cport.
 */
static int
gb_connection_svc_connection_create(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	struct gb_interface *intf;
	u8 cport_flags;
	int ret;

	if (gb_connection_is_static(connection))
		return 0;

	intf = connection->intf;

	/*
	 * Enable either E2EFC or CSD, unless no flow control is requested.
	 */
	cport_flags = GB_SVC_CPORT_FLAG_CSV_N;
	if (gb_connection_flow_control_disabled(connection)) {
		cport_flags |= GB_SVC_CPORT_FLAG_CSD_N;
	} else if (gb_connection_e2efc_enabled(connection)) {
		cport_flags |= GB_SVC_CPORT_FLAG_CSD_N |
				GB_SVC_CPORT_FLAG_E2EFC;
	}

	ret = gb_svc_connection_create(hd->svc,
				       hd->svc->ap_intf_id,
				       connection->hd_cport_id,
				       intf->interface_id,
				       connection->intf_cport_id,
				       cport_flags);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: failed to create svc connection: %d\n",
			connection->name, ret);
		return ret;
	}

	return 0;
}

static void
gb_connection_svc_connection_destroy(struct gb_connection *connection)
{
	if (gb_connection_is_static(connection))
		return;

	gb_svc_connection_destroy(connection->hd->svc,
				  connection->hd->svc->ap_intf_id,
				  connection->hd_cport_id,
				  connection->intf->interface_id,
				  connection->intf_cport_id);
}

/* Inform Interface about active CPorts */
static int gb_connection_control_connected(struct gb_connection *connection)
{
	struct gb_control *control;
	u16 cport_id = connection->intf_cport_id;
	int ret;

	if (gb_connection_is_static(connection))
		return 0;

	if (gb_connection_is_control(connection))
		return 0;

	control = connection->intf->control;

	ret = gb_control_connected_operation(control, cport_id);
	if (ret) {
		dev_err(&connection->bundle->dev,
			"failed to connect cport: %d\n", ret);
		return ret;
	}

	return 0;
}

static void
gb_connection_control_disconnecting(struct gb_connection *connection)
{
	struct gb_control *control;
	u16 cport_id = connection->intf_cport_id;
	int ret;

	if (gb_connection_is_static(connection))
		return;

	control = connection->intf->control;

	ret = gb_control_disconnecting_operation(control, cport_id);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: failed to send disconnecting: %d\n",
			connection->name, ret);
	}
}

static void
gb_connection_control_disconnected(struct gb_connection *connection)
{
	struct gb_control *control;
	u16 cport_id = connection->intf_cport_id;
	int ret;

	if (gb_connection_is_static(connection))
		return;

	control = connection->intf->control;

	if (gb_connection_is_control(connection)) {
		if (connection->mode_switch) {
			ret = gb_control_mode_switch_operation(control);
			if (ret) {
				/*
				 * Allow mode switch to time out waiting for
				 * mailbox event.
				 */
				return;
			}
		}

		return;
	}

	ret = gb_control_disconnected_operation(control, cport_id);
	if (ret) {
		dev_warn(&connection->bundle->dev,
			 "failed to disconnect cport: %d\n", ret);
	}
}

static int gb_connection_shutdown_operation(struct gb_connection *connection,
					    u8 phase)
{
	struct gb_cport_shutdown_request *req;
	struct gb_operation *operation;
	int ret;

	operation = gb_operation_create_core(connection,
					     GB_REQUEST_TYPE_CPORT_SHUTDOWN,
					     sizeof(*req), 0, 0,
					     GFP_KERNEL);
	if (!operation)
		return -ENOMEM;

	req = operation->request->payload;
	req->phase = phase;

	ret = gb_operation_request_send_sync(operation);

	gb_operation_put(operation);

	return ret;
}

static int gb_connection_cport_shutdown(struct gb_connection *connection,
					u8 phase)
{
	struct gb_host_device *hd = connection->hd;
	const struct gb_hd_driver *drv = hd->driver;
	int ret;

	if (gb_connection_is_static(connection))
		return 0;

	if (gb_connection_is_offloaded(connection)) {
		if (!drv->cport_shutdown)
			return 0;

		ret = drv->cport_shutdown(hd, connection->hd_cport_id, phase,
					  GB_OPERATION_TIMEOUT_DEFAULT);
	} else {
		ret = gb_connection_shutdown_operation(connection, phase);
	}

	if (ret) {
		dev_err(&hd->dev, "%s: failed to send cport shutdown (phase %d): %d\n",
			connection->name, phase, ret);
		return ret;
	}

	return 0;
}

static int
gb_connection_cport_shutdown_phase_1(struct gb_connection *connection)
{
	return gb_connection_cport_shutdown(connection, 1);
}

static int
gb_connection_cport_shutdown_phase_2(struct gb_connection *connection)
{
	return gb_connection_cport_shutdown(connection, 2);
}

/*
 * Cancel all active operations on a connection.
 *
 * Locking: Called with connection lock held and state set to DISABLED or
 * DISCONNECTING.
 */
static void gb_connection_cancel_operations(struct gb_connection *connection,
					    int errno)
	__must_hold(&connection->lock)
{
	struct gb_operation *operation;

	while (!list_empty(&connection->operations)) {
		operation = list_last_entry(&connection->operations,
					    struct gb_operation, links);
		gb_operation_get(operation);
		/* Drop the lock while cancelling, as cancellation can sleep. */
		spin_unlock_irq(&connection->lock);

		if (gb_operation_is_incoming(operation))
			gb_operation_cancel_incoming(operation, errno);
		else
			gb_operation_cancel(operation, errno);

		gb_operation_put(operation);

		spin_lock_irq(&connection->lock);
	}
}

/*
 * Cancel all active incoming operations on a connection.
 *
 * Locking: Called with connection lock held and state set to ENABLED_TX.
 */
static void
gb_connection_flush_incoming_operations(struct gb_connection *connection,
					int errno)
	__must_hold(&connection->lock)
{
	struct gb_operation *operation;
	bool incoming;

	while (!list_empty(&connection->operations)) {
		incoming = false;
		list_for_each_entry(operation, &connection->operations,
				    links) {
			if (gb_operation_is_incoming(operation)) {
				gb_operation_get(operation);
				incoming = true;
				break;
			}
		}

		if (!incoming)
			break;

		spin_unlock_irq(&connection->lock);

		/* FIXME: flush, not cancel? */
		gb_operation_cancel_incoming(operation, errno);
		gb_operation_put(operation);

		spin_lock_irq(&connection->lock);
	}
}

/*
 * _gb_connection_enable() - enable a connection
 * @connection: connection to enable
 * @rx: whether to enable incoming requests
 *
 * Connection-enable helper for DISABLED->ENABLED, DISABLED->ENABLED_TX, and
 * ENABLED_TX->ENABLED state transitions.
 *
 * Locking: Caller holds connection->mutex.
 */
static int _gb_connection_enable(struct gb_connection *connection, bool rx)
{
	int ret;

	/* Handle ENABLED_TX -> ENABLED transitions. */
	if (connection->state == GB_CONNECTION_STATE_ENABLED_TX) {
		if (!(connection->handler && rx))
			return 0;

		spin_lock_irq(&connection->lock);
		connection->state = GB_CONNECTION_STATE_ENABLED;
		spin_unlock_irq(&connection->lock);

		return 0;
	}

	ret = gb_connection_hd_cport_enable(connection);
	if (ret)
		return ret;

	ret = gb_connection_svc_connection_create(connection);
	if (ret)
		goto err_hd_cport_clear;

	ret = gb_connection_hd_cport_connected(connection);
	if (ret)
		goto err_svc_connection_destroy;

	spin_lock_irq(&connection->lock);
	if (connection->handler && rx)
		connection->state = GB_CONNECTION_STATE_ENABLED;
	else
		connection->state = GB_CONNECTION_STATE_ENABLED_TX;
	spin_unlock_irq(&connection->lock);

	ret = gb_connection_control_connected(connection);
	if (ret)
		goto err_control_disconnecting;

	return 0;

err_control_disconnecting:
	spin_lock_irq(&connection->lock);
	connection->state = GB_CONNECTION_STATE_DISCONNECTING;
	gb_connection_cancel_operations(connection, -ESHUTDOWN);
	spin_unlock_irq(&connection->lock);

	/* Transmit queue should already be empty. */
	gb_connection_hd_cport_flush(connection);

	gb_connection_control_disconnecting(connection);
	gb_connection_cport_shutdown_phase_1(connection);
	gb_connection_hd_cport_quiesce(connection);
	gb_connection_cport_shutdown_phase_2(connection);
	gb_connection_control_disconnected(connection);
	connection->state = GB_CONNECTION_STATE_DISABLED;
err_svc_connection_destroy:
	gb_connection_svc_connection_destroy(connection);
err_hd_cport_clear:
	gb_connection_hd_cport_clear(connection);

	gb_connection_hd_cport_disable(connection);

	return ret;
}

int gb_connection_enable(struct gb_connection *connection)
{
	int ret = 0;

	mutex_lock(&connection->mutex);

	if (connection->state == GB_CONNECTION_STATE_ENABLED)
		goto out_unlock;

	ret = _gb_connection_enable(connection, true);
	if (!ret)
		trace_gb_connection_enable(connection);

out_unlock:
	mutex_unlock(&connection->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(gb_connection_enable);

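/*
 * Example (illustrative sketch only; the error label is hypothetical):
 * a protocol driver enables its connection after creating it, typically
 * in probe(), and unwinds with gb_connection_destroy() on failure:
 *
 *	ret = gb_connection_enable(connection);
 *	if (ret)
 *		goto err_connection_destroy;
 */
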
int gb_connection_enable_tx(struct gb_connection *connection)
{
	int ret = 0;

	mutex_lock(&connection->mutex);

	if (connection->state == GB_CONNECTION_STATE_ENABLED) {
		ret = -EINVAL;
		goto out_unlock;
	}

	if (connection->state == GB_CONNECTION_STATE_ENABLED_TX)
		goto out_unlock;

	ret = _gb_connection_enable(connection, false);
	if (!ret)
		trace_gb_connection_enable(connection);

out_unlock:
	mutex_unlock(&connection->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(gb_connection_enable_tx);

void gb_connection_disable_rx(struct gb_connection *connection)
{
	mutex_lock(&connection->mutex);

	spin_lock_irq(&connection->lock);
	if (connection->state != GB_CONNECTION_STATE_ENABLED) {
		spin_unlock_irq(&connection->lock);
		goto out_unlock;
	}
	connection->state = GB_CONNECTION_STATE_ENABLED_TX;
	gb_connection_flush_incoming_operations(connection, -ESHUTDOWN);
	spin_unlock_irq(&connection->lock);

	trace_gb_connection_disable(connection);

out_unlock:
	mutex_unlock(&connection->mutex);
}
EXPORT_SYMBOL_GPL(gb_connection_disable_rx);

void gb_connection_mode_switch_prepare(struct gb_connection *connection)
{
	connection->mode_switch = true;
}

void gb_connection_mode_switch_complete(struct gb_connection *connection)
{
	gb_connection_svc_connection_destroy(connection);
	gb_connection_hd_cport_clear(connection);

	gb_connection_hd_cport_disable(connection);

	connection->mode_switch = false;
}

void gb_connection_disable(struct gb_connection *connection)
{
	mutex_lock(&connection->mutex);

	if (connection->state == GB_CONNECTION_STATE_DISABLED)
		goto out_unlock;

	trace_gb_connection_disable(connection);

	spin_lock_irq(&connection->lock);
	connection->state = GB_CONNECTION_STATE_DISCONNECTING;
	gb_connection_cancel_operations(connection, -ESHUTDOWN);
	spin_unlock_irq(&connection->lock);

	gb_connection_hd_cport_flush(connection);

	gb_connection_control_disconnecting(connection);
	gb_connection_cport_shutdown_phase_1(connection);
	gb_connection_hd_cport_quiesce(connection);
	gb_connection_cport_shutdown_phase_2(connection);
	gb_connection_control_disconnected(connection);

	connection->state = GB_CONNECTION_STATE_DISABLED;

	/* control-connection tear down is deferred when mode switching */
	if (!connection->mode_switch) {
		gb_connection_svc_connection_destroy(connection);
		gb_connection_hd_cport_clear(connection);

		gb_connection_hd_cport_disable(connection);
	}

out_unlock:
	mutex_unlock(&connection->mutex);
}
EXPORT_SYMBOL_GPL(gb_connection_disable);

/* Disable a connection without communicating with the remote end. */
void gb_connection_disable_forced(struct gb_connection *connection)
{
	mutex_lock(&connection->mutex);

	if (connection->state == GB_CONNECTION_STATE_DISABLED)
		goto out_unlock;

	trace_gb_connection_disable(connection);

	spin_lock_irq(&connection->lock);
	connection->state = GB_CONNECTION_STATE_DISABLED;
	gb_connection_cancel_operations(connection, -ESHUTDOWN);
	spin_unlock_irq(&connection->lock);

	gb_connection_hd_cport_flush(connection);

	gb_connection_svc_connection_destroy(connection);
	gb_connection_hd_cport_clear(connection);

	gb_connection_hd_cport_disable(connection);

out_unlock:
	mutex_unlock(&connection->mutex);
}
EXPORT_SYMBOL_GPL(gb_connection_disable_forced);

/* Caller must have disabled the connection before destroying it. */
void gb_connection_destroy(struct gb_connection *connection)
{
	if (!connection)
		return;

	if (WARN_ON(connection->state != GB_CONNECTION_STATE_DISABLED))
		gb_connection_disable(connection);

	mutex_lock(&gb_connection_mutex);

	spin_lock_irq(&gb_connections_lock);
	list_del(&connection->bundle_links);
	list_del(&connection->hd_links);
	spin_unlock_irq(&gb_connections_lock);

	destroy_workqueue(connection->wq);

	gb_hd_cport_release(connection->hd, connection->hd_cport_id);
	connection->hd_cport_id = CPORT_ID_BAD;

	mutex_unlock(&gb_connection_mutex);

	gb_connection_put(connection);
}
EXPORT_SYMBOL_GPL(gb_connection_destroy);

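/*
 * Example (illustrative): teardown mirrors setup; a driver's
 * disconnect() path disables the connection before destroying it:
 *
 *	gb_connection_disable(connection);
 *	gb_connection_destroy(connection);
 */
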
void gb_connection_latency_tag_enable(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->latency_tag_enable)
		return;

	ret = hd->driver->latency_tag_enable(hd, connection->hd_cport_id);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: failed to enable latency tag: %d\n",
			connection->name, ret);
	}
}
EXPORT_SYMBOL_GPL(gb_connection_latency_tag_enable);

void gb_connection_latency_tag_disable(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->latency_tag_disable)
		return;

	ret = hd->driver->latency_tag_disable(hd, connection->hd_cport_id);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: failed to disable latency tag: %d\n",
			connection->name, ret);
	}
}
EXPORT_SYMBOL_GPL(gb_connection_latency_tag_disable);