/*
 * Greybus connections
 *
 * Copyright 2014 Google Inc.
 * Copyright 2014 Linaro Ltd.
 *
 * Released under the GPLv2 only.
 */

#include <linux/workqueue.h>

#include "greybus.h"
#include "greybus_trace.h"

#define GB_CONNECTION_CPORT_QUIESCE_TIMEOUT	1000

static void gb_connection_kref_release(struct kref *kref);

static DEFINE_SPINLOCK(gb_connections_lock);
static DEFINE_MUTEX(gb_connection_mutex);

/* Caller holds gb_connection_mutex. */
static bool gb_connection_cport_in_use(struct gb_interface *intf, u16 cport_id)
{
	struct gb_host_device *hd = intf->hd;
	struct gb_connection *connection;

	list_for_each_entry(connection, &hd->connections, hd_links) {
		if (connection->intf == intf &&
		    connection->intf_cport_id == cport_id)
			return true;
	}

	return false;
}

static void gb_connection_get(struct gb_connection *connection)
{
	kref_get(&connection->kref);

	trace_gb_connection_get(connection);
}

static void gb_connection_put(struct gb_connection *connection)
{
	trace_gb_connection_put(connection);

	kref_put(&connection->kref, gb_connection_kref_release);
}

/*
 * Returns a reference-counted pointer to the connection if found.
 */
static struct gb_connection *
gb_connection_hd_find(struct gb_host_device *hd, u16 cport_id)
{
	struct gb_connection *connection;
	unsigned long flags;

	spin_lock_irqsave(&gb_connections_lock, flags);
	list_for_each_entry(connection, &hd->connections, hd_links)
		if (connection->hd_cport_id == cport_id) {
			gb_connection_get(connection);
			goto found;
		}
	connection = NULL;
found:
	spin_unlock_irqrestore(&gb_connections_lock, flags);

	return connection;
}

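/*
 * Example (a sketch, not part of the driver proper): every caller of
 * gb_connection_hd_find() owns a reference on the returned connection
 * and must drop it with gb_connection_put(), as greybus_data_rcvd()
 * below does:
 *
 *	connection = gb_connection_hd_find(hd, cport_id);
 *	if (connection) {
 *		...
 *		gb_connection_put(connection);
 *	}
 */
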
/*
 * Callback from the host driver to let us know that data has been
 * received on the bundle.
 */
void greybus_data_rcvd(struct gb_host_device *hd, u16 cport_id,
		       u8 *data, size_t length)
{
	struct gb_connection *connection;

	trace_gb_hd_in(hd);

	connection = gb_connection_hd_find(hd, cport_id);
	if (!connection) {
		dev_err(&hd->dev,
			"nonexistent connection (%zu bytes dropped)\n", length);
		return;
	}
	gb_connection_recv(connection, data, length);
	gb_connection_put(connection);
}
EXPORT_SYMBOL_GPL(greybus_data_rcvd);

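/*
 * Example (sketch): a host-device driver forwards completed in-bound
 * transfers to greybus core from its RX completion path; "my_hd" and the
 * handler name are hypothetical:
 *
 *	static void my_hd_rx_complete(struct my_hd *my_hd, u16 cport_id,
 *				      u8 *data, size_t length)
 *	{
 *		greybus_data_rcvd(my_hd->hd, cport_id, data, length);
 *	}
 */
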
static void gb_connection_kref_release(struct kref *kref)
{
	struct gb_connection *connection;

	connection = container_of(kref, struct gb_connection, kref);

	trace_gb_connection_release(connection);

	kfree(connection);
}

static void gb_connection_init_name(struct gb_connection *connection)
{
	u16 hd_cport_id = connection->hd_cport_id;
	u16 cport_id = 0;
	u8 intf_id = 0;

	if (connection->intf) {
		intf_id = connection->intf->interface_id;
		cport_id = connection->intf_cport_id;
	}

	snprintf(connection->name, sizeof(connection->name),
		 "%u/%u:%u", hd_cport_id, intf_id, cport_id);
}

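/*
 * With the format above, e.g. host cport 4 on interface 1, cport 2 is
 * named "4/1:2"; static connections keep the interface and cport ids
 * at 0.
 */
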
/*
 * _gb_connection_create() - create a Greybus connection
 * @hd: host device of the connection
 * @hd_cport_id: host-device cport id, or -1 for dynamic allocation
 * @intf: remote interface, or NULL for static connections
 * @bundle: remote-interface bundle (may be NULL)
 * @cport_id: remote-interface cport id, or 0 for static connections
 * @handler: request handler (may be NULL)
 * @flags: connection flags
 *
 * Create a Greybus connection, representing the bidirectional link
 * between a CPort on a (local) Greybus host device and a CPort on
 * another Greybus interface.
 *
 * A connection also maintains the state of operations sent over the
 * connection.
 *
 * Serialised against concurrent create and destroy using the
 * gb_connection_mutex.
 *
 * Return: A pointer to the new connection if successful, or an ERR_PTR
 * otherwise.
 */
static struct gb_connection *
_gb_connection_create(struct gb_host_device *hd, int hd_cport_id,
		      struct gb_interface *intf,
		      struct gb_bundle *bundle, int cport_id,
		      gb_request_handler_t handler,
		      unsigned long flags)
{
	struct gb_connection *connection;
	int ret;

	mutex_lock(&gb_connection_mutex);

	if (intf && gb_connection_cport_in_use(intf, cport_id)) {
		dev_err(&intf->dev, "cport %u already in use\n", cport_id);
		ret = -EBUSY;
		goto err_unlock;
	}

	ret = gb_hd_cport_allocate(hd, hd_cport_id, flags);
	if (ret < 0) {
		dev_err(&hd->dev, "failed to allocate cport: %d\n", ret);
		goto err_unlock;
	}
	hd_cport_id = ret;

	connection = kzalloc(sizeof(*connection), GFP_KERNEL);
	if (!connection) {
		ret = -ENOMEM;
		goto err_hd_cport_release;
	}

	connection->hd_cport_id = hd_cport_id;
	connection->intf_cport_id = cport_id;
	connection->hd = hd;
	connection->intf = intf;
	connection->bundle = bundle;
	connection->handler = handler;
	connection->flags = flags;
	if (intf && (intf->quirks & GB_INTERFACE_QUIRK_NO_CPORT_FEATURES))
		connection->flags |= GB_CONNECTION_FLAG_NO_FLOWCTRL;
	connection->state = GB_CONNECTION_STATE_DISABLED;

	atomic_set(&connection->op_cycle, 0);
	mutex_init(&connection->mutex);
	spin_lock_init(&connection->lock);
	INIT_LIST_HEAD(&connection->operations);

	connection->wq = alloc_workqueue("%s:%d", WQ_UNBOUND, 1,
					 dev_name(&hd->dev), hd_cport_id);
	if (!connection->wq) {
		ret = -ENOMEM;
		goto err_free_connection;
	}

	kref_init(&connection->kref);

	gb_connection_init_name(connection);

	spin_lock_irq(&gb_connections_lock);
	list_add(&connection->hd_links, &hd->connections);

	if (bundle)
		list_add(&connection->bundle_links, &bundle->connections);
	else
		INIT_LIST_HEAD(&connection->bundle_links);

	spin_unlock_irq(&gb_connections_lock);

	mutex_unlock(&gb_connection_mutex);

	trace_gb_connection_create(connection);

	return connection;

err_free_connection:
	kfree(connection);
err_hd_cport_release:
	gb_hd_cport_release(hd, hd_cport_id);
err_unlock:
	mutex_unlock(&gb_connection_mutex);

	return ERR_PTR(ret);
}

struct gb_connection *
gb_connection_create_static(struct gb_host_device *hd, u16 hd_cport_id,
			    gb_request_handler_t handler)
{
	return _gb_connection_create(hd, hd_cport_id, NULL, NULL, 0, handler,
				     GB_CONNECTION_FLAG_HIGH_PRIO);
}

struct gb_connection *
gb_connection_create_control(struct gb_interface *intf)
{
	return _gb_connection_create(intf->hd, -1, intf, NULL, 0, NULL,
				     GB_CONNECTION_FLAG_CONTROL |
				     GB_CONNECTION_FLAG_HIGH_PRIO);
}

struct gb_connection *
gb_connection_create(struct gb_bundle *bundle, u16 cport_id,
		     gb_request_handler_t handler)
{
	struct gb_interface *intf = bundle->intf;

	return _gb_connection_create(intf->hd, -1, intf, bundle, cport_id,
				     handler, 0);
}
EXPORT_SYMBOL_GPL(gb_connection_create);

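/*
 * Example (sketch): a bundle driver typically creates its connection at
 * probe time; "my_request_handler" and the surrounding driver are
 * hypothetical:
 *
 *	connection = gb_connection_create(bundle,
 *					  le16_to_cpu(cport_desc->id),
 *					  my_request_handler);
 *	if (IS_ERR(connection))
 *		return PTR_ERR(connection);
 */
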
struct gb_connection *
gb_connection_create_flags(struct gb_bundle *bundle, u16 cport_id,
			   gb_request_handler_t handler,
			   unsigned long flags)
{
	struct gb_interface *intf = bundle->intf;

	if (WARN_ON_ONCE(flags & GB_CONNECTION_FLAG_CORE_MASK))
		flags &= ~GB_CONNECTION_FLAG_CORE_MASK;

	return _gb_connection_create(intf->hd, -1, intf, bundle, cport_id,
				     handler, flags);
}
EXPORT_SYMBOL_GPL(gb_connection_create_flags);

struct gb_connection *
gb_connection_create_offloaded(struct gb_bundle *bundle, u16 cport_id,
			       unsigned long flags)
{
	flags |= GB_CONNECTION_FLAG_OFFLOADED;

	return gb_connection_create_flags(bundle, cport_id, NULL, flags);
}
EXPORT_SYMBOL_GPL(gb_connection_create_offloaded);

static int gb_connection_hd_cport_enable(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->cport_enable)
		return 0;

	ret = hd->driver->cport_enable(hd, connection->hd_cport_id,
				       connection->flags);
	if (ret) {
		dev_err(&hd->dev, "%s: failed to enable host cport: %d\n",
			connection->name, ret);
		return ret;
	}

	return 0;
}

static void gb_connection_hd_cport_disable(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->cport_disable)
		return;

	ret = hd->driver->cport_disable(hd, connection->hd_cport_id);
	if (ret) {
		dev_err(&hd->dev, "%s: failed to disable host cport: %d\n",
			connection->name, ret);
	}
}

static int gb_connection_hd_cport_connected(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->cport_connected)
		return 0;

	ret = hd->driver->cport_connected(hd, connection->hd_cport_id);
	if (ret) {
		dev_err(&hd->dev, "%s: failed to set connected state: %d\n",
			connection->name, ret);
		return ret;
	}

	return 0;
}

static int gb_connection_hd_cport_flush(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->cport_flush)
		return 0;

	ret = hd->driver->cport_flush(hd, connection->hd_cport_id);
	if (ret) {
		dev_err(&hd->dev, "%s: failed to flush host cport: %d\n",
			connection->name, ret);
		return ret;
	}

	return 0;
}

static int gb_connection_hd_cport_quiesce(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	size_t peer_space;
	int ret;

	if (!hd->driver->cport_quiesce)
		return 0;

	peer_space = sizeof(struct gb_operation_msg_hdr) +
			sizeof(struct gb_cport_shutdown_request);

	if (connection->mode_switch)
		peer_space += sizeof(struct gb_operation_msg_hdr);

	ret = hd->driver->cport_quiesce(hd, connection->hd_cport_id,
					peer_space,
					GB_CONNECTION_CPORT_QUIESCE_TIMEOUT);
	if (ret) {
		dev_err(&hd->dev, "%s: failed to quiesce host cport: %d\n",
			connection->name, ret);
		return ret;
	}

	return 0;
}

static int gb_connection_hd_cport_clear(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->cport_clear)
		return 0;

	ret = hd->driver->cport_clear(hd, connection->hd_cport_id);
	if (ret) {
		dev_err(&hd->dev, "%s: failed to clear host cport: %d\n",
			connection->name, ret);
		return ret;
	}

	return 0;
}

/*
 * Request the SVC to create a connection from AP's cport to interface's
 * cport.
 */
static int
gb_connection_svc_connection_create(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	struct gb_interface *intf;
	u8 cport_flags;
	int ret;

	if (gb_connection_is_static(connection))
		return 0;

	intf = connection->intf;

	/*
	 * Enable either E2EFC or CSD, unless no flow control is requested.
	 */
	cport_flags = GB_SVC_CPORT_FLAG_CSV_N;
	if (gb_connection_flow_control_disabled(connection)) {
		cport_flags |= GB_SVC_CPORT_FLAG_CSD_N;
	} else if (gb_connection_e2efc_enabled(connection)) {
		cport_flags |= GB_SVC_CPORT_FLAG_CSD_N |
				GB_SVC_CPORT_FLAG_E2EFC;
	}

	ret = gb_svc_connection_create(hd->svc,
				       hd->svc->ap_intf_id,
				       connection->hd_cport_id,
				       intf->interface_id,
				       connection->intf_cport_id,
				       cport_flags);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: failed to create svc connection: %d\n",
			connection->name, ret);
		return ret;
	}

	return 0;
}

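/*
 * Flow-control summary of the flags chosen above (a reading of the code,
 * not a spec quote): CSV is always disabled (CSV_N); with flow control
 * disabled, CSD is disabled as well (CSD_N); with E2EFC enabled, CSD is
 * disabled and E2EFC enabled; otherwise CSD alone is left enabled.
 */
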
static void
gb_connection_svc_connection_destroy(struct gb_connection *connection)
{
	if (gb_connection_is_static(connection))
		return;

	gb_svc_connection_destroy(connection->hd->svc,
				  connection->hd->svc->ap_intf_id,
				  connection->hd_cport_id,
				  connection->intf->interface_id,
				  connection->intf_cport_id);
}

/* Inform Interface about active CPorts */
static int gb_connection_control_connected(struct gb_connection *connection)
{
	struct gb_control *control;
	u16 cport_id = connection->intf_cport_id;
	int ret;

	if (gb_connection_is_static(connection))
		return 0;

	if (gb_connection_is_control(connection))
		return 0;

	control = connection->intf->control;

	ret = gb_control_connected_operation(control, cport_id);
	if (ret) {
		dev_err(&connection->bundle->dev,
			"failed to connect cport: %d\n", ret);
		return ret;
	}

	return 0;
}

static void
gb_connection_control_disconnecting(struct gb_connection *connection)
{
	struct gb_control *control;
	u16 cport_id = connection->intf_cport_id;
	int ret;

	if (gb_connection_is_static(connection))
		return;

	control = connection->intf->control;

	ret = gb_control_disconnecting_operation(control, cport_id);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: failed to send disconnecting: %d\n",
			connection->name, ret);
	}
}

static void
gb_connection_control_disconnected(struct gb_connection *connection)
{
	struct gb_control *control;
	u16 cport_id = connection->intf_cport_id;
	int ret;

	if (gb_connection_is_static(connection))
		return;

	control = connection->intf->control;

	if (gb_connection_is_control(connection)) {
		if (connection->mode_switch) {
			ret = gb_control_mode_switch_operation(control);
			if (ret) {
				/*
				 * Allow mode switch to time out waiting for
				 * mailbox event.
				 */
				return;
			}
		}

		return;
	}

	ret = gb_control_disconnected_operation(control, cport_id);
	if (ret) {
		dev_warn(&connection->bundle->dev,
			 "failed to disconnect cport: %d\n", ret);
	}
}

static int gb_connection_shutdown_operation(struct gb_connection *connection,
					    u8 phase)
{
	struct gb_cport_shutdown_request *req;
	struct gb_operation *operation;
	int ret;

	operation = gb_operation_create_core(connection,
					     GB_REQUEST_TYPE_CPORT_SHUTDOWN,
					     sizeof(*req), 0, 0,
					     GFP_NOIO);
	if (!operation)
		return -ENOMEM;

	req = operation->request->payload;
	req->phase = phase;

	ret = gb_operation_request_send_sync(operation);

	gb_operation_put(operation);

	return ret;
}

static int gb_connection_cport_shutdown(struct gb_connection *connection,
					u8 phase)
{
	struct gb_host_device *hd = connection->hd;
	const struct gb_hd_driver *drv = hd->driver;
	int ret;

	if (gb_connection_is_static(connection))
		return 0;

	if (gb_connection_is_offloaded(connection)) {
		if (!drv->cport_shutdown)
			return 0;

		ret = drv->cport_shutdown(hd, connection->hd_cport_id, phase,
					  GB_OPERATION_TIMEOUT_DEFAULT);
	} else {
		ret = gb_connection_shutdown_operation(connection, phase);
	}

	if (ret) {
		dev_err(&hd->dev, "%s: failed to send cport shutdown (phase %d): %d\n",
			connection->name, phase, ret);
		return ret;
	}

	return 0;
}

static int
gb_connection_cport_shutdown_phase_1(struct gb_connection *connection)
{
	return gb_connection_cport_shutdown(connection, 1);
}

static int
gb_connection_cport_shutdown_phase_2(struct gb_connection *connection)
{
	return gb_connection_cport_shutdown(connection, 2);
}

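/*
 * Note: the disable paths below run phase 1 before quiescing the host
 * cport and phase 2 after it, so the final (phase 2) shutdown request is
 * the last message to cross the connection.
 */
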
/*
 * Cancel all active operations on a connection.
 *
 * Locking: Called with connection lock held and state set to DISABLED or
 * DISCONNECTING.
 */
static void gb_connection_cancel_operations(struct gb_connection *connection,
					    int errno)
	__must_hold(&connection->lock)
{
	struct gb_operation *operation;

	while (!list_empty(&connection->operations)) {
		operation = list_last_entry(&connection->operations,
					    struct gb_operation, links);
		gb_operation_get(operation);
		spin_unlock_irq(&connection->lock);

		if (gb_operation_is_incoming(operation))
			gb_operation_cancel_incoming(operation, errno);
		else
			gb_operation_cancel(operation, errno);

		gb_operation_put(operation);

		spin_lock_irq(&connection->lock);
	}
}

/*
 * Cancel all active incoming operations on a connection.
 *
 * Locking: Called with connection lock held and state set to ENABLED_TX.
 */
static void
gb_connection_flush_incoming_operations(struct gb_connection *connection,
					int errno)
	__must_hold(&connection->lock)
{
	struct gb_operation *operation;
	bool incoming;

	while (!list_empty(&connection->operations)) {
		incoming = false;
		list_for_each_entry(operation, &connection->operations,
				    links) {
			if (gb_operation_is_incoming(operation)) {
				gb_operation_get(operation);
				incoming = true;
				break;
			}
		}

		if (!incoming)
			break;

		spin_unlock_irq(&connection->lock);

		/* FIXME: flush, not cancel? */
		gb_operation_cancel_incoming(operation, errno);
		gb_operation_put(operation);

		spin_lock_irq(&connection->lock);
	}
}

/*
 * _gb_connection_enable() - enable a connection
 * @connection: connection to enable
 * @rx: whether to enable incoming requests
 *
 * Connection-enable helper for DISABLED->ENABLED, DISABLED->ENABLED_TX, and
 * ENABLED_TX->ENABLED state transitions.
 *
 * Locking: Caller holds connection->mutex.
 */
static int _gb_connection_enable(struct gb_connection *connection, bool rx)
{
	int ret;

	/* Handle ENABLED_TX -> ENABLED transitions. */
	if (connection->state == GB_CONNECTION_STATE_ENABLED_TX) {
		if (!(connection->handler && rx))
			return -EINVAL;

		spin_lock_irq(&connection->lock);
		connection->state = GB_CONNECTION_STATE_ENABLED;
		spin_unlock_irq(&connection->lock);

		return 0;
	}

	ret = gb_connection_hd_cport_enable(connection);
	if (ret)
		return ret;

	ret = gb_connection_svc_connection_create(connection);
	if (ret)
		goto err_hd_cport_clear;

	ret = gb_connection_hd_cport_connected(connection);
	if (ret)
		goto err_svc_connection_destroy;

	spin_lock_irq(&connection->lock);
	if (connection->handler && rx)
		connection->state = GB_CONNECTION_STATE_ENABLED;
	else
		connection->state = GB_CONNECTION_STATE_ENABLED_TX;
	spin_unlock_irq(&connection->lock);

	ret = gb_connection_control_connected(connection);
	if (ret)
		goto err_control_disconnecting;

	return 0;

err_control_disconnecting:
	spin_lock_irq(&connection->lock);
	connection->state = GB_CONNECTION_STATE_DISCONNECTING;
	gb_connection_cancel_operations(connection, -ESHUTDOWN);
	spin_unlock_irq(&connection->lock);

	/* Transmit queue should already be empty. */
	gb_connection_hd_cport_flush(connection);

	gb_connection_control_disconnecting(connection);
	gb_connection_cport_shutdown_phase_1(connection);
	gb_connection_hd_cport_quiesce(connection);
	gb_connection_cport_shutdown_phase_2(connection);
	gb_connection_control_disconnected(connection);
	connection->state = GB_CONNECTION_STATE_DISABLED;
err_svc_connection_destroy:
	gb_connection_svc_connection_destroy(connection);
err_hd_cport_clear:
	gb_connection_hd_cport_clear(connection);

	gb_connection_hd_cport_disable(connection);

	return ret;
}

int gb_connection_enable(struct gb_connection *connection)
{
	int ret = 0;

	mutex_lock(&connection->mutex);

	if (connection->state == GB_CONNECTION_STATE_ENABLED)
		goto out_unlock;

	ret = _gb_connection_enable(connection, true);
	if (!ret)
		trace_gb_connection_enable(connection);

out_unlock:
	mutex_unlock(&connection->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(gb_connection_enable);

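/*
 * Example (sketch): drivers pair gb_connection_enable() with
 * gb_connection_disable() around use of the connection; the error label
 * below is hypothetical:
 *
 *	ret = gb_connection_enable(connection);
 *	if (ret)
 *		goto err_connection_destroy;
 */
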
int gb_connection_enable_tx(struct gb_connection *connection)
{
	int ret = 0;

	mutex_lock(&connection->mutex);

	if (connection->state == GB_CONNECTION_STATE_ENABLED) {
		ret = -EINVAL;
		goto out_unlock;
	}

	if (connection->state == GB_CONNECTION_STATE_ENABLED_TX)
		goto out_unlock;

	ret = _gb_connection_enable(connection, false);
	if (!ret)
		trace_gb_connection_enable(connection);

out_unlock:
	mutex_unlock(&connection->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(gb_connection_enable_tx);

void gb_connection_disable_rx(struct gb_connection *connection)
{
	mutex_lock(&connection->mutex);

	spin_lock_irq(&connection->lock);
	if (connection->state != GB_CONNECTION_STATE_ENABLED) {
		spin_unlock_irq(&connection->lock);
		goto out_unlock;
	}
	connection->state = GB_CONNECTION_STATE_ENABLED_TX;
	gb_connection_flush_incoming_operations(connection, -ESHUTDOWN);
	spin_unlock_irq(&connection->lock);

	trace_gb_connection_disable(connection);

out_unlock:
	mutex_unlock(&connection->mutex);
}
EXPORT_SYMBOL_GPL(gb_connection_disable_rx);

void gb_connection_mode_switch_prepare(struct gb_connection *connection)
{
	connection->mode_switch = true;
}

void gb_connection_mode_switch_complete(struct gb_connection *connection)
{
	gb_connection_svc_connection_destroy(connection);
	gb_connection_hd_cport_clear(connection);

	gb_connection_hd_cport_disable(connection);

	connection->mode_switch = false;
}

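/*
 * Example (sketch): a mode-switch user brackets the disable with the two
 * helpers above, so that cport teardown is deferred until the switch has
 * completed:
 *
 *	gb_connection_mode_switch_prepare(connection);
 *	gb_connection_disable(connection);
 *	...
 *	gb_connection_mode_switch_complete(connection);
 */
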
void gb_connection_disable(struct gb_connection *connection)
{
	mutex_lock(&connection->mutex);

	if (connection->state == GB_CONNECTION_STATE_DISABLED)
		goto out_unlock;

	trace_gb_connection_disable(connection);

	spin_lock_irq(&connection->lock);
	connection->state = GB_CONNECTION_STATE_DISCONNECTING;
	gb_connection_cancel_operations(connection, -ESHUTDOWN);
	spin_unlock_irq(&connection->lock);

	gb_connection_hd_cport_flush(connection);

	gb_connection_control_disconnecting(connection);
	gb_connection_cport_shutdown_phase_1(connection);
	gb_connection_hd_cport_quiesce(connection);
	gb_connection_cport_shutdown_phase_2(connection);
	gb_connection_control_disconnected(connection);

	connection->state = GB_CONNECTION_STATE_DISABLED;

	/* control-connection tear down is deferred when mode switching */
	if (!connection->mode_switch) {
		gb_connection_svc_connection_destroy(connection);
		gb_connection_hd_cport_clear(connection);

		gb_connection_hd_cport_disable(connection);
	}

out_unlock:
	mutex_unlock(&connection->mutex);
}
EXPORT_SYMBOL_GPL(gb_connection_disable);

/* Disable a connection without communicating with the remote end. */
void gb_connection_disable_forced(struct gb_connection *connection)
{
	mutex_lock(&connection->mutex);

	if (connection->state == GB_CONNECTION_STATE_DISABLED)
		goto out_unlock;

	trace_gb_connection_disable(connection);

	spin_lock_irq(&connection->lock);
	connection->state = GB_CONNECTION_STATE_DISABLED;
	gb_connection_cancel_operations(connection, -ESHUTDOWN);
	spin_unlock_irq(&connection->lock);

	gb_connection_hd_cport_flush(connection);

	gb_connection_svc_connection_destroy(connection);
	gb_connection_hd_cport_clear(connection);

	gb_connection_hd_cport_disable(connection);

out_unlock:
	mutex_unlock(&connection->mutex);
}
EXPORT_SYMBOL_GPL(gb_connection_disable_forced);

/* Caller must have disabled the connection before destroying it. */
void gb_connection_destroy(struct gb_connection *connection)
{
	if (!connection)
		return;

	if (WARN_ON(connection->state != GB_CONNECTION_STATE_DISABLED))
		gb_connection_disable(connection);

	mutex_lock(&gb_connection_mutex);

	spin_lock_irq(&gb_connections_lock);
	list_del(&connection->bundle_links);
	list_del(&connection->hd_links);
	spin_unlock_irq(&gb_connections_lock);

	destroy_workqueue(connection->wq);

	gb_hd_cport_release(connection->hd, connection->hd_cport_id);
	connection->hd_cport_id = CPORT_ID_BAD;

	mutex_unlock(&gb_connection_mutex);

	gb_connection_put(connection);
}
EXPORT_SYMBOL_GPL(gb_connection_destroy);

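/*
 * Example (sketch): typical teardown order in a driver's remove path:
 *
 *	gb_connection_disable(connection);
 *	gb_connection_destroy(connection);
 */
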
void gb_connection_latency_tag_enable(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->latency_tag_enable)
		return;

	ret = hd->driver->latency_tag_enable(hd, connection->hd_cport_id);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: failed to enable latency tag: %d\n",
			connection->name, ret);
	}
}
EXPORT_SYMBOL_GPL(gb_connection_latency_tag_enable);

void gb_connection_latency_tag_disable(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->latency_tag_disable)
		return;

	ret = hd->driver->latency_tag_disable(hd, connection->hd_cport_id);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: failed to disable latency tag: %d\n",
			connection->name, ret);
	}
}
EXPORT_SYMBOL_GPL(gb_connection_latency_tag_disable);