// SPDX-License-Identifier: GPL-2.0
/*
 * Greybus connections
 *
 * Copyright 2014 Google Inc.
 * Copyright 2014 Linaro Ltd.
 */

#include <linux/workqueue.h>
#include <linux/greybus.h>

#include "greybus_trace.h"

#define GB_CONNECTION_CPORT_QUIESCE_TIMEOUT	1000

static void gb_connection_kref_release(struct kref *kref);

static DEFINE_SPINLOCK(gb_connections_lock);
static DEFINE_MUTEX(gb_connection_mutex);
/* Caller holds gb_connection_mutex. */
static bool gb_connection_cport_in_use(struct gb_interface *intf, u16 cport_id)
{
	struct gb_host_device *hd = intf->hd;
	struct gb_connection *connection;

	list_for_each_entry(connection, &hd->connections, hd_links) {
		if (connection->intf == intf &&
		    connection->intf_cport_id == cport_id)
			return true;
	}

	return false;
}
static void gb_connection_get(struct gb_connection *connection)
{
	kref_get(&connection->kref);

	trace_gb_connection_get(connection);
}
static void gb_connection_put(struct gb_connection *connection)
{
	trace_gb_connection_put(connection);

	kref_put(&connection->kref, gb_connection_kref_release);
}
/*
 * Returns a reference-counted pointer to the connection if found.
 */
static struct gb_connection *
gb_connection_hd_find(struct gb_host_device *hd, u16 cport_id)
{
	struct gb_connection *connection;
	unsigned long flags;

	spin_lock_irqsave(&gb_connections_lock, flags);
	list_for_each_entry(connection, &hd->connections, hd_links)
		if (connection->hd_cport_id == cport_id) {
			gb_connection_get(connection);
			goto found;
		}
	connection = NULL;
found:
	spin_unlock_irqrestore(&gb_connections_lock, flags);

	return connection;
}
/*
 * Callback from the host driver to let us know that data has been
 * received on the bundle.
 */
void greybus_data_rcvd(struct gb_host_device *hd, u16 cport_id,
		       u8 *data, size_t length)
{
	struct gb_connection *connection;

	trace_gb_hd_in(hd);

	connection = gb_connection_hd_find(hd, cport_id);
	if (!connection) {
		dev_err(&hd->dev,
			"nonexistent connection (%zu bytes dropped)\n", length);
		return;
	}
	gb_connection_recv(connection, data, length);
	gb_connection_put(connection);
}
EXPORT_SYMBOL_GPL(greybus_data_rcvd);
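
/*
 * Usage sketch (illustrative only; the gb_foo_* names are hypothetical
 * and not part of greybus): a host driver calls greybus_data_rcvd()
 * from its receive path, e.g. a URB-completion or DMA-done handler,
 * passing the cport id the message arrived on and the raw buffer.
 *
 *	static void gb_foo_hd_rx(struct gb_host_device *hd, u16 cport_id,
 *				 u8 *buf, size_t len)
 *	{
 *		greybus_data_rcvd(hd, cport_id, buf, len);
 *	}
 */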
static void gb_connection_kref_release(struct kref *kref)
{
	struct gb_connection *connection;

	connection = container_of(kref, struct gb_connection, kref);

	trace_gb_connection_release(connection);

	kfree(connection);
}
static void gb_connection_init_name(struct gb_connection *connection)
{
	u16 hd_cport_id = connection->hd_cport_id;
	u16 cport_id = 0;
	u8 intf_id = 0;

	if (connection->intf) {
		intf_id = connection->intf->interface_id;
		cport_id = connection->intf_cport_id;
	}

	snprintf(connection->name, sizeof(connection->name),
		 "%u/%u:%u", hd_cport_id, intf_id, cport_id);
}
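
/*
 * Example of the resulting name (values illustrative): host cport 4
 * connected to cport 2 of interface 1 yields "4/1:2"; a static
 * connection, which has no interface, yields "4/0:0".
 */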
/*
 * _gb_connection_create() - create a Greybus connection
 * @hd:			host device of the connection
 * @hd_cport_id:	host-device cport id, or -1 for dynamic allocation
 * @intf:		remote interface, or NULL for static connections
 * @bundle:		remote-interface bundle (may be NULL)
 * @cport_id:		remote-interface cport id, or 0 for static connections
 * @handler:		request handler (may be NULL)
 * @flags:		connection flags
 *
 * Create a Greybus connection, representing the bidirectional link
 * between a CPort on a (local) Greybus host device and a CPort on
 * another Greybus interface.
 *
 * A connection also maintains the state of operations sent over the
 * connection.
 *
 * Serialised against concurrent create and destroy using the
 * gb_connection_mutex.
 *
 * Return: A pointer to the new connection if successful, or an ERR_PTR
 * otherwise.
 */
static struct gb_connection *
_gb_connection_create(struct gb_host_device *hd, int hd_cport_id,
		      struct gb_interface *intf,
		      struct gb_bundle *bundle, int cport_id,
		      gb_request_handler_t handler,
		      unsigned long flags)
{
	struct gb_connection *connection;
	int ret;

	mutex_lock(&gb_connection_mutex);

	if (intf && gb_connection_cport_in_use(intf, cport_id)) {
		dev_err(&intf->dev, "cport %u already in use\n", cport_id);
		ret = -EBUSY;
		goto err_unlock;
	}

	ret = gb_hd_cport_allocate(hd, hd_cport_id, flags);
	if (ret < 0) {
		dev_err(&hd->dev, "failed to allocate cport: %d\n", ret);
		goto err_unlock;
	}
	hd_cport_id = ret;

	connection = kzalloc(sizeof(*connection), GFP_KERNEL);
	if (!connection) {
		ret = -ENOMEM;
		goto err_hd_cport_release;
	}

	connection->hd_cport_id = hd_cport_id;
	connection->intf_cport_id = cport_id;
	connection->hd = hd;
	connection->intf = intf;
	connection->bundle = bundle;
	connection->handler = handler;
	connection->flags = flags;
	if (intf && (intf->quirks & GB_INTERFACE_QUIRK_NO_CPORT_FEATURES))
		connection->flags |= GB_CONNECTION_FLAG_NO_FLOWCTRL;
	connection->state = GB_CONNECTION_STATE_DISABLED;

	atomic_set(&connection->op_cycle, 0);
	mutex_init(&connection->mutex);
	spin_lock_init(&connection->lock);
	INIT_LIST_HEAD(&connection->operations);

	connection->wq = alloc_workqueue("%s:%d", WQ_UNBOUND, 1,
					 dev_name(&hd->dev), hd_cport_id);
	if (!connection->wq) {
		ret = -ENOMEM;
		goto err_free_connection;
	}

	kref_init(&connection->kref);

	gb_connection_init_name(connection);

	spin_lock_irq(&gb_connections_lock);
	list_add(&connection->hd_links, &hd->connections);

	if (bundle)
		list_add(&connection->bundle_links, &bundle->connections);
	else
		INIT_LIST_HEAD(&connection->bundle_links);

	spin_unlock_irq(&gb_connections_lock);

	mutex_unlock(&gb_connection_mutex);

	trace_gb_connection_create(connection);

	return connection;

err_free_connection:
	kfree(connection);
err_hd_cport_release:
	gb_hd_cport_release(hd, hd_cport_id);
err_unlock:
	mutex_unlock(&gb_connection_mutex);

	return ERR_PTR(ret);
}
struct gb_connection *
gb_connection_create_static(struct gb_host_device *hd, u16 hd_cport_id,
			    gb_request_handler_t handler)
{
	return _gb_connection_create(hd, hd_cport_id, NULL, NULL, 0, handler,
				     GB_CONNECTION_FLAG_HIGH_PRIO);
}
struct gb_connection *
gb_connection_create_control(struct gb_interface *intf)
{
	return _gb_connection_create(intf->hd, -1, intf, NULL, 0, NULL,
				     GB_CONNECTION_FLAG_CONTROL |
				     GB_CONNECTION_FLAG_HIGH_PRIO);
}
struct gb_connection *
gb_connection_create(struct gb_bundle *bundle, u16 cport_id,
		     gb_request_handler_t handler)
{
	struct gb_interface *intf = bundle->intf;

	return _gb_connection_create(intf->hd, -1, intf, bundle, cport_id,
				     handler, 0);
}
EXPORT_SYMBOL_GPL(gb_connection_create);
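
/*
 * Usage sketch (illustrative only; the gb_foo_* names are hypothetical):
 * a bundle driver typically creates and enables its connection from
 * probe().
 *
 *	static int gb_foo_probe(struct gb_bundle *bundle,
 *				const struct greybus_bundle_id *id)
 *	{
 *		struct gb_connection *connection;
 *		int ret;
 *
 *		connection = gb_connection_create(bundle,
 *				le16_to_cpu(bundle->cport_desc[0].id),
 *				gb_foo_request_handler);
 *		if (IS_ERR(connection))
 *			return PTR_ERR(connection);
 *
 *		ret = gb_connection_enable(connection);
 *		if (ret) {
 *			gb_connection_destroy(connection);
 *			return ret;
 *		}
 *
 *		return 0;
 *	}
 */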
struct gb_connection *
gb_connection_create_flags(struct gb_bundle *bundle, u16 cport_id,
			   gb_request_handler_t handler,
			   unsigned long flags)
{
	struct gb_interface *intf = bundle->intf;

	if (WARN_ON_ONCE(flags & GB_CONNECTION_FLAG_CORE_MASK))
		flags &= ~GB_CONNECTION_FLAG_CORE_MASK;

	return _gb_connection_create(intf->hd, -1, intf, bundle, cport_id,
				     handler, flags);
}
EXPORT_SYMBOL_GPL(gb_connection_create_flags);
struct gb_connection *
gb_connection_create_offloaded(struct gb_bundle *bundle, u16 cport_id,
			       unsigned long flags)
{
	flags |= GB_CONNECTION_FLAG_OFFLOADED;

	return gb_connection_create_flags(bundle, cport_id, NULL, flags);
}
EXPORT_SYMBOL_GPL(gb_connection_create_offloaded);
static int gb_connection_hd_cport_enable(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->cport_enable)
		return 0;

	ret = hd->driver->cport_enable(hd, connection->hd_cport_id,
				       connection->flags);
	if (ret) {
		dev_err(&hd->dev, "%s: failed to enable host cport: %d\n",
			connection->name, ret);
		return ret;
	}

	return 0;
}
static void gb_connection_hd_cport_disable(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->cport_disable)
		return;

	ret = hd->driver->cport_disable(hd, connection->hd_cport_id);
	if (ret) {
		dev_err(&hd->dev, "%s: failed to disable host cport: %d\n",
			connection->name, ret);
	}
}
static int gb_connection_hd_cport_connected(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->cport_connected)
		return 0;

	ret = hd->driver->cport_connected(hd, connection->hd_cport_id);
	if (ret) {
		dev_err(&hd->dev, "%s: failed to set connected state: %d\n",
			connection->name, ret);
		return ret;
	}

	return 0;
}
static int gb_connection_hd_cport_flush(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->cport_flush)
		return 0;

	ret = hd->driver->cport_flush(hd, connection->hd_cport_id);
	if (ret) {
		dev_err(&hd->dev, "%s: failed to flush host cport: %d\n",
			connection->name, ret);
		return ret;
	}

	return 0;
}
static int gb_connection_hd_cport_quiesce(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	size_t peer_space;
	int ret;

	if (!hd->driver->cport_quiesce)
		return 0;

	peer_space = sizeof(struct gb_operation_msg_hdr) +
			sizeof(struct gb_cport_shutdown_request);

	if (connection->mode_switch)
		peer_space += sizeof(struct gb_operation_msg_hdr);

	ret = hd->driver->cport_quiesce(hd, connection->hd_cport_id,
					peer_space,
					GB_CONNECTION_CPORT_QUIESCE_TIMEOUT);
	if (ret) {
		dev_err(&hd->dev, "%s: failed to quiesce host cport: %d\n",
			connection->name, ret);
		return ret;
	}

	return 0;
}
static int gb_connection_hd_cport_clear(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->cport_clear)
		return 0;

	ret = hd->driver->cport_clear(hd, connection->hd_cport_id);
	if (ret) {
		dev_err(&hd->dev, "%s: failed to clear host cport: %d\n",
			connection->name, ret);
		return ret;
	}

	return 0;
}
/*
 * Request the SVC to create a connection from AP's cport to interface's
 * cport.
 */
static int
gb_connection_svc_connection_create(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	struct gb_interface *intf;
	u8 cport_flags;
	int ret;

	if (gb_connection_is_static(connection))
		return 0;

	intf = connection->intf;

	/*
	 * Enable either E2EFC or CSD, unless no flow control is requested.
	 */
	cport_flags = GB_SVC_CPORT_FLAG_CSV_N;
	if (gb_connection_flow_control_disabled(connection)) {
		cport_flags |= GB_SVC_CPORT_FLAG_CSD_N;
	} else if (gb_connection_e2efc_enabled(connection)) {
		cport_flags |= GB_SVC_CPORT_FLAG_CSD_N |
				GB_SVC_CPORT_FLAG_E2EFC;
	}

	ret = gb_svc_connection_create(hd->svc,
				       hd->svc->ap_intf_id,
				       connection->hd_cport_id,
				       intf->interface_id,
				       connection->intf_cport_id,
				       cport_flags);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: failed to create svc connection: %d\n",
			connection->name, ret);
		return ret;
	}

	return 0;
}
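
/*
 * For reference, the cport flags produced by the logic above: flow
 * control disabled -> CSV_N | CSD_N (both safeguards off); E2EFC
 * enabled -> CSV_N | CSD_N | E2EFC; otherwise CSV_N alone, which
 * leaves CSD as the active flow-control mechanism.
 */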
static void
gb_connection_svc_connection_destroy(struct gb_connection *connection)
{
	if (gb_connection_is_static(connection))
		return;

	gb_svc_connection_destroy(connection->hd->svc,
				  connection->hd->svc->ap_intf_id,
				  connection->hd_cport_id,
				  connection->intf->interface_id,
				  connection->intf_cport_id);
}
/* Inform Interface about active CPorts */
static int gb_connection_control_connected(struct gb_connection *connection)
{
	struct gb_control *control;
	u16 cport_id = connection->intf_cport_id;
	int ret;

	if (gb_connection_is_static(connection))
		return 0;

	if (gb_connection_is_control(connection))
		return 0;

	control = connection->intf->control;

	ret = gb_control_connected_operation(control, cport_id);
	if (ret) {
		dev_err(&connection->bundle->dev,
			"failed to connect cport: %d\n", ret);
		return ret;
	}

	return 0;
}
static void
gb_connection_control_disconnecting(struct gb_connection *connection)
{
	struct gb_control *control;
	u16 cport_id = connection->intf_cport_id;
	int ret;

	if (gb_connection_is_static(connection))
		return;

	control = connection->intf->control;

	ret = gb_control_disconnecting_operation(control, cport_id);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: failed to send disconnecting: %d\n",
			connection->name, ret);
	}
}
static void
gb_connection_control_disconnected(struct gb_connection *connection)
{
	struct gb_control *control;
	u16 cport_id = connection->intf_cport_id;
	int ret;

	if (gb_connection_is_static(connection))
		return;

	control = connection->intf->control;

	if (gb_connection_is_control(connection)) {
		if (connection->mode_switch) {
			ret = gb_control_mode_switch_operation(control);
			if (ret) {
				/*
				 * Allow mode switch to time out waiting for
				 * mailbox event.
				 */
				return;
			}
		}

		return;
	}

	ret = gb_control_disconnected_operation(control, cport_id);
	if (ret) {
		dev_warn(&connection->bundle->dev,
			 "failed to disconnect cport: %d\n", ret);
	}
}
static int gb_connection_shutdown_operation(struct gb_connection *connection,
					    u8 phase)
{
	struct gb_cport_shutdown_request *req;
	struct gb_operation *operation;
	int ret;

	operation = gb_operation_create_core(connection,
					     GB_REQUEST_TYPE_CPORT_SHUTDOWN,
					     sizeof(*req), 0, 0,
					     GFP_KERNEL);
	if (!operation)
		return -ENOMEM;

	req = operation->request->payload;
	req->phase = phase;

	ret = gb_operation_request_send_sync(operation);

	gb_operation_put(operation);

	return ret;
}
static int gb_connection_cport_shutdown(struct gb_connection *connection,
					u8 phase)
{
	struct gb_host_device *hd = connection->hd;
	const struct gb_hd_driver *drv = hd->driver;
	int ret;

	if (gb_connection_is_static(connection))
		return 0;

	if (gb_connection_is_offloaded(connection)) {
		if (!drv->cport_shutdown)
			return 0;

		ret = drv->cport_shutdown(hd, connection->hd_cport_id, phase,
					  GB_OPERATION_TIMEOUT_DEFAULT);
	} else {
		ret = gb_connection_shutdown_operation(connection, phase);
	}

	if (ret) {
		dev_err(&hd->dev, "%s: failed to send cport shutdown (phase %d): %d\n",
			connection->name, phase, ret);
		return ret;
	}

	return 0;
}
static int
gb_connection_cport_shutdown_phase_1(struct gb_connection *connection)
{
	return gb_connection_cport_shutdown(connection, 1);
}

static int
gb_connection_cport_shutdown_phase_2(struct gb_connection *connection)
{
	return gb_connection_cport_shutdown(connection, 2);
}
/*
 * Cancel all active operations on a connection.
 *
 * Locking: Called with connection lock held and state set to DISABLED or
 * DISCONNECTING.
 */
static void gb_connection_cancel_operations(struct gb_connection *connection,
					    int errno)
	__must_hold(&connection->lock)
{
	struct gb_operation *operation;

	while (!list_empty(&connection->operations)) {
		operation = list_last_entry(&connection->operations,
					    struct gb_operation, links);
		gb_operation_get(operation);
		spin_unlock_irq(&connection->lock);

		if (gb_operation_is_incoming(operation))
			gb_operation_cancel_incoming(operation, errno);
		else
			gb_operation_cancel(operation, errno);

		gb_operation_put(operation);

		spin_lock_irq(&connection->lock);
	}
}
/*
 * Cancel all active incoming operations on a connection.
 *
 * Locking: Called with connection lock held and state set to ENABLED_TX.
 */
static void
gb_connection_flush_incoming_operations(struct gb_connection *connection,
					int errno)
	__must_hold(&connection->lock)
{
	struct gb_operation *operation;
	bool incoming;

	while (!list_empty(&connection->operations)) {
		incoming = false;
		list_for_each_entry(operation, &connection->operations,
				    links) {
			if (gb_operation_is_incoming(operation)) {
				gb_operation_get(operation);
				incoming = true;
				break;
			}
		}

		if (!incoming)
			break;

		spin_unlock_irq(&connection->lock);

		/* FIXME: flush, not cancel? */
		gb_operation_cancel_incoming(operation, errno);
		gb_operation_put(operation);

		spin_lock_irq(&connection->lock);
	}
}
/*
 * _gb_connection_enable() - enable a connection
 * @connection:		connection to enable
 * @rx:			whether to enable incoming requests
 *
 * Connection-enable helper for DISABLED->ENABLED, DISABLED->ENABLED_TX, and
 * ENABLED_TX->ENABLED state transitions.
 *
 * Locking: Caller holds connection->mutex.
 */
static int _gb_connection_enable(struct gb_connection *connection, bool rx)
{
	int ret;

	/* Handle ENABLED_TX -> ENABLED transitions. */
	if (connection->state == GB_CONNECTION_STATE_ENABLED_TX) {
		if (!(connection->handler && rx))
			return 0;

		spin_lock_irq(&connection->lock);
		connection->state = GB_CONNECTION_STATE_ENABLED;
		spin_unlock_irq(&connection->lock);

		return 0;
	}

	ret = gb_connection_hd_cport_enable(connection);
	if (ret)
		return ret;

	ret = gb_connection_svc_connection_create(connection);
	if (ret)
		goto err_hd_cport_clear;

	ret = gb_connection_hd_cport_connected(connection);
	if (ret)
		goto err_svc_connection_destroy;

	spin_lock_irq(&connection->lock);
	if (connection->handler && rx)
		connection->state = GB_CONNECTION_STATE_ENABLED;
	else
		connection->state = GB_CONNECTION_STATE_ENABLED_TX;
	spin_unlock_irq(&connection->lock);

	ret = gb_connection_control_connected(connection);
	if (ret)
		goto err_control_disconnecting;

	return 0;

err_control_disconnecting:
	spin_lock_irq(&connection->lock);
	connection->state = GB_CONNECTION_STATE_DISCONNECTING;
	gb_connection_cancel_operations(connection, -ESHUTDOWN);
	spin_unlock_irq(&connection->lock);

	/* Transmit queue should already be empty. */
	gb_connection_hd_cport_flush(connection);

	gb_connection_control_disconnecting(connection);
	gb_connection_cport_shutdown_phase_1(connection);
	gb_connection_hd_cport_quiesce(connection);
	gb_connection_cport_shutdown_phase_2(connection);
	gb_connection_control_disconnected(connection);
	connection->state = GB_CONNECTION_STATE_DISABLED;
err_svc_connection_destroy:
	gb_connection_svc_connection_destroy(connection);
err_hd_cport_clear:
	gb_connection_hd_cport_clear(connection);

	gb_connection_hd_cport_disable(connection);

	return ret;
}
int gb_connection_enable(struct gb_connection *connection)
{
	int ret = 0;

	mutex_lock(&connection->mutex);

	if (connection->state == GB_CONNECTION_STATE_ENABLED)
		goto out_unlock;

	ret = _gb_connection_enable(connection, true);
	if (!ret)
		trace_gb_connection_enable(connection);

out_unlock:
	mutex_unlock(&connection->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(gb_connection_enable);
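
/*
 * Note: gb_connection_enable() also accepts incoming requests, provided
 * a handler was registered at creation time; gb_connection_enable_tx()
 * below enables outgoing traffic only, which is useful while a driver
 * is not yet ready to service requests.
 */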
int gb_connection_enable_tx(struct gb_connection *connection)
{
	int ret = 0;

	mutex_lock(&connection->mutex);

	if (connection->state == GB_CONNECTION_STATE_ENABLED) {
		ret = -EINVAL;
		goto out_unlock;
	}

	if (connection->state == GB_CONNECTION_STATE_ENABLED_TX)
		goto out_unlock;

	ret = _gb_connection_enable(connection, false);
	if (!ret)
		trace_gb_connection_enable(connection);

out_unlock:
	mutex_unlock(&connection->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(gb_connection_enable_tx);
void gb_connection_disable_rx(struct gb_connection *connection)
{
	mutex_lock(&connection->mutex);

	spin_lock_irq(&connection->lock);
	if (connection->state != GB_CONNECTION_STATE_ENABLED) {
		spin_unlock_irq(&connection->lock);
		goto out_unlock;
	}
	connection->state = GB_CONNECTION_STATE_ENABLED_TX;
	gb_connection_flush_incoming_operations(connection, -ESHUTDOWN);
	spin_unlock_irq(&connection->lock);

	trace_gb_connection_disable(connection);

out_unlock:
	mutex_unlock(&connection->mutex);
}
EXPORT_SYMBOL_GPL(gb_connection_disable_rx);
void gb_connection_mode_switch_prepare(struct gb_connection *connection)
{
	connection->mode_switch = true;
}

void gb_connection_mode_switch_complete(struct gb_connection *connection)
{
	gb_connection_svc_connection_destroy(connection);
	gb_connection_hd_cport_clear(connection);

	gb_connection_hd_cport_disable(connection);

	connection->mode_switch = false;
}
void gb_connection_disable(struct gb_connection *connection)
{
	mutex_lock(&connection->mutex);

	if (connection->state == GB_CONNECTION_STATE_DISABLED)
		goto out_unlock;

	trace_gb_connection_disable(connection);

	spin_lock_irq(&connection->lock);
	connection->state = GB_CONNECTION_STATE_DISCONNECTING;
	gb_connection_cancel_operations(connection, -ESHUTDOWN);
	spin_unlock_irq(&connection->lock);

	gb_connection_hd_cport_flush(connection);

	gb_connection_control_disconnecting(connection);
	gb_connection_cport_shutdown_phase_1(connection);
	gb_connection_hd_cport_quiesce(connection);
	gb_connection_cport_shutdown_phase_2(connection);
	gb_connection_control_disconnected(connection);

	connection->state = GB_CONNECTION_STATE_DISABLED;

	/* control-connection tear down is deferred when mode switching */
	if (!connection->mode_switch) {
		gb_connection_svc_connection_destroy(connection);
		gb_connection_hd_cport_clear(connection);

		gb_connection_hd_cport_disable(connection);
	}

out_unlock:
	mutex_unlock(&connection->mutex);
}
EXPORT_SYMBOL_GPL(gb_connection_disable);
/* Disable a connection without communicating with the remote end. */
void gb_connection_disable_forced(struct gb_connection *connection)
{
	mutex_lock(&connection->mutex);

	if (connection->state == GB_CONNECTION_STATE_DISABLED)
		goto out_unlock;

	trace_gb_connection_disable(connection);

	spin_lock_irq(&connection->lock);
	connection->state = GB_CONNECTION_STATE_DISABLED;
	gb_connection_cancel_operations(connection, -ESHUTDOWN);
	spin_unlock_irq(&connection->lock);

	gb_connection_hd_cport_flush(connection);

	gb_connection_svc_connection_destroy(connection);
	gb_connection_hd_cport_clear(connection);

	gb_connection_hd_cport_disable(connection);

out_unlock:
	mutex_unlock(&connection->mutex);
}
EXPORT_SYMBOL_GPL(gb_connection_disable_forced);
/* Caller must have disabled the connection before destroying it. */
void gb_connection_destroy(struct gb_connection *connection)
{
	if (!connection)
		return;

	if (WARN_ON(connection->state != GB_CONNECTION_STATE_DISABLED))
		gb_connection_disable(connection);

	mutex_lock(&gb_connection_mutex);

	spin_lock_irq(&gb_connections_lock);
	list_del(&connection->bundle_links);
	list_del(&connection->hd_links);
	spin_unlock_irq(&gb_connections_lock);

	destroy_workqueue(connection->wq);

	gb_hd_cport_release(connection->hd, connection->hd_cport_id);
	connection->hd_cport_id = CPORT_ID_BAD;

	mutex_unlock(&gb_connection_mutex);

	gb_connection_put(connection);
}
EXPORT_SYMBOL_GPL(gb_connection_destroy);
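
/*
 * Teardown sketch (illustrative only; the gb_foo_* names are
 * hypothetical): a bundle driver's disconnect() undoes probe() in
 * reverse order.
 *
 *	static void gb_foo_disconnect(struct gb_bundle *bundle)
 *	{
 *		struct gb_foo *foo = greybus_get_drvdata(bundle);
 *
 *		gb_connection_disable(foo->connection);
 *		gb_connection_destroy(foo->connection);
 *		kfree(foo);
 *	}
 */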
void gb_connection_latency_tag_enable(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->latency_tag_enable)
		return;

	ret = hd->driver->latency_tag_enable(hd, connection->hd_cport_id);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: failed to enable latency tag: %d\n",
			connection->name, ret);
	}
}
EXPORT_SYMBOL_GPL(gb_connection_latency_tag_enable);
void gb_connection_latency_tag_disable(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->latency_tag_disable)
		return;

	ret = hd->driver->latency_tag_disable(hd, connection->hd_cport_id);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: failed to disable latency tag: %d\n",
			connection->name, ret);
	}
}
EXPORT_SYMBOL_GPL(gb_connection_latency_tag_disable);