// SPDX-License-Identifier: GPL-2.0
/*
 * Greybus connections
 *
 * Copyright 2014 Google Inc.
 * Copyright 2014 Linaro Ltd.
 */

#include <linux/workqueue.h>
#include <linux/greybus.h>

#include "greybus_trace.h"

#define GB_CONNECTION_CPORT_QUIESCE_TIMEOUT	1000

static void gb_connection_kref_release(struct kref *kref);

static DEFINE_SPINLOCK(gb_connections_lock);
static DEFINE_MUTEX(gb_connection_mutex);

/* Caller holds gb_connection_mutex. */
static bool gb_connection_cport_in_use(struct gb_interface *intf, u16 cport_id)
{
	struct gb_host_device *hd = intf->hd;
	struct gb_connection *connection;

	list_for_each_entry(connection, &hd->connections, hd_links) {
		if (connection->intf == intf &&
		    connection->intf_cport_id == cport_id)
			return true;
	}

	return false;
}

static void gb_connection_get(struct gb_connection *connection)
{
	kref_get(&connection->kref);

	trace_gb_connection_get(connection);
}

static void gb_connection_put(struct gb_connection *connection)
{
	trace_gb_connection_put(connection);

	kref_put(&connection->kref, gb_connection_kref_release);
}

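/*
 * Note: connections are reference counted; gb_connection_hd_find()
 * below returns with a reference held, which the caller drops with
 * gb_connection_put() once it is done with the connection.
 */
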
/*
 * Returns a reference-counted pointer to the connection if found.
 */
static struct gb_connection *
gb_connection_hd_find(struct gb_host_device *hd, u16 cport_id)
{
	struct gb_connection *connection;
	unsigned long flags;

	spin_lock_irqsave(&gb_connections_lock, flags);
	list_for_each_entry(connection, &hd->connections, hd_links)
		if (connection->hd_cport_id == cport_id) {
			gb_connection_get(connection);
			goto found;
		}
	connection = NULL;
found:
	spin_unlock_irqrestore(&gb_connections_lock, flags);

	return connection;
}

/*
 * Callback from the host driver to let us know that data has been
 * received on the bundle.
 */
void greybus_data_rcvd(struct gb_host_device *hd, u16 cport_id,
		       u8 *data, size_t length)
{
	struct gb_connection *connection;

	trace_gb_hd_in(hd);

	connection = gb_connection_hd_find(hd, cport_id);
	if (!connection) {
		dev_err(&hd->dev,
			"nonexistent connection (%zu bytes dropped)\n", length);
		return;
	}
	gb_connection_recv(connection, data, length);
	gb_connection_put(connection);
}
EXPORT_SYMBOL_GPL(greybus_data_rcvd);

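/*
 * Illustrative sketch (not part of this file): a USB-based host driver
 * would typically call greybus_data_rcvd() from its RX completion
 * handler. The helper names below are hypothetical.
 *
 *	static void gb_example_in_callback(struct urb *urb)
 *	{
 *		struct gb_host_device *hd = urb->context;
 *		u8 *data = urb->transfer_buffer;
 *		u16 cport_id = gb_example_cport_id_from_hdr(data);
 *
 *		greybus_data_rcvd(hd, cport_id, data, urb->actual_length);
 *	}
 */
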
static void gb_connection_kref_release(struct kref *kref)
{
	struct gb_connection *connection;

	connection = container_of(kref, struct gb_connection, kref);

	trace_gb_connection_release(connection);

	kfree(connection);
}

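/*
 * Connection names have the form "<hd_cport_id>/<intf_id>:<cport_id>";
 * static connections (no interface) use zero for the last two fields.
 */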
static void gb_connection_init_name(struct gb_connection *connection)
{
	u16 hd_cport_id = connection->hd_cport_id;
	u16 cport_id = 0;
	u8 intf_id = 0;

	if (connection->intf) {
		intf_id = connection->intf->interface_id;
		cport_id = connection->intf_cport_id;
	}

	snprintf(connection->name, sizeof(connection->name),
		 "%u/%u:%u", hd_cport_id, intf_id, cport_id);
}

/*
 * _gb_connection_create() - create a Greybus connection
 * @hd:			host device of the connection
 * @hd_cport_id:	host-device cport id, or -1 for dynamic allocation
 * @intf:		remote interface, or NULL for static connections
 * @bundle:		remote-interface bundle (may be NULL)
 * @cport_id:		remote-interface cport id, or 0 for static connections
 * @handler:		request handler (may be NULL)
 * @flags:		connection flags
 *
 * Create a Greybus connection, representing the bidirectional link
 * between a CPort on a (local) Greybus host device and a CPort on
 * another Greybus interface.
 *
 * A connection also maintains the state of operations sent over the
 * connection.
 *
 * Serialised against concurrent create and destroy using the
 * gb_connection_mutex.
 *
 * Return: A pointer to the new connection if successful, or an ERR_PTR
 * otherwise.
 */
static struct gb_connection *
_gb_connection_create(struct gb_host_device *hd, int hd_cport_id,
		      struct gb_interface *intf,
		      struct gb_bundle *bundle, int cport_id,
		      gb_request_handler_t handler,
		      unsigned long flags)
{
	struct gb_connection *connection;
	int ret;

	mutex_lock(&gb_connection_mutex);

	if (intf && gb_connection_cport_in_use(intf, cport_id)) {
		dev_err(&intf->dev, "cport %u already in use\n", cport_id);
		ret = -EBUSY;
		goto err_unlock;
	}

	ret = gb_hd_cport_allocate(hd, hd_cport_id, flags);
	if (ret < 0) {
		dev_err(&hd->dev, "failed to allocate cport: %d\n", ret);
		goto err_unlock;
	}
	hd_cport_id = ret;

	connection = kzalloc(sizeof(*connection), GFP_KERNEL);
	if (!connection) {
		ret = -ENOMEM;
		goto err_hd_cport_release;
	}

	connection->hd_cport_id = hd_cport_id;
	connection->intf_cport_id = cport_id;
	connection->hd = hd;
	connection->intf = intf;
	connection->bundle = bundle;
	connection->handler = handler;
	connection->flags = flags;
	if (intf && (intf->quirks & GB_INTERFACE_QUIRK_NO_CPORT_FEATURES))
		connection->flags |= GB_CONNECTION_FLAG_NO_FLOWCTRL;
	connection->state = GB_CONNECTION_STATE_DISABLED;

	atomic_set(&connection->op_cycle, 0);
	mutex_init(&connection->mutex);
	spin_lock_init(&connection->lock);
	INIT_LIST_HEAD(&connection->operations);

	connection->wq = alloc_workqueue("%s:%d", WQ_UNBOUND, 1,
					 dev_name(&hd->dev), hd_cport_id);
	if (!connection->wq) {
		ret = -ENOMEM;
		goto err_free_connection;
	}

	kref_init(&connection->kref);

	gb_connection_init_name(connection);

	spin_lock_irq(&gb_connections_lock);
	list_add(&connection->hd_links, &hd->connections);

	if (bundle)
		list_add(&connection->bundle_links, &bundle->connections);
	else
		INIT_LIST_HEAD(&connection->bundle_links);

	spin_unlock_irq(&gb_connections_lock);

	mutex_unlock(&gb_connection_mutex);

	trace_gb_connection_create(connection);

	return connection;

err_free_connection:
	kfree(connection);
err_hd_cport_release:
	gb_hd_cport_release(hd, hd_cport_id);
err_unlock:
	mutex_unlock(&gb_connection_mutex);

	return ERR_PTR(ret);
}

struct gb_connection *
gb_connection_create_static(struct gb_host_device *hd, u16 hd_cport_id,
			    gb_request_handler_t handler)
{
	return _gb_connection_create(hd, hd_cport_id, NULL, NULL, 0, handler,
				     GB_CONNECTION_FLAG_HIGH_PRIO);
}

struct gb_connection *
gb_connection_create_control(struct gb_interface *intf)
{
	return _gb_connection_create(intf->hd, -1, intf, NULL, 0, NULL,
				     GB_CONNECTION_FLAG_CONTROL |
				     GB_CONNECTION_FLAG_HIGH_PRIO);
}

struct gb_connection *
gb_connection_create(struct gb_bundle *bundle, u16 cport_id,
		     gb_request_handler_t handler)
{
	struct gb_interface *intf = bundle->intf;

	return _gb_connection_create(intf->hd, -1, intf, bundle, cport_id,
				     handler, 0);
}
EXPORT_SYMBOL_GPL(gb_connection_create);

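/*
 * Typical usage (an illustrative sketch, not part of this file): a
 * bundle driver creates and enables its connection in probe. The
 * driver name and request handler below are hypothetical.
 *
 *	static int gb_example_probe(struct gb_bundle *bundle,
 *				    const struct greybus_bundle_id *id)
 *	{
 *		struct gb_connection *connection;
 *		int ret;
 *
 *		connection = gb_connection_create(bundle,
 *				le16_to_cpu(bundle->cports[0].id),
 *				gb_example_request_handler);
 *		if (IS_ERR(connection))
 *			return PTR_ERR(connection);
 *
 *		ret = gb_connection_enable(connection);
 *		if (ret) {
 *			gb_connection_destroy(connection);
 *			return ret;
 *		}
 *
 *		return 0;
 *	}
 */
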
struct gb_connection *
gb_connection_create_flags(struct gb_bundle *bundle, u16 cport_id,
			   gb_request_handler_t handler,
			   unsigned long flags)
{
	struct gb_interface *intf = bundle->intf;

	if (WARN_ON_ONCE(flags & GB_CONNECTION_FLAG_CORE_MASK))
		flags &= ~GB_CONNECTION_FLAG_CORE_MASK;

	return _gb_connection_create(intf->hd, -1, intf, bundle, cport_id,
				     handler, flags);
}
EXPORT_SYMBOL_GPL(gb_connection_create_flags);

struct gb_connection *
gb_connection_create_offloaded(struct gb_bundle *bundle, u16 cport_id,
			       unsigned long flags)
{
	flags |= GB_CONNECTION_FLAG_OFFLOADED;

	return gb_connection_create_flags(bundle, cport_id, NULL, flags);
}
EXPORT_SYMBOL_GPL(gb_connection_create_offloaded);

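/*
 * Host-cport helpers: thin wrappers around the optional gb_hd_driver
 * callbacks. Each one is a no-op when the host driver does not
 * implement the corresponding callback.
 */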
static int gb_connection_hd_cport_enable(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->cport_enable)
		return 0;

	ret = hd->driver->cport_enable(hd, connection->hd_cport_id,
				       connection->flags);
	if (ret) {
		dev_err(&hd->dev, "%s: failed to enable host cport: %d\n",
			connection->name, ret);
		return ret;
	}

	return 0;
}

static void gb_connection_hd_cport_disable(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->cport_disable)
		return;

	ret = hd->driver->cport_disable(hd, connection->hd_cport_id);
	if (ret) {
		dev_err(&hd->dev, "%s: failed to disable host cport: %d\n",
			connection->name, ret);
	}
}

static int gb_connection_hd_cport_connected(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->cport_connected)
		return 0;

	ret = hd->driver->cport_connected(hd, connection->hd_cport_id);
	if (ret) {
		dev_err(&hd->dev, "%s: failed to set connected state: %d\n",
			connection->name, ret);
		return ret;
	}

	return 0;
}

static int gb_connection_hd_cport_flush(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->cport_flush)
		return 0;

	ret = hd->driver->cport_flush(hd, connection->hd_cport_id);
	if (ret) {
		dev_err(&hd->dev, "%s: failed to flush host cport: %d\n",
			connection->name, ret);
		return ret;
	}

	return 0;
}

static int gb_connection_hd_cport_quiesce(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	size_t peer_space;
	int ret;

	if (!hd->driver->cport_quiesce)
		return 0;

	/*
	 * Reserve room for the shutdown request, plus an extra message
	 * header when a mode switch follows.
	 */
	peer_space = sizeof(struct gb_operation_msg_hdr) +
			sizeof(struct gb_cport_shutdown_request);

	if (connection->mode_switch)
		peer_space += sizeof(struct gb_operation_msg_hdr);

	ret = hd->driver->cport_quiesce(hd, connection->hd_cport_id,
					peer_space,
					GB_CONNECTION_CPORT_QUIESCE_TIMEOUT);
	if (ret) {
		dev_err(&hd->dev, "%s: failed to quiesce host cport: %d\n",
			connection->name, ret);
		return ret;
	}

	return 0;
}

static int gb_connection_hd_cport_clear(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->cport_clear)
		return 0;

	ret = hd->driver->cport_clear(hd, connection->hd_cport_id);
	if (ret) {
		dev_err(&hd->dev, "%s: failed to clear host cport: %d\n",
			connection->name, ret);
		return ret;
	}

	return 0;
}

/*
 * Request the SVC to create a connection from AP's cport to interface's
 * cport.
 */
static int
gb_connection_svc_connection_create(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	struct gb_interface *intf;
	u8 cport_flags;
	int ret;

	if (gb_connection_is_static(connection))
		return 0;

	intf = connection->intf;

	/*
	 * Enable either E2EFC or CSD, unless no flow control is requested.
	 */
	cport_flags = GB_SVC_CPORT_FLAG_CSV_N;
	if (gb_connection_flow_control_disabled(connection)) {
		cport_flags |= GB_SVC_CPORT_FLAG_CSD_N;
	} else if (gb_connection_e2efc_enabled(connection)) {
		cport_flags |= GB_SVC_CPORT_FLAG_CSD_N |
				GB_SVC_CPORT_FLAG_E2EFC;
	}

	ret = gb_svc_connection_create(hd->svc,
				       hd->svc->ap_intf_id,
				       connection->hd_cport_id,
				       intf->interface_id,
				       connection->intf_cport_id,
				       cport_flags);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: failed to create svc connection: %d\n",
			connection->name, ret);
		return ret;
	}

	return 0;
}

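/*
 * Resulting SVC cport flags, derived from the logic above:
 *
 *	flow control disabled:	CSV_N | CSD_N	(neither mechanism)
 *	E2EFC enabled:		CSV_N | CSD_N | E2EFC
 *	otherwise:		CSV_N		(CSD remains in effect)
 */
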
static void
gb_connection_svc_connection_destroy(struct gb_connection *connection)
{
	if (gb_connection_is_static(connection))
		return;

	gb_svc_connection_destroy(connection->hd->svc,
				  connection->hd->svc->ap_intf_id,
				  connection->hd_cport_id,
				  connection->intf->interface_id,
				  connection->intf_cport_id);
}

/* Inform Interface about active CPorts */
static int gb_connection_control_connected(struct gb_connection *connection)
{
	struct gb_control *control;
	u16 cport_id = connection->intf_cport_id;
	int ret;

	if (gb_connection_is_static(connection))
		return 0;

	if (gb_connection_is_control(connection))
		return 0;

	control = connection->intf->control;

	ret = gb_control_connected_operation(control, cport_id);
	if (ret) {
		dev_err(&connection->bundle->dev,
			"failed to connect cport: %d\n", ret);
		return ret;
	}

	return 0;
}

static void
gb_connection_control_disconnecting(struct gb_connection *connection)
{
	struct gb_control *control;
	u16 cport_id = connection->intf_cport_id;
	int ret;

	if (gb_connection_is_static(connection))
		return;

	control = connection->intf->control;

	ret = gb_control_disconnecting_operation(control, cport_id);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: failed to send disconnecting: %d\n",
			connection->name, ret);
	}
}

static void
gb_connection_control_disconnected(struct gb_connection *connection)
{
	struct gb_control *control;
	u16 cport_id = connection->intf_cport_id;
	int ret;

	if (gb_connection_is_static(connection))
		return;

	control = connection->intf->control;

	if (gb_connection_is_control(connection)) {
		if (connection->mode_switch) {
			ret = gb_control_mode_switch_operation(control);
			if (ret) {
				/*
				 * Allow mode switch to time out waiting for
				 * mailbox event.
				 */
				return;
			}
		}

		return;
	}

	ret = gb_control_disconnected_operation(control, cport_id);
	if (ret) {
		dev_warn(&connection->bundle->dev,
			 "failed to disconnect cport: %d\n", ret);
	}
}

static int gb_connection_shutdown_operation(struct gb_connection *connection,
					    u8 phase)
{
	struct gb_cport_shutdown_request *req;
	struct gb_operation *operation;
	int ret;

	operation = gb_operation_create_core(connection,
					     GB_REQUEST_TYPE_CPORT_SHUTDOWN,
					     sizeof(*req), 0, 0,
					     GFP_NOIO);
	if (!operation)
		return -ENOMEM;

	req = operation->request->payload;
	req->phase = phase;

	ret = gb_operation_request_send_sync(operation);

	gb_operation_put(operation);

	return ret;
}

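/*
 * Send a cport shutdown request for the given phase. Offloaded
 * connections cannot use the operation layer, so the request is sent
 * through the host driver's cport_shutdown callback instead.
 */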
static int gb_connection_cport_shutdown(struct gb_connection *connection,
					u8 phase)
{
	struct gb_host_device *hd = connection->hd;
	const struct gb_hd_driver *drv = hd->driver;
	int ret;

	if (gb_connection_is_static(connection))
		return 0;

	if (gb_connection_is_offloaded(connection)) {
		if (!drv->cport_shutdown)
			return 0;

		ret = drv->cport_shutdown(hd, connection->hd_cport_id, phase,
					  GB_OPERATION_TIMEOUT_DEFAULT);
	} else {
		ret = gb_connection_shutdown_operation(connection, phase);
	}

	if (ret) {
		dev_err(&hd->dev, "%s: failed to send cport shutdown (phase %d): %d\n",
			connection->name, phase, ret);
		return ret;
	}

	return 0;
}

static int
gb_connection_cport_shutdown_phase_1(struct gb_connection *connection)
{
	return gb_connection_cport_shutdown(connection, 1);
}

static int
gb_connection_cport_shutdown_phase_2(struct gb_connection *connection)
{
	return gb_connection_cport_shutdown(connection, 2);
}

/*
 * Cancel all active operations on a connection.
 *
 * Locking: Called with connection lock held and state set to DISABLED or
 * DISCONNECTING.
 */
static void gb_connection_cancel_operations(struct gb_connection *connection,
					    int errno)
	__must_hold(&connection->lock)
{
	struct gb_operation *operation;

	while (!list_empty(&connection->operations)) {
		operation = list_last_entry(&connection->operations,
					    struct gb_operation, links);
		gb_operation_get(operation);
		spin_unlock_irq(&connection->lock);

		if (gb_operation_is_incoming(operation))
			gb_operation_cancel_incoming(operation, errno);
		else
			gb_operation_cancel(operation, errno);

		gb_operation_put(operation);

		spin_lock_irq(&connection->lock);
	}
}

/*
 * Cancel all active incoming operations on a connection.
 *
 * Locking: Called with connection lock held and state set to ENABLED_TX.
 */
static void
gb_connection_flush_incoming_operations(struct gb_connection *connection,
					int errno)
	__must_hold(&connection->lock)
{
	struct gb_operation *operation;
	bool incoming;

	while (!list_empty(&connection->operations)) {
		incoming = false;
		list_for_each_entry(operation, &connection->operations,
				    links) {
			if (gb_operation_is_incoming(operation)) {
				gb_operation_get(operation);
				incoming = true;
				break;
			}
		}

		if (!incoming)
			break;

		spin_unlock_irq(&connection->lock);

		/* FIXME: flush, not cancel? */
		gb_operation_cancel_incoming(operation, errno);
		gb_operation_put(operation);

		spin_lock_irq(&connection->lock);
	}
}

/*
 * _gb_connection_enable() - enable a connection
 * @connection:		connection to enable
 * @rx:			whether to enable incoming requests
 *
 * Connection-enable helper for DISABLED->ENABLED, DISABLED->ENABLED_TX, and
 * ENABLED_TX->ENABLED state transitions.
 *
 * Locking: Caller holds connection->mutex.
 */
static int _gb_connection_enable(struct gb_connection *connection, bool rx)
{
	int ret;

	/* Handle ENABLED_TX -> ENABLED transitions. */
	if (connection->state == GB_CONNECTION_STATE_ENABLED_TX) {
		if (!(connection->handler && rx))
			return 0;

		spin_lock_irq(&connection->lock);
		connection->state = GB_CONNECTION_STATE_ENABLED;
		spin_unlock_irq(&connection->lock);

		return 0;
	}

	ret = gb_connection_hd_cport_enable(connection);
	if (ret)
		return ret;

	ret = gb_connection_svc_connection_create(connection);
	if (ret)
		goto err_hd_cport_clear;

	ret = gb_connection_hd_cport_connected(connection);
	if (ret)
		goto err_svc_connection_destroy;

	spin_lock_irq(&connection->lock);
	if (connection->handler && rx)
		connection->state = GB_CONNECTION_STATE_ENABLED;
	else
		connection->state = GB_CONNECTION_STATE_ENABLED_TX;
	spin_unlock_irq(&connection->lock);

	ret = gb_connection_control_connected(connection);
	if (ret)
		goto err_control_disconnecting;

	return 0;

err_control_disconnecting:
	spin_lock_irq(&connection->lock);
	connection->state = GB_CONNECTION_STATE_DISCONNECTING;
	gb_connection_cancel_operations(connection, -ESHUTDOWN);
	spin_unlock_irq(&connection->lock);

	/* Transmit queue should already be empty. */
	gb_connection_hd_cport_flush(connection);

	gb_connection_control_disconnecting(connection);
	gb_connection_cport_shutdown_phase_1(connection);
	gb_connection_hd_cport_quiesce(connection);
	gb_connection_cport_shutdown_phase_2(connection);
	gb_connection_control_disconnected(connection);
	connection->state = GB_CONNECTION_STATE_DISABLED;
err_svc_connection_destroy:
	gb_connection_svc_connection_destroy(connection);
err_hd_cport_clear:
	gb_connection_hd_cport_clear(connection);

	gb_connection_hd_cport_disable(connection);

	return ret;
}

int gb_connection_enable(struct gb_connection *connection)
{
	int ret = 0;

	mutex_lock(&connection->mutex);

	if (connection->state == GB_CONNECTION_STATE_ENABLED)
		goto out_unlock;

	ret = _gb_connection_enable(connection, true);
	if (!ret)
		trace_gb_connection_enable(connection);

out_unlock:
	mutex_unlock(&connection->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(gb_connection_enable);

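/*
 * Enable a connection for outgoing requests only. A later call to
 * gb_connection_enable() performs the ENABLED_TX -> ENABLED transition
 * and starts accepting incoming requests as well.
 */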
int gb_connection_enable_tx(struct gb_connection *connection)
{
	int ret = 0;

	mutex_lock(&connection->mutex);

	if (connection->state == GB_CONNECTION_STATE_ENABLED) {
		ret = -EINVAL;
		goto out_unlock;
	}

	if (connection->state == GB_CONNECTION_STATE_ENABLED_TX)
		goto out_unlock;

	ret = _gb_connection_enable(connection, false);
	if (!ret)
		trace_gb_connection_enable(connection);

out_unlock:
	mutex_unlock(&connection->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(gb_connection_enable_tx);

void gb_connection_disable_rx(struct gb_connection *connection)
{
	mutex_lock(&connection->mutex);

	spin_lock_irq(&connection->lock);
	if (connection->state != GB_CONNECTION_STATE_ENABLED) {
		spin_unlock_irq(&connection->lock);
		goto out_unlock;
	}
	connection->state = GB_CONNECTION_STATE_ENABLED_TX;
	gb_connection_flush_incoming_operations(connection, -ESHUTDOWN);
	spin_unlock_irq(&connection->lock);

	trace_gb_connection_disable(connection);

out_unlock:
	mutex_unlock(&connection->mutex);
}
EXPORT_SYMBOL_GPL(gb_connection_disable_rx);

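/*
 * Mode-switch bracketing: gb_connection_mode_switch_prepare() marks the
 * connection so that gb_connection_disable() defers the SVC-connection
 * and host-cport teardown, and gb_connection_mode_switch_complete()
 * finishes that teardown once the mode switch is done.
 */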
void gb_connection_mode_switch_prepare(struct gb_connection *connection)
{
	connection->mode_switch = true;
}

void gb_connection_mode_switch_complete(struct gb_connection *connection)
{
	gb_connection_svc_connection_destroy(connection);
	gb_connection_hd_cport_clear(connection);

	gb_connection_hd_cport_disable(connection);

	connection->mode_switch = false;
}

void gb_connection_disable(struct gb_connection *connection)
{
	mutex_lock(&connection->mutex);

	if (connection->state == GB_CONNECTION_STATE_DISABLED)
		goto out_unlock;

	trace_gb_connection_disable(connection);

	spin_lock_irq(&connection->lock);
	connection->state = GB_CONNECTION_STATE_DISCONNECTING;
	gb_connection_cancel_operations(connection, -ESHUTDOWN);
	spin_unlock_irq(&connection->lock);

	gb_connection_hd_cport_flush(connection);

	gb_connection_control_disconnecting(connection);
	gb_connection_cport_shutdown_phase_1(connection);
	gb_connection_hd_cport_quiesce(connection);
	gb_connection_cport_shutdown_phase_2(connection);
	gb_connection_control_disconnected(connection);

	connection->state = GB_CONNECTION_STATE_DISABLED;

	/* control-connection tear down is deferred when mode switching */
	if (!connection->mode_switch) {
		gb_connection_svc_connection_destroy(connection);
		gb_connection_hd_cport_clear(connection);

		gb_connection_hd_cport_disable(connection);
	}

out_unlock:
	mutex_unlock(&connection->mutex);
}
EXPORT_SYMBOL_GPL(gb_connection_disable);

/* Disable a connection without communicating with the remote end. */
void gb_connection_disable_forced(struct gb_connection *connection)
{
	mutex_lock(&connection->mutex);

	if (connection->state == GB_CONNECTION_STATE_DISABLED)
		goto out_unlock;

	trace_gb_connection_disable(connection);

	spin_lock_irq(&connection->lock);
	connection->state = GB_CONNECTION_STATE_DISABLED;
	gb_connection_cancel_operations(connection, -ESHUTDOWN);
	spin_unlock_irq(&connection->lock);

	gb_connection_hd_cport_flush(connection);

	gb_connection_svc_connection_destroy(connection);
	gb_connection_hd_cport_clear(connection);

	gb_connection_hd_cport_disable(connection);

out_unlock:
	mutex_unlock(&connection->mutex);
}
EXPORT_SYMBOL_GPL(gb_connection_disable_forced);

/* Caller must have disabled the connection before destroying it. */
void gb_connection_destroy(struct gb_connection *connection)
{
	if (!connection)
		return;

	if (WARN_ON(connection->state != GB_CONNECTION_STATE_DISABLED))
		gb_connection_disable(connection);

	mutex_lock(&gb_connection_mutex);

	spin_lock_irq(&gb_connections_lock);
	list_del(&connection->bundle_links);
	list_del(&connection->hd_links);
	spin_unlock_irq(&gb_connections_lock);

	destroy_workqueue(connection->wq);

	gb_hd_cport_release(connection->hd, connection->hd_cport_id);
	connection->hd_cport_id = CPORT_ID_BAD;

	mutex_unlock(&gb_connection_mutex);

	gb_connection_put(connection);
}
EXPORT_SYMBOL_GPL(gb_connection_destroy);

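/*
 * Typical teardown (an illustrative sketch, not part of this file),
 * mirroring the create/enable order used at probe time:
 *
 *	gb_connection_disable(connection);
 *	gb_connection_destroy(connection);
 */
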
void gb_connection_latency_tag_enable(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->latency_tag_enable)
		return;

	ret = hd->driver->latency_tag_enable(hd, connection->hd_cport_id);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: failed to enable latency tag: %d\n",
			connection->name, ret);
	}
}
EXPORT_SYMBOL_GPL(gb_connection_latency_tag_enable);

void gb_connection_latency_tag_disable(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->latency_tag_disable)
		return;

	ret = hd->driver->latency_tag_disable(hd, connection->hd_cport_id);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: failed to disable latency tag: %d\n",
			connection->name, ret);
	}
}
EXPORT_SYMBOL_GPL(gb_connection_latency_tag_disable);