// SPDX-License-Identifier: GPL-2.0
/*
 * Greybus connections
 *
 * Copyright 2014 Google Inc.
 * Copyright 2014 Linaro Ltd.
 */

#include <linux/workqueue.h>
#include <linux/greybus.h>

#include "greybus_trace.h"

#define GB_CONNECTION_CPORT_QUIESCE_TIMEOUT	1000
static void gb_connection_kref_release(struct kref *kref);

static DEFINE_SPINLOCK(gb_connections_lock);
static DEFINE_MUTEX(gb_connection_mutex);
/* Caller holds gb_connection_mutex. */
static bool gb_connection_cport_in_use(struct gb_interface *intf, u16 cport_id)
{
	struct gb_host_device *hd = intf->hd;
	struct gb_connection *connection;

	list_for_each_entry(connection, &hd->connections, hd_links) {
		if (connection->intf == intf &&
		    connection->intf_cport_id == cport_id)
			return true;
	}

	return false;
}
static void gb_connection_get(struct gb_connection *connection)
{
	kref_get(&connection->kref);

	trace_gb_connection_get(connection);
}
static void gb_connection_put(struct gb_connection *connection)
{
	trace_gb_connection_put(connection);

	kref_put(&connection->kref, gb_connection_kref_release);
}
/*
 * Returns a reference-counted pointer to the connection if found.
 */
static struct gb_connection *
gb_connection_hd_find(struct gb_host_device *hd, u16 cport_id)
{
	struct gb_connection *connection;
	unsigned long flags;

	spin_lock_irqsave(&gb_connections_lock, flags);
	list_for_each_entry(connection, &hd->connections, hd_links)
		if (connection->hd_cport_id == cport_id) {
			gb_connection_get(connection);
			goto found;
		}
	connection = NULL;
found:
	spin_unlock_irqrestore(&gb_connections_lock, flags);

	return connection;
}
/*
 * Callback from the host driver to let us know that data has been
 * received on the bundle.
 */
void greybus_data_rcvd(struct gb_host_device *hd, u16 cport_id,
		       u8 *data, size_t length)
{
	struct gb_connection *connection;

	connection = gb_connection_hd_find(hd, cport_id);
	if (!connection) {
		dev_err(&hd->dev,
			"nonexistent connection (%zu bytes dropped)\n", length);
		return;
	}
	gb_connection_recv(connection, data, length);
	gb_connection_put(connection);
}
EXPORT_SYMBOL_GPL(greybus_data_rcvd);
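
/*
 * Usage sketch (illustrative, not part of this file): a host-controller
 * driver calls greybus_data_rcvd() from its receive path once it has
 * determined the destination cport. The irqsave locking in the connection
 * lookup above suggests it may be called in atomic context.
 * "example_rx_complete" is a hypothetical handler name:
 *
 *	static void example_rx_complete(struct gb_host_device *hd,
 *					u16 cport_id, u8 *buf, size_t len)
 *	{
 *		greybus_data_rcvd(hd, cport_id, buf, len);
 *	}
 */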
static void gb_connection_kref_release(struct kref *kref)
{
	struct gb_connection *connection;

	connection = container_of(kref, struct gb_connection, kref);

	trace_gb_connection_release(connection);

	kfree(connection);
}
static void gb_connection_init_name(struct gb_connection *connection)
{
	u16 hd_cport_id = connection->hd_cport_id;
	u16 cport_id = 0;
	u8 intf_id = 0;

	if (connection->intf) {
		intf_id = connection->intf->interface_id;
		cport_id = connection->intf_cport_id;
	}

	snprintf(connection->name, sizeof(connection->name),
		 "%u/%u:%u", hd_cport_id, intf_id, cport_id);
}
/*
 * _gb_connection_create() - create a Greybus connection
 * @hd:			host device of the connection
 * @hd_cport_id:	host-device cport id, or -1 for dynamic allocation
 * @intf:		remote interface, or NULL for static connections
 * @bundle:		remote-interface bundle (may be NULL)
 * @cport_id:		remote-interface cport id, or 0 for static connections
 * @handler:		request handler (may be NULL)
 * @flags:		connection flags
 *
 * Create a Greybus connection, representing the bidirectional link
 * between a CPort on a (local) Greybus host device and a CPort on
 * another Greybus interface.
 *
 * A connection also maintains the state of operations sent over the
 * connection.
 *
 * Serialised against concurrent create and destroy using the
 * gb_connection_mutex.
 *
 * Return: A pointer to the new connection if successful, or an ERR_PTR
 * otherwise.
 */
static struct gb_connection *
_gb_connection_create(struct gb_host_device *hd, int hd_cport_id,
		      struct gb_interface *intf,
		      struct gb_bundle *bundle, int cport_id,
		      gb_request_handler_t handler,
		      unsigned long flags)
{
	struct gb_connection *connection;
	int ret;

	mutex_lock(&gb_connection_mutex);

	if (intf && gb_connection_cport_in_use(intf, cport_id)) {
		dev_err(&intf->dev, "cport %u already in use\n", cport_id);
		ret = -EBUSY;
		goto err_unlock;
	}

	ret = gb_hd_cport_allocate(hd, hd_cport_id, flags);
	if (ret < 0) {
		dev_err(&hd->dev, "failed to allocate cport: %d\n", ret);
		goto err_unlock;
	}
	hd_cport_id = ret;

	connection = kzalloc(sizeof(*connection), GFP_KERNEL);
	if (!connection) {
		ret = -ENOMEM;
		goto err_hd_cport_release;
	}

	connection->hd_cport_id = hd_cport_id;
	connection->intf_cport_id = cport_id;
	connection->hd = hd;
	connection->intf = intf;
	connection->bundle = bundle;
	connection->handler = handler;
	connection->flags = flags;
	if (intf && (intf->quirks & GB_INTERFACE_QUIRK_NO_CPORT_FEATURES))
		connection->flags |= GB_CONNECTION_FLAG_NO_FLOWCTRL;
	connection->state = GB_CONNECTION_STATE_DISABLED;

	atomic_set(&connection->op_cycle, 0);
	mutex_init(&connection->mutex);
	spin_lock_init(&connection->lock);
	INIT_LIST_HEAD(&connection->operations);

	connection->wq = alloc_workqueue("%s:%d", WQ_UNBOUND, 1,
					 dev_name(&hd->dev), hd_cport_id);
	if (!connection->wq) {
		ret = -ENOMEM;
		goto err_free_connection;
	}

	kref_init(&connection->kref);

	gb_connection_init_name(connection);

	spin_lock_irq(&gb_connections_lock);
	list_add(&connection->hd_links, &hd->connections);

	if (bundle)
		list_add(&connection->bundle_links, &bundle->connections);
	else
		INIT_LIST_HEAD(&connection->bundle_links);

	spin_unlock_irq(&gb_connections_lock);

	mutex_unlock(&gb_connection_mutex);

	trace_gb_connection_create(connection);

	return connection;

err_free_connection:
	kfree(connection);
err_hd_cport_release:
	gb_hd_cport_release(hd, hd_cport_id);
err_unlock:
	mutex_unlock(&gb_connection_mutex);

	return ERR_PTR(ret);
}
struct gb_connection *
gb_connection_create_static(struct gb_host_device *hd, u16 hd_cport_id,
			    gb_request_handler_t handler)
{
	return _gb_connection_create(hd, hd_cport_id, NULL, NULL, 0, handler,
				     GB_CONNECTION_FLAG_HIGH_PRIO);
}
struct gb_connection *
gb_connection_create_control(struct gb_interface *intf)
{
	return _gb_connection_create(intf->hd, -1, intf, NULL, 0, NULL,
				     GB_CONNECTION_FLAG_CONTROL |
				     GB_CONNECTION_FLAG_HIGH_PRIO);
}
struct gb_connection *
gb_connection_create(struct gb_bundle *bundle, u16 cport_id,
		     gb_request_handler_t handler)
{
	struct gb_interface *intf = bundle->intf;

	return _gb_connection_create(intf->hd, -1, intf, bundle, cport_id,
				     handler, 0);
}
EXPORT_SYMBOL_GPL(gb_connection_create);
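
/*
 * Usage sketch (illustrative): a bundle driver's probe routine would
 * typically create and enable a connection for one of its cport
 * descriptors. "gb_example_request_handler" is a hypothetical handler,
 * not defined here:
 *
 *	connection = gb_connection_create(bundle,
 *					  le16_to_cpu(cport_desc->id),
 *					  gb_example_request_handler);
 *	if (IS_ERR(connection))
 *		return PTR_ERR(connection);
 *
 *	ret = gb_connection_enable(connection);
 *	if (ret) {
 *		gb_connection_destroy(connection);
 *		return ret;
 *	}
 */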
struct gb_connection *
gb_connection_create_flags(struct gb_bundle *bundle, u16 cport_id,
			   gb_request_handler_t handler,
			   unsigned long flags)
{
	struct gb_interface *intf = bundle->intf;

	if (WARN_ON_ONCE(flags & GB_CONNECTION_FLAG_CORE_MASK))
		flags &= ~GB_CONNECTION_FLAG_CORE_MASK;

	return _gb_connection_create(intf->hd, -1, intf, bundle, cport_id,
				     handler, flags);
}
EXPORT_SYMBOL_GPL(gb_connection_create_flags);
struct gb_connection *
gb_connection_create_offloaded(struct gb_bundle *bundle, u16 cport_id,
			       unsigned long flags)
{
	flags |= GB_CONNECTION_FLAG_OFFLOADED;

	return gb_connection_create_flags(bundle, cport_id, NULL, flags);
}
EXPORT_SYMBOL_GPL(gb_connection_create_offloaded);
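
/*
 * Usage sketch (illustrative): offloaded connections are for cports whose
 * traffic is handled entirely by the host driver, which is why no request
 * handler can be supplied and NULL is passed above. A driver for a
 * high-bandwidth data cport might create one as:
 *
 *	conn = gb_connection_create_offloaded(bundle, cport_id,
 *					      GB_CONNECTION_FLAG_HIGH_PRIO);
 *	if (IS_ERR(conn))
 *		return PTR_ERR(conn);
 */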
static int gb_connection_hd_cport_enable(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->cport_enable)
		return 0;

	ret = hd->driver->cport_enable(hd, connection->hd_cport_id,
				       connection->flags);
	if (ret) {
		dev_err(&hd->dev, "%s: failed to enable host cport: %d\n",
			connection->name, ret);
		return ret;
	}

	return 0;
}
static void gb_connection_hd_cport_disable(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->cport_disable)
		return;

	ret = hd->driver->cport_disable(hd, connection->hd_cport_id);
	if (ret) {
		dev_err(&hd->dev, "%s: failed to disable host cport: %d\n",
			connection->name, ret);
	}
}
static int gb_connection_hd_cport_connected(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->cport_connected)
		return 0;

	ret = hd->driver->cport_connected(hd, connection->hd_cport_id);
	if (ret) {
		dev_err(&hd->dev, "%s: failed to set connected state: %d\n",
			connection->name, ret);
		return ret;
	}

	return 0;
}
static int gb_connection_hd_cport_flush(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->cport_flush)
		return 0;

	ret = hd->driver->cport_flush(hd, connection->hd_cport_id);
	if (ret) {
		dev_err(&hd->dev, "%s: failed to flush host cport: %d\n",
			connection->name, ret);
		return ret;
	}

	return 0;
}
static int gb_connection_hd_cport_quiesce(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	size_t peer_space;
	int ret;

	if (!hd->driver->cport_quiesce)
		return 0;

	peer_space = sizeof(struct gb_operation_msg_hdr) +
			sizeof(struct gb_cport_shutdown_request);

	if (connection->mode_switch)
		peer_space += sizeof(struct gb_operation_msg_hdr);

	ret = hd->driver->cport_quiesce(hd, connection->hd_cport_id,
					peer_space,
					GB_CONNECTION_CPORT_QUIESCE_TIMEOUT);
	if (ret) {
		dev_err(&hd->dev, "%s: failed to quiesce host cport: %d\n",
			connection->name, ret);
		return ret;
	}

	return 0;
}
static int gb_connection_hd_cport_clear(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->cport_clear)
		return 0;

	ret = hd->driver->cport_clear(hd, connection->hd_cport_id);
	if (ret) {
		dev_err(&hd->dev, "%s: failed to clear host cport: %d\n",
			connection->name, ret);
		return ret;
	}

	return 0;
}
/*
 * Request the SVC to create a connection from AP's cport to interface's
 * cport.
 */
static int
gb_connection_svc_connection_create(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	struct gb_interface *intf;
	u8 cport_flags;
	int ret;

	if (gb_connection_is_static(connection))
		return 0;

	intf = connection->intf;

	/*
	 * Enable either E2EFC or CSD, unless no flow control is requested.
	 */
	cport_flags = GB_SVC_CPORT_FLAG_CSV_N;
	if (gb_connection_flow_control_disabled(connection)) {
		cport_flags |= GB_SVC_CPORT_FLAG_CSD_N;
	} else if (gb_connection_e2efc_enabled(connection)) {
		cport_flags |= GB_SVC_CPORT_FLAG_CSD_N |
				GB_SVC_CPORT_FLAG_E2EFC;
	}

	ret = gb_svc_connection_create(hd->svc,
				       hd->svc->ap_intf_id,
				       connection->hd_cport_id,
				       intf->interface_id,
				       connection->intf_cport_id,
				       cport_flags);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: failed to create svc connection: %d\n",
			connection->name, ret);
		return ret;
	}

	return 0;
}
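
/*
 * For reference, the cport-flag selection above reduces to the following
 * (summarised from the code, not from the UniPro specification):
 *
 *	flow control disabled:	CSV_N | CSD_N	(neither CSD nor E2EFC)
 *	E2EFC enabled:		CSV_N | CSD_N | E2EFC
 *	otherwise:		CSV_N		(CSD provides flow control)
 */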
static void
gb_connection_svc_connection_destroy(struct gb_connection *connection)
{
	if (gb_connection_is_static(connection))
		return;

	gb_svc_connection_destroy(connection->hd->svc,
				  connection->hd->svc->ap_intf_id,
				  connection->hd_cport_id,
				  connection->intf->interface_id,
				  connection->intf_cport_id);
}
/* Inform Interface about active CPorts */
static int gb_connection_control_connected(struct gb_connection *connection)
{
	struct gb_control *control;
	u16 cport_id = connection->intf_cport_id;
	int ret;

	if (gb_connection_is_static(connection))
		return 0;

	if (gb_connection_is_control(connection))
		return 0;

	control = connection->intf->control;

	ret = gb_control_connected_operation(control, cport_id);
	if (ret) {
		dev_err(&connection->bundle->dev,
			"failed to connect cport: %d\n", ret);
		return ret;
	}

	return 0;
}
static void
gb_connection_control_disconnecting(struct gb_connection *connection)
{
	struct gb_control *control;
	u16 cport_id = connection->intf_cport_id;
	int ret;

	if (gb_connection_is_static(connection))
		return;

	control = connection->intf->control;

	ret = gb_control_disconnecting_operation(control, cport_id);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: failed to send disconnecting: %d\n",
			connection->name, ret);
	}
}
static void
gb_connection_control_disconnected(struct gb_connection *connection)
{
	struct gb_control *control;
	u16 cport_id = connection->intf_cport_id;
	int ret;

	if (gb_connection_is_static(connection))
		return;

	control = connection->intf->control;

	if (gb_connection_is_control(connection)) {
		if (connection->mode_switch) {
			ret = gb_control_mode_switch_operation(control);
			if (ret) {
				/*
				 * Allow mode switch to time out waiting for
				 * mailbox event.
				 */
				return;
			}
		}

		return;
	}

	ret = gb_control_disconnected_operation(control, cport_id);
	if (ret) {
		dev_warn(&connection->bundle->dev,
			 "failed to disconnect cport: %d\n", ret);
	}
}
static int gb_connection_shutdown_operation(struct gb_connection *connection,
					    u8 phase)
{
	struct gb_cport_shutdown_request *req;
	struct gb_operation *operation;
	int ret;

	operation = gb_operation_create_core(connection,
					     GB_REQUEST_TYPE_CPORT_SHUTDOWN,
					     sizeof(*req), 0, 0,
					     GFP_KERNEL);
	if (!operation)
		return -ENOMEM;

	req = operation->request->payload;
	req->phase = phase;

	ret = gb_operation_request_send_sync(operation);

	gb_operation_put(operation);

	return ret;
}
static int gb_connection_cport_shutdown(struct gb_connection *connection,
					u8 phase)
{
	struct gb_host_device *hd = connection->hd;
	const struct gb_hd_driver *drv = hd->driver;
	int ret;

	if (gb_connection_is_static(connection))
		return 0;

	if (gb_connection_is_offloaded(connection)) {
		if (!drv->cport_shutdown)
			return 0;

		ret = drv->cport_shutdown(hd, connection->hd_cport_id, phase,
					  GB_OPERATION_TIMEOUT_DEFAULT);
	} else {
		ret = gb_connection_shutdown_operation(connection, phase);
	}

	if (ret) {
		dev_err(&hd->dev, "%s: failed to send cport shutdown (phase %d): %d\n",
			connection->name, phase, ret);
		return ret;
	}

	return 0;
}
static int
gb_connection_cport_shutdown_phase_1(struct gb_connection *connection)
{
	return gb_connection_cport_shutdown(connection, 1);
}

static int
gb_connection_cport_shutdown_phase_2(struct gb_connection *connection)
{
	return gb_connection_cport_shutdown(connection, 2);
}
/*
 * Cancel all active operations on a connection.
 *
 * Locking: Called with connection lock held and state set to DISABLED or
 * DISCONNECTING.
 */
static void gb_connection_cancel_operations(struct gb_connection *connection,
					    int errno)
	__must_hold(&connection->lock)
{
	struct gb_operation *operation;

	while (!list_empty(&connection->operations)) {
		operation = list_last_entry(&connection->operations,
					    struct gb_operation, links);
		gb_operation_get(operation);
		spin_unlock_irq(&connection->lock);

		if (gb_operation_is_incoming(operation))
			gb_operation_cancel_incoming(operation, errno);
		else
			gb_operation_cancel(operation, errno);

		gb_operation_put(operation);

		spin_lock_irq(&connection->lock);
	}
}
/*
 * Cancel all active incoming operations on a connection.
 *
 * Locking: Called with connection lock held and state set to ENABLED_TX.
 */
static void
gb_connection_flush_incoming_operations(struct gb_connection *connection,
					int errno)
	__must_hold(&connection->lock)
{
	struct gb_operation *operation;
	bool incoming;

	while (!list_empty(&connection->operations)) {
		incoming = false;
		list_for_each_entry(operation, &connection->operations,
				    links) {
			if (gb_operation_is_incoming(operation)) {
				gb_operation_get(operation);
				incoming = true;
				break;
			}
		}

		if (!incoming)
			break;

		spin_unlock_irq(&connection->lock);

		/* FIXME: flush, not cancel? */
		gb_operation_cancel_incoming(operation, errno);
		gb_operation_put(operation);

		spin_lock_irq(&connection->lock);
	}
}
/*
 * _gb_connection_enable() - enable a connection
 * @connection:		connection to enable
 * @rx:			whether to enable incoming requests
 *
 * Connection-enable helper for DISABLED->ENABLED, DISABLED->ENABLED_TX, and
 * ENABLED_TX->ENABLED state transitions.
 *
 * Locking: Caller holds connection->mutex.
 */
static int _gb_connection_enable(struct gb_connection *connection, bool rx)
{
	int ret;

	/* Handle ENABLED_TX -> ENABLED transitions. */
	if (connection->state == GB_CONNECTION_STATE_ENABLED_TX) {
		if (!(connection->handler && rx))
			return 0;

		spin_lock_irq(&connection->lock);
		connection->state = GB_CONNECTION_STATE_ENABLED;
		spin_unlock_irq(&connection->lock);

		return 0;
	}

	ret = gb_connection_hd_cport_enable(connection);
	if (ret)
		return ret;

	ret = gb_connection_svc_connection_create(connection);
	if (ret)
		goto err_hd_cport_clear;

	ret = gb_connection_hd_cport_connected(connection);
	if (ret)
		goto err_svc_connection_destroy;

	spin_lock_irq(&connection->lock);
	if (connection->handler && rx)
		connection->state = GB_CONNECTION_STATE_ENABLED;
	else
		connection->state = GB_CONNECTION_STATE_ENABLED_TX;
	spin_unlock_irq(&connection->lock);

	ret = gb_connection_control_connected(connection);
	if (ret)
		goto err_control_disconnecting;

	return 0;

err_control_disconnecting:
	spin_lock_irq(&connection->lock);
	connection->state = GB_CONNECTION_STATE_DISCONNECTING;
	gb_connection_cancel_operations(connection, -ESHUTDOWN);
	spin_unlock_irq(&connection->lock);

	/* Transmit queue should already be empty. */
	gb_connection_hd_cport_flush(connection);

	gb_connection_control_disconnecting(connection);
	gb_connection_cport_shutdown_phase_1(connection);
	gb_connection_hd_cport_quiesce(connection);
	gb_connection_cport_shutdown_phase_2(connection);
	gb_connection_control_disconnected(connection);
	connection->state = GB_CONNECTION_STATE_DISABLED;
err_svc_connection_destroy:
	gb_connection_svc_connection_destroy(connection);
err_hd_cport_clear:
	gb_connection_hd_cport_clear(connection);

	gb_connection_hd_cport_disable(connection);

	return ret;
}
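
/*
 * For reference, the state transitions implemented above and in the
 * enable/disable helpers below (summarised from the code):
 *
 *	DISABLED ----gb_connection_enable()-----> ENABLED (or ENABLED_TX
 *						  if there is no handler)
 *	DISABLED ---gb_connection_enable_tx()---> ENABLED_TX
 *	ENABLED_TX --gb_connection_enable()-----> ENABLED
 *	ENABLED ----gb_connection_disable_rx()--> ENABLED_TX
 *	any enabled state --gb_connection_disable()--> DISCONNECTING,
 *						       then DISABLED
 */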
int gb_connection_enable(struct gb_connection *connection)
{
	int ret = 0;

	mutex_lock(&connection->mutex);

	if (connection->state == GB_CONNECTION_STATE_ENABLED)
		goto out_unlock;

	ret = _gb_connection_enable(connection, true);
	if (!ret)
		trace_gb_connection_enable(connection);

out_unlock:
	mutex_unlock(&connection->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(gb_connection_enable);
int gb_connection_enable_tx(struct gb_connection *connection)
{
	int ret = 0;

	mutex_lock(&connection->mutex);

	if (connection->state == GB_CONNECTION_STATE_ENABLED) {
		ret = -EINVAL;
		goto out_unlock;
	}

	if (connection->state == GB_CONNECTION_STATE_ENABLED_TX)
		goto out_unlock;

	ret = _gb_connection_enable(connection, false);
	if (!ret)
		trace_gb_connection_enable(connection);

out_unlock:
	mutex_unlock(&connection->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(gb_connection_enable_tx);
void gb_connection_disable_rx(struct gb_connection *connection)
{
	mutex_lock(&connection->mutex);

	spin_lock_irq(&connection->lock);
	if (connection->state != GB_CONNECTION_STATE_ENABLED) {
		spin_unlock_irq(&connection->lock);
		goto out_unlock;
	}
	connection->state = GB_CONNECTION_STATE_ENABLED_TX;
	gb_connection_flush_incoming_operations(connection, -ESHUTDOWN);
	spin_unlock_irq(&connection->lock);

	trace_gb_connection_disable(connection);

out_unlock:
	mutex_unlock(&connection->mutex);
}
EXPORT_SYMBOL_GPL(gb_connection_disable_rx);
void gb_connection_mode_switch_prepare(struct gb_connection *connection)
{
	connection->mode_switch = true;
}

void gb_connection_mode_switch_complete(struct gb_connection *connection)
{
	gb_connection_svc_connection_destroy(connection);
	gb_connection_hd_cport_clear(connection);

	gb_connection_hd_cport_disable(connection);

	connection->mode_switch = false;
}
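
/*
 * Usage sketch (illustrative): callers are expected to bracket a mode
 * switch so that the SVC-connection and host-cport teardown, which
 * gb_connection_disable() defers while mode_switch is set, happens only
 * once the switch has completed:
 *
 *	gb_connection_mode_switch_prepare(connection);
 *	gb_connection_disable(connection);
 *	...
 *	gb_connection_mode_switch_complete(connection);
 */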
void gb_connection_disable(struct gb_connection *connection)
{
	mutex_lock(&connection->mutex);

	if (connection->state == GB_CONNECTION_STATE_DISABLED)
		goto out_unlock;

	trace_gb_connection_disable(connection);

	spin_lock_irq(&connection->lock);
	connection->state = GB_CONNECTION_STATE_DISCONNECTING;
	gb_connection_cancel_operations(connection, -ESHUTDOWN);
	spin_unlock_irq(&connection->lock);

	gb_connection_hd_cport_flush(connection);

	gb_connection_control_disconnecting(connection);
	gb_connection_cport_shutdown_phase_1(connection);
	gb_connection_hd_cport_quiesce(connection);
	gb_connection_cport_shutdown_phase_2(connection);
	gb_connection_control_disconnected(connection);

	connection->state = GB_CONNECTION_STATE_DISABLED;

	/* control-connection tear down is deferred when mode switching */
	if (!connection->mode_switch) {
		gb_connection_svc_connection_destroy(connection);
		gb_connection_hd_cport_clear(connection);

		gb_connection_hd_cport_disable(connection);
	}

out_unlock:
	mutex_unlock(&connection->mutex);
}
EXPORT_SYMBOL_GPL(gb_connection_disable);
/* Disable a connection without communicating with the remote end. */
void gb_connection_disable_forced(struct gb_connection *connection)
{
	mutex_lock(&connection->mutex);

	if (connection->state == GB_CONNECTION_STATE_DISABLED)
		goto out_unlock;

	trace_gb_connection_disable(connection);

	spin_lock_irq(&connection->lock);
	connection->state = GB_CONNECTION_STATE_DISABLED;
	gb_connection_cancel_operations(connection, -ESHUTDOWN);
	spin_unlock_irq(&connection->lock);

	gb_connection_hd_cport_flush(connection);

	gb_connection_svc_connection_destroy(connection);
	gb_connection_hd_cport_clear(connection);

	gb_connection_hd_cport_disable(connection);

out_unlock:
	mutex_unlock(&connection->mutex);
}
EXPORT_SYMBOL_GPL(gb_connection_disable_forced);
/* Caller must have disabled the connection before destroying it. */
void gb_connection_destroy(struct gb_connection *connection)
{
	if (!connection)
		return;

	if (WARN_ON(connection->state != GB_CONNECTION_STATE_DISABLED))
		gb_connection_disable(connection);

	mutex_lock(&gb_connection_mutex);

	spin_lock_irq(&gb_connections_lock);
	list_del(&connection->bundle_links);
	list_del(&connection->hd_links);
	spin_unlock_irq(&gb_connections_lock);

	destroy_workqueue(connection->wq);

	gb_hd_cport_release(connection->hd, connection->hd_cport_id);
	connection->hd_cport_id = CPORT_ID_BAD;

	mutex_unlock(&gb_connection_mutex);

	gb_connection_put(connection);
}
EXPORT_SYMBOL_GPL(gb_connection_destroy);
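
/*
 * Teardown sketch (illustrative): a bundle driver's disconnect path
 * mirrors probe by disabling before destroying:
 *
 *	gb_connection_disable(connection);
 *	gb_connection_destroy(connection);
 *
 * Destroying a still-enabled connection triggers the WARN_ON() above and
 * the connection is disabled first.
 */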
void gb_connection_latency_tag_enable(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->latency_tag_enable)
		return;

	ret = hd->driver->latency_tag_enable(hd, connection->hd_cport_id);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: failed to enable latency tag: %d\n",
			connection->name, ret);
	}
}
EXPORT_SYMBOL_GPL(gb_connection_latency_tag_enable);
void gb_connection_latency_tag_disable(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->latency_tag_disable)
		return;

	ret = hd->driver->latency_tag_disable(hd, connection->hd_cport_id);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: failed to disable latency tag: %d\n",
			connection->name, ret);
	}
}
EXPORT_SYMBOL_GPL(gb_connection_latency_tag_disable);
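
/*
 * Usage sketch (illustrative): a driver measuring round-trip latency can
 * bracket a set of operations with the tag helpers; both degrade to no-ops
 * when the host driver does not implement the callbacks.
 * "gb_example_sync_operation" is a hypothetical operation:
 *
 *	gb_connection_latency_tag_enable(connection);
 *	ret = gb_example_sync_operation(connection);
 *	gb_connection_latency_tag_disable(connection);
 */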