// SPDX-License-Identifier: GPL-2.0
/*
 * Greybus operations
 *
 * Copyright 2014-2015 Google Inc.
 * Copyright 2014-2015 Linaro Ltd.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/greybus.h>

#include "greybus_trace.h"

static struct kmem_cache *gb_operation_cache;
static struct kmem_cache *gb_message_cache;

/* Workqueue to handle Greybus operation completions. */
static struct workqueue_struct *gb_operation_completion_wq;

/* Wait queue for synchronous cancellations. */
static DECLARE_WAIT_QUEUE_HEAD(gb_operation_cancellation_queue);

/*
 * Protects updates to operation->errno.
 */
static DEFINE_SPINLOCK(gb_operations_lock);

static int gb_operation_response_send(struct gb_operation *operation,
				      int errno);

/*
 * Increment operation active count and add to connection list unless the
 * connection is going away.
 *
 * Caller holds operation reference.
 */
static int gb_operation_get_active(struct gb_operation *operation)
{
	struct gb_connection *connection = operation->connection;
	unsigned long flags;

	spin_lock_irqsave(&connection->lock, flags);
	switch (connection->state) {
	case GB_CONNECTION_STATE_ENABLED:
		break;
	case GB_CONNECTION_STATE_ENABLED_TX:
		if (gb_operation_is_incoming(operation))
			goto err_unlock;
		break;
	case GB_CONNECTION_STATE_DISCONNECTING:
		if (!gb_operation_is_core(operation))
			goto err_unlock;
		break;
	default:
		goto err_unlock;
	}

	if (operation->active++ == 0)
		list_add_tail(&operation->links, &connection->operations);

	trace_gb_operation_get_active(operation);

	spin_unlock_irqrestore(&connection->lock, flags);

	return 0;

err_unlock:
	spin_unlock_irqrestore(&connection->lock, flags);

	return -ENOTCONN;
}

/* Caller holds operation reference. */
static void gb_operation_put_active(struct gb_operation *operation)
{
	struct gb_connection *connection = operation->connection;
	unsigned long flags;

	spin_lock_irqsave(&connection->lock, flags);

	trace_gb_operation_put_active(operation);

	if (--operation->active == 0) {
		list_del(&operation->links);
		if (atomic_read(&operation->waiters))
			wake_up(&gb_operation_cancellation_queue);
	}
	spin_unlock_irqrestore(&connection->lock, flags);
}

static bool gb_operation_is_active(struct gb_operation *operation)
{
	struct gb_connection *connection = operation->connection;
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&connection->lock, flags);
	ret = operation->active;
	spin_unlock_irqrestore(&connection->lock, flags);

	return ret;
}

/*
 * Set an operation's result.
 *
 * Initially an outgoing operation's errno value is -EBADR.
 * If no error occurs before sending the request message the only
 * valid value operation->errno can be set to is -EINPROGRESS,
 * indicating the request has been (or rather is about to be) sent.
 * At that point nobody should be looking at the result until the
 * response arrives.
 *
 * The first time the result gets set after the request has been
 * sent, that result "sticks."  That is, if two concurrent threads
 * race to set the result, the first one wins.  The return value
 * tells the caller whether its result was recorded; if not, the
 * caller has nothing more to do.
 *
 * The result value -EILSEQ is reserved to signal an implementation
 * error; if it's ever observed, the code performing the request has
 * done something fundamentally wrong.  It is an error to try to set
 * the result to -EBADR, and attempts to do so result in a warning,
 * and -EILSEQ is used instead.  Similarly, the only valid result
 * value to set for an operation in initial state is -EINPROGRESS.
 * Attempts to do otherwise will also record a (successful) -EILSEQ
 * operation result.
 */
static bool gb_operation_result_set(struct gb_operation *operation, int result)
{
	unsigned long flags;
	int prev;

	if (result == -EINPROGRESS) {
		/*
		 * -EINPROGRESS is used to indicate the request is
		 * in flight.  It should be the first result value
		 * set after the initial -EBADR.  Issue a warning
		 * and record an implementation error if it's
		 * set at any other time.
		 */
		spin_lock_irqsave(&gb_operations_lock, flags);
		prev = operation->errno;
		if (prev == -EBADR)
			operation->errno = result;
		else
			operation->errno = -EILSEQ;
		spin_unlock_irqrestore(&gb_operations_lock, flags);
		WARN_ON(prev != -EBADR);

		return true;
	}

	/*
	 * The first result value set after a request has been sent
	 * will be the final result of the operation.  Subsequent
	 * attempts to set the result are ignored.
	 *
	 * Note that -EBADR is a reserved "initial state" result
	 * value.  Attempts to set this value result in a warning,
	 * and the result code is set to -EILSEQ instead.
	 */
	if (WARN_ON(result == -EBADR))
		result = -EILSEQ;	/* Nobody should be setting -EBADR */

	spin_lock_irqsave(&gb_operations_lock, flags);
	prev = operation->errno;
	if (prev == -EINPROGRESS)
		operation->errno = result;	/* First and final result */
	spin_unlock_irqrestore(&gb_operations_lock, flags);

	return prev == -EINPROGRESS;
}

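/*
 * Illustrative sketch (not part of the original file): the errno state
 * machine enforced above, for a successful outgoing operation.  The
 * helper calls are real; the surrounding flow is hypothetical.
 *
 *	op->errno == -EBADR				// "never set", from creation
 *	gb_operation_result_set(op, -EINPROGRESS);	// request in flight
 *	gb_operation_result_set(op, 0);		// first result sticks, returns true
 *	gb_operation_result_set(op, -EIO);	// too late, ignored, returns false
 *	gb_operation_result(op);		// yields 0
 */
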
int gb_operation_result(struct gb_operation *operation)
{
	int result = operation->errno;

	WARN_ON(result == -EBADR);
	WARN_ON(result == -EINPROGRESS);

	return result;
}
EXPORT_SYMBOL_GPL(gb_operation_result);

/*
 * Looks up an outgoing operation on a connection and returns a refcounted
 * pointer if found, or NULL otherwise.
 */
static struct gb_operation *
gb_operation_find_outgoing(struct gb_connection *connection, u16 operation_id)
{
	struct gb_operation *operation;
	unsigned long flags;
	bool found = false;

	spin_lock_irqsave(&connection->lock, flags);
	list_for_each_entry(operation, &connection->operations, links)
		if (operation->id == operation_id &&
		    !gb_operation_is_incoming(operation)) {
			gb_operation_get(operation);
			found = true;
			break;
		}
	spin_unlock_irqrestore(&connection->lock, flags);

	return found ? operation : NULL;
}

static int gb_message_send(struct gb_message *message, gfp_t gfp)
{
	struct gb_connection *connection = message->operation->connection;

	trace_gb_message_send(message);
	return connection->hd->driver->message_send(connection->hd,
						    connection->hd_cport_id,
						    message,
						    gfp);
}

/*
 * Cancel a message we have passed to the host device layer to be sent.
 */
static void gb_message_cancel(struct gb_message *message)
{
	struct gb_host_device *hd = message->operation->connection->hd;

	hd->driver->message_cancel(message);
}

static void gb_operation_request_handle(struct gb_operation *operation)
{
	struct gb_connection *connection = operation->connection;
	int status;
	int ret;

	if (connection->handler) {
		status = connection->handler(operation);
	} else {
		dev_err(&connection->hd->dev,
			"%s: unexpected incoming request of type 0x%02x\n",
			connection->name, operation->type);

		status = -EPROTONOSUPPORT;
	}

	ret = gb_operation_response_send(operation, status);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: failed to send response %d for type 0x%02x: %d\n",
			connection->name, status, operation->type, ret);
	}
}

/*
 * Process operation work.
 *
 * For incoming requests, call the protocol request handler.  The operation
 * result should be -EINPROGRESS at this point.
 *
 * For outgoing requests, the operation result value should have
 * been set before queueing this.  The operation callback function
 * allows the original requester to know the request has completed
 * and its result is available.
 */
static void gb_operation_work(struct work_struct *work)
{
	struct gb_operation *operation;
	int ret;

	operation = container_of(work, struct gb_operation, work);

	if (gb_operation_is_incoming(operation)) {
		gb_operation_request_handle(operation);
	} else {
		ret = del_timer_sync(&operation->timer);
		if (!ret) {
			/* Cancel request message if scheduled by timeout. */
			if (gb_operation_result(operation) == -ETIMEDOUT)
				gb_message_cancel(operation->request);
		}

		operation->callback(operation);
	}

	gb_operation_put_active(operation);
	gb_operation_put(operation);
}

static void gb_operation_timeout(struct timer_list *t)
{
	struct gb_operation *operation = from_timer(operation, t, timer);

	if (gb_operation_result_set(operation, -ETIMEDOUT)) {
		/*
		 * A stuck request message will be cancelled from the
		 * workqueue.
		 */
		queue_work(gb_operation_completion_wq, &operation->work);
	}
}

static void gb_operation_message_init(struct gb_host_device *hd,
				      struct gb_message *message, u16 operation_id,
				      size_t payload_size, u8 type)
{
	struct gb_operation_msg_hdr *header;

	header = message->buffer;

	message->header = header;
	message->payload = payload_size ? header + 1 : NULL;
	message->payload_size = payload_size;

	/*
	 * The type supplied for incoming message buffers will be
	 * GB_REQUEST_TYPE_INVALID.  Such buffers will be overwritten by
	 * arriving data so there's no need to initialize the message header.
	 */
	if (type != GB_REQUEST_TYPE_INVALID) {
		u16 message_size = (u16)(sizeof(*header) + payload_size);

		/*
		 * For a request, the operation id gets filled in
		 * when the message is sent.  For a response, it
		 * will be copied from the request by the caller.
		 *
		 * The result field in a request message must be
		 * zero.  It will be set just prior to sending for
		 * a response.
		 */
		header->size = cpu_to_le16(message_size);
		header->operation_id = 0;
		header->type = type;
		header->result = 0;
	}
}

/*
 * Allocate a message to be used for an operation request or response.
 * Both types of message contain a common header.  The request message
 * for an outgoing operation is outbound, as is the response message
 * for an incoming operation.  The message header for an outbound
 * message is partially initialized here.
 *
 * The headers for inbound messages don't need to be initialized;
 * they'll be filled in by arriving data.
 *
 * Our message buffers have the following layout:
 *	message header  \_ these combined are
 *	message payload /  the message size
 */
static struct gb_message *
gb_operation_message_alloc(struct gb_host_device *hd, u8 type,
			   size_t payload_size, gfp_t gfp_flags)
{
	struct gb_message *message;
	struct gb_operation_msg_hdr *header;
	size_t message_size = payload_size + sizeof(*header);

	if (message_size > hd->buffer_size_max) {
		dev_warn(&hd->dev, "requested message size too big (%zu > %zu)\n",
			 message_size, hd->buffer_size_max);
		return NULL;
	}

	/* Allocate the message structure and buffer. */
	message = kmem_cache_zalloc(gb_message_cache, gfp_flags);
	if (!message)
		return NULL;

	message->buffer = kzalloc(message_size, gfp_flags);
	if (!message->buffer)
		goto err_free_message;

	/* Initialize the message.  Operation id is filled in later. */
	gb_operation_message_init(hd, message, 0, payload_size, type);

	return message;

err_free_message:
	kmem_cache_free(gb_message_cache, message);

	return NULL;
}

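/*
 * Illustrative sketch (not part of the original file): for a 4-byte
 * payload, the single kzalloc()ed buffer above is laid out as
 *
 *	message->buffer:   [ gb_operation_msg_hdr | 4 payload bytes ]
 *	message->header  = message->buffer
 *	message->payload = header + 1	(NULL when payload_size == 0)
 *	header->size     = sizeof(*header) + 4
 */
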
static void gb_operation_message_free(struct gb_message *message)
{
	kfree(message->buffer);
	kmem_cache_free(gb_message_cache, message);
}

/*
 * Map an enum gb_operation_status value (which is represented in a
 * message as a single byte) to an appropriate Linux negative errno.
 */
static int gb_operation_status_map(u8 status)
{
	switch (status) {
	case GB_OP_SUCCESS:
		return 0;
	case GB_OP_INTERRUPTED:
		return -EINTR;
	case GB_OP_TIMEOUT:
		return -ETIMEDOUT;
	case GB_OP_NO_MEMORY:
		return -ENOMEM;
	case GB_OP_PROTOCOL_BAD:
		return -EPROTONOSUPPORT;
	case GB_OP_OVERFLOW:
		return -EMSGSIZE;
	case GB_OP_INVALID:
		return -EINVAL;
	case GB_OP_RETRY:
		return -EAGAIN;
	case GB_OP_NONEXISTENT:
		return -ENODEV;
	case GB_OP_MALFUNCTION:
		return -EILSEQ;
	case GB_OP_UNKNOWN_ERROR:
	default:
		return -EIO;
	}
}

/*
 * Map a Linux errno value (from operation->errno) into the value
 * that should represent it in a response message status sent
 * over the wire.  Returns an enum gb_operation_status value (which
 * is represented in a message as a single byte).
 */
static u8 gb_operation_errno_map(int errno)
{
	switch (errno) {
	case 0:
		return GB_OP_SUCCESS;
	case -EINTR:
		return GB_OP_INTERRUPTED;
	case -ETIMEDOUT:
		return GB_OP_TIMEOUT;
	case -ENOMEM:
		return GB_OP_NO_MEMORY;
	case -EPROTONOSUPPORT:
		return GB_OP_PROTOCOL_BAD;
	case -EMSGSIZE:
		return GB_OP_OVERFLOW;	/* Could be underflow too */
	case -EINVAL:
		return GB_OP_INVALID;
	case -EAGAIN:
		return GB_OP_RETRY;
	case -EILSEQ:
		return GB_OP_MALFUNCTION;
	case -ENODEV:
		return GB_OP_NONEXISTENT;
	case -EIO:
	default:
		return GB_OP_UNKNOWN_ERROR;
	}
}

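/*
 * Illustrative note (not part of the original file): the two maps above
 * are inverses for the values they share.  A handler failing with -ENOMEM
 * puts GB_OP_NO_MEMORY (one byte) in the outgoing response header, and a
 * peer reporting GB_OP_NO_MEMORY surfaces as -ENOMEM on this side, so
 *
 *	gb_operation_status_map(gb_operation_errno_map(-ENOMEM)) == -ENOMEM
 */
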
bool gb_operation_response_alloc(struct gb_operation *operation,
				 size_t response_size, gfp_t gfp)
{
	struct gb_host_device *hd = operation->connection->hd;
	struct gb_operation_msg_hdr *request_header;
	struct gb_message *response;
	u8 type;

	type = operation->type | GB_MESSAGE_TYPE_RESPONSE;
	response = gb_operation_message_alloc(hd, type, response_size, gfp);
	if (!response)
		return false;
	response->operation = operation;

	/*
	 * Size and type get initialized when the message is
	 * allocated.  The errno will be set before sending.  All
	 * that's left is the operation id, which we copy from the
	 * request message header (as-is, in little-endian order).
	 */
	request_header = operation->request->header;
	response->header->operation_id = request_header->operation_id;
	operation->response = response;

	return true;
}
EXPORT_SYMBOL_GPL(gb_operation_response_alloc);

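/*
 * Illustrative sketch (not part of the original file): an incoming-request
 * handler that returns payload allocates its own response before filling
 * it in.  The handler and payload struct names are hypothetical.
 *
 *	static int example_request_handler(struct gb_operation *op)
 *	{
 *		struct example_response *resp;
 *
 *		if (!gb_operation_response_alloc(op, sizeof(*resp),
 *						 GFP_KERNEL))
 *			return -ENOMEM;
 *
 *		resp = op->response->payload;
 *		// ... fill in *resp ...
 *		return 0;	// gb_operation_request_handle() sends it
 *	}
 */
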
/*
 * Create a Greybus operation to be sent over the given connection.
 * The request buffer will be big enough for a payload of the given
 * size.
 *
 * For outgoing requests, the request message's header will be
 * initialized with the type of the request and the message size.
 * Outgoing operations must also specify the response buffer size,
 * which must be sufficient to hold all expected response data.  The
 * response message header will eventually be overwritten, so there's
 * no need to initialize it here.
 *
 * Request messages for incoming operations can arrive in interrupt
 * context, so they must be allocated with GFP_ATOMIC.  In this case
 * the request buffer will be immediately overwritten, so there is
 * no need to initialize the message header.  Responsibility for
 * allocating a response buffer lies with the incoming request
 * handler for a protocol.  So we don't allocate that here.
 *
 * Returns a pointer to the new operation or a null pointer if an
 * error occurs.
 */
static struct gb_operation *
gb_operation_create_common(struct gb_connection *connection, u8 type,
			   size_t request_size, size_t response_size,
			   unsigned long op_flags, gfp_t gfp_flags)
{
	struct gb_host_device *hd = connection->hd;
	struct gb_operation *operation;

	operation = kmem_cache_zalloc(gb_operation_cache, gfp_flags);
	if (!operation)
		return NULL;
	operation->connection = connection;

	operation->request = gb_operation_message_alloc(hd, type, request_size,
							gfp_flags);
	if (!operation->request)
		goto err_cache;
	operation->request->operation = operation;

	/* Allocate the response buffer for outgoing operations */
	if (!(op_flags & GB_OPERATION_FLAG_INCOMING)) {
		if (!gb_operation_response_alloc(operation, response_size,
						 gfp_flags)) {
			goto err_request;
		}

		timer_setup(&operation->timer, gb_operation_timeout, 0);
	}

	operation->flags = op_flags;
	operation->type = type;
	operation->errno = -EBADR;	/* Initial value--means "never set" */

	INIT_WORK(&operation->work, gb_operation_work);
	init_completion(&operation->completion);
	kref_init(&operation->kref);
	atomic_set(&operation->waiters, 0);

	return operation;

err_request:
	gb_operation_message_free(operation->request);
err_cache:
	kmem_cache_free(gb_operation_cache, operation);

	return NULL;
}

/*
 * Create a new operation associated with the given connection.  The
 * request and response sizes provided are the number of bytes
 * required to hold the request/response payload only.  Both of
 * these are allowed to be 0.  Note that 0x00 is reserved as an
 * invalid operation type for all protocols, and this is enforced
 * here.
 */
struct gb_operation *
gb_operation_create_flags(struct gb_connection *connection,
			  u8 type, size_t request_size,
			  size_t response_size, unsigned long flags,
			  gfp_t gfp)
{
	struct gb_operation *operation;

	if (WARN_ON_ONCE(type == GB_REQUEST_TYPE_INVALID))
		return NULL;
	if (WARN_ON_ONCE(type & GB_MESSAGE_TYPE_RESPONSE))
		type &= ~GB_MESSAGE_TYPE_RESPONSE;

	if (WARN_ON_ONCE(flags & ~GB_OPERATION_FLAG_USER_MASK))
		flags &= GB_OPERATION_FLAG_USER_MASK;

	operation = gb_operation_create_common(connection, type,
					       request_size, response_size,
					       flags, gfp);
	if (operation)
		trace_gb_operation_create(operation);

	return operation;
}
EXPORT_SYMBOL_GPL(gb_operation_create_flags);

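/*
 * Illustrative sketch (not part of the original file): most drivers reach
 * this through the gb_operation_create() convenience wrapper declared in
 * the Greybus headers, which passes flags == 0.  The type and payload
 * structs below are hypothetical.
 *
 *	op = gb_operation_create(connection, EXAMPLE_TYPE_FOO,
 *				 sizeof(struct foo_request),
 *				 sizeof(struct foo_response), GFP_KERNEL);
 *	if (!op)
 *		return -ENOMEM;
 */
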
struct gb_operation *
gb_operation_create_core(struct gb_connection *connection,
			 u8 type, size_t request_size,
			 size_t response_size, unsigned long flags,
			 gfp_t gfp)
{
	struct gb_operation *operation;

	flags |= GB_OPERATION_FLAG_CORE;

	operation = gb_operation_create_common(connection, type,
					       request_size, response_size,
					       flags, gfp);
	if (operation)
		trace_gb_operation_create_core(operation);

	return operation;
}

/* Do not export this function. */

size_t gb_operation_get_payload_size_max(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;

	return hd->buffer_size_max - sizeof(struct gb_operation_msg_hdr);
}
EXPORT_SYMBOL_GPL(gb_operation_get_payload_size_max);

static struct gb_operation *
gb_operation_create_incoming(struct gb_connection *connection, u16 id,
			     u8 type, void *data, size_t size)
{
	struct gb_operation *operation;
	size_t request_size;
	unsigned long flags = GB_OPERATION_FLAG_INCOMING;

	/* Caller has made sure we at least have a message header. */
	request_size = size - sizeof(struct gb_operation_msg_hdr);

	if (!id)
		flags |= GB_OPERATION_FLAG_UNIDIRECTIONAL;

	operation = gb_operation_create_common(connection, type,
					       request_size,
					       GB_REQUEST_TYPE_INVALID,
					       flags, GFP_ATOMIC);
	if (!operation)
		return NULL;

	operation->id = id;
	memcpy(operation->request->header, data, size);
	trace_gb_operation_create_incoming(operation);

	return operation;
}

/*
 * Get an additional reference on an operation.
 */
void gb_operation_get(struct gb_operation *operation)
{
	kref_get(&operation->kref);
}
EXPORT_SYMBOL_GPL(gb_operation_get);

/*
 * Destroy a previously created operation.
 */
static void _gb_operation_destroy(struct kref *kref)
{
	struct gb_operation *operation;

	operation = container_of(kref, struct gb_operation, kref);

	trace_gb_operation_destroy(operation);

	if (operation->response)
		gb_operation_message_free(operation->response);
	gb_operation_message_free(operation->request);

	kmem_cache_free(gb_operation_cache, operation);
}

/*
 * Drop a reference on an operation, and destroy it when the last
 * one is gone.
 */
void gb_operation_put(struct gb_operation *operation)
{
	if (WARN_ON(!operation))
		return;

	kref_put(&operation->kref, _gb_operation_destroy);
}
EXPORT_SYMBOL_GPL(gb_operation_put);

/* Tell the requester we're done */
static void gb_operation_sync_callback(struct gb_operation *operation)
{
	complete(&operation->completion);
}

/**
 * gb_operation_request_send() - send an operation request message
 * @operation: the operation to initiate
 * @callback: the operation completion callback
 * @timeout: operation timeout in milliseconds, or zero for no timeout
 * @gfp: the memory flags to use for any allocations
 *
 * The caller has filled in any payload so the request message is ready to go.
 * The callback function supplied will be called when the response message has
 * arrived, a unidirectional request has been sent, or the operation is
 * cancelled, indicating that the operation is complete. The callback function
 * can fetch the result of the operation using gb_operation_result() if
 * desired.
 *
 * Return: 0 if the request was successfully queued in the host-driver queues,
 * or a negative errno.
 */
int gb_operation_request_send(struct gb_operation *operation,
			      gb_operation_callback callback,
			      unsigned int timeout,
			      gfp_t gfp)
{
	struct gb_connection *connection = operation->connection;
	struct gb_operation_msg_hdr *header;
	unsigned int cycle;
	int ret;

	if (gb_connection_is_offloaded(connection))
		return -EBUSY;

	if (!callback)
		return -EINVAL;

	/*
	 * Record the callback function, which is executed in
	 * non-atomic (workqueue) context when the final result
	 * of an operation has been set.
	 */
	operation->callback = callback;

	/*
	 * Assign the operation's id, and store it in the request header.
	 * Zero is a reserved operation id for unidirectional operations.
	 */
	if (gb_operation_is_unidirectional(operation)) {
		operation->id = 0;
	} else {
		cycle = (unsigned int)atomic_inc_return(&connection->op_cycle);
		operation->id = (u16)(cycle % U16_MAX + 1);
	}

	header = operation->request->header;
	header->operation_id = cpu_to_le16(operation->id);

	gb_operation_result_set(operation, -EINPROGRESS);

	/*
	 * Get an extra reference on the operation. It'll be dropped when the
	 * operation completes.
	 */
	gb_operation_get(operation);
	ret = gb_operation_get_active(operation);
	if (ret)
		goto err_put;

	ret = gb_message_send(operation->request, gfp);
	if (ret)
		goto err_put_active;

	if (timeout) {
		operation->timer.expires = jiffies + msecs_to_jiffies(timeout);
		add_timer(&operation->timer);
	}

	return 0;

err_put_active:
	gb_operation_put_active(operation);
err_put:
	gb_operation_put(operation);

	return ret;
}
EXPORT_SYMBOL_GPL(gb_operation_request_send);

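/*
 * Illustrative sketch (not part of the original file): asynchronous use.
 * The callback runs from the completion workqueue once the final result
 * has been set.  Names marked "example" are hypothetical.
 *
 *	static void example_done(struct gb_operation *op)
 *	{
 *		int ret = gb_operation_result(op);
 *
 *		if (!ret)
 *			; // consume op->response->payload
 *		gb_operation_put(op);	// drop the requester's reference
 *	}
 *
 *	ret = gb_operation_request_send(op, example_done, 1000, GFP_KERNEL);
 *	if (ret)
 *		gb_operation_put(op);	// never queued; clean up here
 */
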
/*
 * Send a synchronous operation.  This function is expected to
 * block, returning only when the response has arrived, or when an
 * error is detected.  The return value is the result of the
 * operation.
 */
int gb_operation_request_send_sync_timeout(struct gb_operation *operation,
					   unsigned int timeout)
{
	int ret;

	ret = gb_operation_request_send(operation, gb_operation_sync_callback,
					timeout, GFP_KERNEL);
	if (ret)
		return ret;

	ret = wait_for_completion_interruptible(&operation->completion);
	if (ret < 0) {
		/* Cancel the operation if interrupted */
		gb_operation_cancel(operation, -ECANCELED);
	}

	return gb_operation_result(operation);
}
EXPORT_SYMBOL_GPL(gb_operation_request_send_sync_timeout);

/*
 * Send a response for an incoming operation request.  A non-zero
 * errno indicates a failed operation.
 *
 * If there is any response payload, the incoming request handler is
 * responsible for allocating the response message.  Otherwise it
 * can simply supply the result errno; this function will
 * allocate the response message if necessary.
 */
static int gb_operation_response_send(struct gb_operation *operation,
				      int errno)
{
	struct gb_connection *connection = operation->connection;
	int ret;

	if (!operation->response &&
	    !gb_operation_is_unidirectional(operation)) {
		if (!gb_operation_response_alloc(operation, 0, GFP_KERNEL))
			return -ENOMEM;
	}

	/* Record the result */
	if (!gb_operation_result_set(operation, errno)) {
		dev_err(&connection->hd->dev, "request result already set\n");
		return -EIO;	/* Shouldn't happen */
	}

	/* Sender of request does not care about response. */
	if (gb_operation_is_unidirectional(operation))
		return 0;

	/* Reference will be dropped when message has been sent. */
	gb_operation_get(operation);
	ret = gb_operation_get_active(operation);
	if (ret)
		goto err_put;

	/* Fill in the response header and send it */
	operation->response->header->result = gb_operation_errno_map(errno);

	ret = gb_message_send(operation->response, GFP_KERNEL);
	if (ret)
		goto err_put_active;

	return 0;

err_put_active:
	gb_operation_put_active(operation);
err_put:
	gb_operation_put(operation);

	return ret;
}

/*
 * This function is called when a message send request has completed.
 */
void greybus_message_sent(struct gb_host_device *hd,
			  struct gb_message *message, int status)
{
	struct gb_operation *operation = message->operation;
	struct gb_connection *connection = operation->connection;

	/*
	 * If the message was a response, we just need to drop our
	 * reference to the operation.  If an error occurred, report
	 * it.
	 *
	 * For requests, if there's no error and the operation is not
	 * unidirectional, there's nothing more to do until the response
	 * arrives.  If an error occurred attempting to send it, or if the
	 * operation is unidirectional, record the result of the operation and
	 * schedule its completion.
	 */
	if (message == operation->response) {
		if (status) {
			dev_err(&connection->hd->dev,
				"%s: error sending response 0x%02x: %d\n",
				connection->name, operation->type, status);
		}

		gb_operation_put_active(operation);
		gb_operation_put(operation);
	} else if (status || gb_operation_is_unidirectional(operation)) {
		if (gb_operation_result_set(operation, status)) {
			queue_work(gb_operation_completion_wq,
				   &operation->work);
		}
	}
}
EXPORT_SYMBOL_GPL(greybus_message_sent);

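/*
 * Illustrative sketch (not part of the original file): a host device
 * driver reports send completion from its own TX path, for example from
 * a URB or DMA completion handler (the driver names are hypothetical):
 *
 *	static void example_tx_complete(struct example_hd_priv *priv,
 *					struct gb_message *message, int err)
 *	{
 *		greybus_message_sent(priv->hd, message, err);
 *	}
 */
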
/*
 * We've received data on a connection, and it doesn't look like a
 * response, so we assume it's a request.
 *
 * This is called in interrupt context, so just copy the incoming
 * data into the request buffer and handle the rest via workqueue.
 */
static void gb_connection_recv_request(struct gb_connection *connection,
				       const struct gb_operation_msg_hdr *header,
				       void *data, size_t size)
{
	struct gb_operation *operation;
	u16 operation_id;
	u8 type;
	int ret;

	operation_id = le16_to_cpu(header->operation_id);
	type = header->type;

	operation = gb_operation_create_incoming(connection, operation_id,
						 type, data, size);
	if (!operation) {
		dev_err(&connection->hd->dev,
			"%s: can't create incoming operation\n",
			connection->name);
		return;
	}

	ret = gb_operation_get_active(operation);
	if (ret) {
		gb_operation_put(operation);
		return;
	}
	trace_gb_message_recv_request(operation->request);

	/*
	 * The initial reference to the operation will be dropped when the
	 * request handler returns.
	 */
	if (gb_operation_result_set(operation, -EINPROGRESS))
		queue_work(connection->wq, &operation->work);
}

/*
 * We've received data that appears to be an operation response
 * message.  Look up the operation, and record that we've received
 * its response.
 *
 * This is called in interrupt context, so just copy the incoming
 * data into the response buffer and handle the rest via workqueue.
 */
static void gb_connection_recv_response(struct gb_connection *connection,
					const struct gb_operation_msg_hdr *header,
					void *data, size_t size)
{
	struct gb_operation *operation;
	struct gb_message *message;
	size_t message_size;
	u16 operation_id;
	int errno;

	operation_id = le16_to_cpu(header->operation_id);

	if (!operation_id) {
		dev_err_ratelimited(&connection->hd->dev,
				    "%s: invalid response id 0 received\n",
				    connection->name);
		return;
	}

	operation = gb_operation_find_outgoing(connection, operation_id);
	if (!operation) {
		dev_err_ratelimited(&connection->hd->dev,
				    "%s: unexpected response id 0x%04x received\n",
				    connection->name, operation_id);
		return;
	}

	errno = gb_operation_status_map(header->result);
	message = operation->response;
	message_size = sizeof(*header) + message->payload_size;
	if (!errno && size > message_size) {
		dev_err_ratelimited(&connection->hd->dev,
				    "%s: malformed response 0x%02x received (%zu > %zu)\n",
				    connection->name, header->type,
				    size, message_size);
		errno = -EMSGSIZE;
	} else if (!errno && size < message_size) {
		if (gb_operation_short_response_allowed(operation)) {
			message->payload_size = size - sizeof(*header);
		} else {
			dev_err_ratelimited(&connection->hd->dev,
					    "%s: short response 0x%02x received (%zu < %zu)\n",
					    connection->name, header->type,
					    size, message_size);
			errno = -EIO;
		}
	}

	/* We must ignore the payload if a bad status is returned */
	if (errno)
		size = sizeof(*header);

	/* The rest will be handled in work queue context */
	if (gb_operation_result_set(operation, errno)) {
		memcpy(message->buffer, data, size);

		trace_gb_message_recv_response(message);

		queue_work(gb_operation_completion_wq, &operation->work);
	}

	gb_operation_put(operation);
}

/*
 * Handle data arriving on a connection.  As soon as we return, the
 * supplied data buffer will be reused (so unless we do something
 * with it, it's effectively dropped).
 */
void gb_connection_recv(struct gb_connection *connection,
			void *data, size_t size)
{
	struct gb_operation_msg_hdr header;
	struct device *dev = &connection->hd->dev;
	size_t msg_size;

	if (connection->state == GB_CONNECTION_STATE_DISABLED ||
	    gb_connection_is_offloaded(connection)) {
		dev_warn_ratelimited(dev, "%s: dropping %zu received bytes\n",
				     connection->name, size);
		return;
	}

	if (size < sizeof(header)) {
		dev_err_ratelimited(dev, "%s: short message received\n",
				    connection->name);
		return;
	}

	/* Use memcpy as data may be unaligned */
	memcpy(&header, data, sizeof(header));
	msg_size = le16_to_cpu(header.size);
	if (size < msg_size) {
		dev_err_ratelimited(dev,
				    "%s: incomplete message 0x%04x of type 0x%02x received (%zu < %zu)\n",
				    connection->name,
				    le16_to_cpu(header.operation_id),
				    header.type, size, msg_size);
		return;		/* XXX Should still complete operation */
	}

	if (header.type & GB_MESSAGE_TYPE_RESPONSE) {
		gb_connection_recv_response(connection, &header, data,
					    msg_size);
	} else {
		gb_connection_recv_request(connection, &header, data,
					   msg_size);
	}
}

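/*
 * Illustrative note (not part of the original file): host device drivers
 * normally do not call gb_connection_recv() directly; they hand received
 * cport data to greybus_data_rcvd() (see connection.c), which looks up
 * the connection for the cport and forwards the buffer here.
 */
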
/*
 * Cancel an outgoing operation synchronously, and record the given error to
 * indicate why.
 */
void gb_operation_cancel(struct gb_operation *operation, int errno)
{
	if (WARN_ON(gb_operation_is_incoming(operation)))
		return;

	if (gb_operation_result_set(operation, errno)) {
		gb_message_cancel(operation->request);
		queue_work(gb_operation_completion_wq, &operation->work);
	}
	trace_gb_message_cancel_outgoing(operation->request);

	atomic_inc(&operation->waiters);
	wait_event(gb_operation_cancellation_queue,
		   !gb_operation_is_active(operation));
	atomic_dec(&operation->waiters);
}
EXPORT_SYMBOL_GPL(gb_operation_cancel);

/*
 * Cancel an incoming operation synchronously.  Called during connection tear
 * down.
 */
void gb_operation_cancel_incoming(struct gb_operation *operation, int errno)
{
	if (WARN_ON(!gb_operation_is_incoming(operation)))
		return;

	if (!gb_operation_is_unidirectional(operation)) {
		/*
		 * Make sure the request handler has submitted the response
		 * before cancelling it.
		 */
		flush_work(&operation->work);
		if (!gb_operation_result_set(operation, errno))
			gb_message_cancel(operation->response);
	}
	trace_gb_message_cancel_incoming(operation->response);

	atomic_inc(&operation->waiters);
	wait_event(gb_operation_cancellation_queue,
		   !gb_operation_is_active(operation));
	atomic_dec(&operation->waiters);
}

/**
 * gb_operation_sync_timeout() - implement a "simple" synchronous operation
 * @connection: the Greybus connection to send this to
 * @type: the type of operation to send
 * @request: pointer to a memory buffer to copy the request from
 * @request_size: size of @request
 * @response: pointer to a memory buffer to copy the response to
 * @response_size: the size of @response.
 * @timeout: operation timeout in milliseconds
 *
 * This function implements a simple synchronous Greybus operation.  It sends
 * the provided operation request and waits (sleeps) until the corresponding
 * operation response message has been successfully received, or an error
 * occurs.  @request and @response are buffers to hold the request and response
 * data respectively, and if they are not NULL, their size must be specified in
 * @request_size and @response_size.
 *
 * If a response payload is to come back, and @response is not NULL,
 * @response_size number of bytes will be copied into @response if the
 * operation is successful.
 *
 * If there is an error, the response buffer is left alone.
 */
int gb_operation_sync_timeout(struct gb_connection *connection, int type,
			      void *request, int request_size,
			      void *response, int response_size,
			      unsigned int timeout)
{
	struct gb_operation *operation;
	int ret;

	if ((response_size && !response) ||
	    (request_size && !request))
		return -EINVAL;

	operation = gb_operation_create(connection, type,
					request_size, response_size,
					GFP_KERNEL);
	if (!operation)
		return -ENOMEM;

	if (request_size)
		memcpy(operation->request->payload, request, request_size);

	ret = gb_operation_request_send_sync_timeout(operation, timeout);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: synchronous operation id 0x%04x of type 0x%02x failed: %d\n",
			connection->name, operation->id, type, ret);
	} else {
		if (response_size) {
			memcpy(response, operation->response->payload,
			       response_size);
		}
	}

	gb_operation_put(operation);

	return ret;
}
EXPORT_SYMBOL_GPL(gb_operation_sync_timeout);

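/*
 * Illustrative sketch (not part of the original file): typical driver
 * use, via the gb_operation_sync() wrapper from the Greybus headers that
 * supplies the default timeout.  The type and payload structs below are
 * hypothetical.
 *
 *	struct foo_request req = { .bar = cpu_to_le32(val) };
 *	struct foo_response resp;
 *	int ret;
 *
 *	ret = gb_operation_sync(connection, EXAMPLE_TYPE_FOO,
 *				&req, sizeof(req), &resp, sizeof(resp));
 *	if (ret)
 *		return ret;	// resp is untouched on error
 */
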
/**
 * gb_operation_unidirectional_timeout() - initiate a unidirectional operation
 * @connection: connection to use
 * @type: type of operation to send
 * @request: memory buffer to copy the request from
 * @request_size: size of @request
 * @timeout: send timeout in milliseconds
 *
 * Initiate a unidirectional operation by sending a request message and
 * waiting for it to be acknowledged as sent by the host device.
 *
 * Note that successful send of a unidirectional operation does not imply that
 * the request has actually reached the remote end of the connection.
 */
int gb_operation_unidirectional_timeout(struct gb_connection *connection,
					int type, void *request, int request_size,
					unsigned int timeout)
{
	struct gb_operation *operation;
	int ret;

	if (request_size && !request)
		return -EINVAL;

	operation = gb_operation_create_flags(connection, type,
					      request_size, 0,
					      GB_OPERATION_FLAG_UNIDIRECTIONAL,
					      GFP_KERNEL);
	if (!operation)
		return -ENOMEM;

	if (request_size)
		memcpy(operation->request->payload, request, request_size);

	ret = gb_operation_request_send_sync_timeout(operation, timeout);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: unidirectional operation of type 0x%02x failed: %d\n",
			connection->name, type, ret);
	}

	gb_operation_put(operation);

	return ret;
}
EXPORT_SYMBOL_GPL(gb_operation_unidirectional_timeout);

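/*
 * Illustrative note (not part of the original file): unidirectional
 * operations use the reserved operation id 0 and complete as soon as the
 * host device acknowledges the send.  Drivers typically call this through
 * the gb_operation_unidirectional() wrapper from the Greybus headers,
 * which supplies the default timeout (hypothetical type and payload):
 *
 *	ret = gb_operation_unidirectional(connection, EXAMPLE_TYPE_EVENT,
 *					  &event, sizeof(event));
 */
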
int __init gb_operation_init(void)
{
	gb_message_cache = kmem_cache_create("gb_message_cache",
					     sizeof(struct gb_message), 0, 0,
					     NULL);
	if (!gb_message_cache)
		return -ENOMEM;

	gb_operation_cache = kmem_cache_create("gb_operation_cache",
					       sizeof(struct gb_operation), 0,
					       0, NULL);
	if (!gb_operation_cache)
		goto err_destroy_message_cache;

	gb_operation_completion_wq = alloc_workqueue("greybus_completion",
						     0, 0);
	if (!gb_operation_completion_wq)
		goto err_destroy_operation_cache;

	return 0;

err_destroy_operation_cache:
	kmem_cache_destroy(gb_operation_cache);
	gb_operation_cache = NULL;
err_destroy_message_cache:
	kmem_cache_destroy(gb_message_cache);
	gb_message_cache = NULL;

	return -ENOMEM;
}

void gb_operation_exit(void)
{
	destroy_workqueue(gb_operation_completion_wq);
	gb_operation_completion_wq = NULL;
	kmem_cache_destroy(gb_operation_cache);
	gb_operation_cache = NULL;
	kmem_cache_destroy(gb_message_cache);
	gb_message_cache = NULL;
}