// SPDX-License-Identifier: GPL-2.0+
/*
 * SSH request transport layer.
 *
 * Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com>
 */

#include <linux/unaligned.h>
#include <linux/atomic.h>
#include <linux/completion.h>
#include <linux/error-injection.h>
#include <linux/ktime.h>
#include <linux/limits.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#include <linux/surface_aggregator/serial_hub.h>
#include <linux/surface_aggregator/controller.h>

#include "ssh_packet_layer.h"
#include "ssh_request_layer.h"

#include "trace.h"

/**
 * SSH_RTL_REQUEST_TIMEOUT - Request timeout.
 *
 * Timeout as ktime_t delta for request responses. If we have not received a
 * response in this time-frame after finishing the underlying packet
 * transmission, the request will be completed with %-ETIMEDOUT as status
 * code.
 */
#define SSH_RTL_REQUEST_TIMEOUT ms_to_ktime(3000)

/**
 * SSH_RTL_REQUEST_TIMEOUT_RESOLUTION - Request timeout granularity.
 *
 * Time-resolution for timeouts. Should be larger than one jiffy to avoid
 * direct re-scheduling of reaper work_struct.
 */
#define SSH_RTL_REQUEST_TIMEOUT_RESOLUTION ms_to_ktime(max(2000 / HZ, 50))

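/*
 * For example, with CONFIG_HZ=250, 2000 / HZ evaluates to 8 ms (i.e. two
 * jiffies), so the resolution above comes out as max(8, 50) = 50 ms. The
 * 50 ms floor thus dominates on all common HZ configurations (100, 250,
 * 300, 1000).
 */
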
/**
 * SSH_RTL_MAX_PENDING - Maximum number of pending requests.
 *
 * Maximum number of requests concurrently waiting to be completed (i.e.
 * waiting for the corresponding packet transmission to finish if they don't
 * have a response or waiting for a response if they have one).
 */
#define SSH_RTL_MAX_PENDING 3

/**
 * SSH_RTL_TX_BATCH - Maximum number of requests processed per work execution.
 * Used to prevent livelocking of the workqueue. Value chosen via educated
 * guess, may be adjusted.
 */
#define SSH_RTL_TX_BATCH 10

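/*
 * Request state overview (as implemented by the functions below): a request
 * moves from QUEUED (on the submission queue) to TRANSMITTING plus PENDING
 * (packet handed to the packet layer), then to TRANSMITTED (packet ACKed),
 * optionally to RSPRCVD (response matched via request ID), and finally to
 * COMPLETED. The LOCKED bit marks a request as being torn down (canceled,
 * timed out, or shut down) and blocks further state transitions.
 */
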
#ifdef CONFIG_SURFACE_AGGREGATOR_ERROR_INJECTION

/**
 * ssh_rtl_should_drop_response() - Error injection hook to drop request
 * responses.
 *
 * Useful to cause request transmission timeouts in the driver by dropping the
 * response to a request.
 */
static noinline bool ssh_rtl_should_drop_response(void)
{
	return false;
}
ALLOW_ERROR_INJECTION(ssh_rtl_should_drop_response, TRUE);

#else

static inline bool ssh_rtl_should_drop_response(void)
{
	return false;
}

#endif

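/*
 * With CONFIG_SURFACE_AGGREGATOR_ERROR_INJECTION and the kernel's function
 * error injection support enabled, response drops can be forced from user
 * space, e.g. via the fail_function fault-injection interface along these
 * lines (a sketch; exact paths may vary by kernel version and configuration):
 *
 *	echo ssh_rtl_should_drop_response > \
 *		/sys/kernel/debug/fail_function/inject
 *	echo 1 > \
 *		/sys/kernel/debug/fail_function/ssh_rtl_should_drop_response/retval
 *	echo 100 > /sys/kernel/debug/fail_function/probability
 *
 * Each dropped response then surfaces as an %-ETIMEDOUT completion once the
 * request timeout reaper runs.
 */
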
static u16 ssh_request_get_rqid(struct ssh_request *rqst)
{
	return get_unaligned_le16(rqst->packet.data.ptr
				  + SSH_MSGOFFSET_COMMAND(rqid));
}

static u32 ssh_request_get_rqid_safe(struct ssh_request *rqst)
{
	if (!rqst->packet.data.ptr)
		return U32_MAX;

	return ssh_request_get_rqid(rqst);
}

static void ssh_rtl_queue_remove(struct ssh_request *rqst)
{
	struct ssh_rtl *rtl = ssh_request_rtl(rqst);

	spin_lock(&rtl->queue.lock);

	if (!test_and_clear_bit(SSH_REQUEST_SF_QUEUED_BIT, &rqst->state)) {
		spin_unlock(&rtl->queue.lock);
		return;
	}

	list_del(&rqst->node);

	spin_unlock(&rtl->queue.lock);
	ssh_request_put(rqst);
}

static bool ssh_rtl_queue_empty(struct ssh_rtl *rtl)
{
	bool empty;

	spin_lock(&rtl->queue.lock);
	empty = list_empty(&rtl->queue.head);
	spin_unlock(&rtl->queue.lock);

	return empty;
}

static void ssh_rtl_pending_remove(struct ssh_request *rqst)
{
	struct ssh_rtl *rtl = ssh_request_rtl(rqst);

	spin_lock(&rtl->pending.lock);

	if (!test_and_clear_bit(SSH_REQUEST_SF_PENDING_BIT, &rqst->state)) {
		spin_unlock(&rtl->pending.lock);
		return;
	}

	atomic_dec(&rtl->pending.count);
	list_del(&rqst->node);

	spin_unlock(&rtl->pending.lock);

	ssh_request_put(rqst);
}

static int ssh_rtl_tx_pending_push(struct ssh_request *rqst)
{
	struct ssh_rtl *rtl = ssh_request_rtl(rqst);

	spin_lock(&rtl->pending.lock);

	if (test_bit(SSH_REQUEST_SF_LOCKED_BIT, &rqst->state)) {
		spin_unlock(&rtl->pending.lock);
		return -EINVAL;
	}

	if (test_and_set_bit(SSH_REQUEST_SF_PENDING_BIT, &rqst->state)) {
		spin_unlock(&rtl->pending.lock);
		return -EALREADY;
	}

	atomic_inc(&rtl->pending.count);
	list_add_tail(&ssh_request_get(rqst)->node, &rtl->pending.head);

	spin_unlock(&rtl->pending.lock);
	return 0;
}

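/*
 * Note on reference counting: the queue and the pending set each hold their
 * own reference to a request, taken via ssh_request_get() when the node is
 * inserted and dropped via ssh_request_put() when it is removed (see
 * ssh_rtl_queue_remove() and ssh_rtl_pending_remove() above). A request can
 * therefore only be released once it has left both containers and all
 * transient users have dropped their references.
 */
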
static void ssh_rtl_complete_with_status(struct ssh_request *rqst, int status)
{
	struct ssh_rtl *rtl = ssh_request_rtl(rqst);

	trace_ssam_request_complete(rqst, status);

	/* rtl/ptl may not be set if we're canceling before submitting. */
	rtl_dbg_cond(rtl, "rtl: completing request (rqid: %#06x, status: %d)\n",
		     ssh_request_get_rqid_safe(rqst), status);

	rqst->ops->complete(rqst, NULL, NULL, status);
}

static void ssh_rtl_complete_with_rsp(struct ssh_request *rqst,
				      const struct ssh_command *cmd,
				      const struct ssam_span *data)
{
	struct ssh_rtl *rtl = ssh_request_rtl(rqst);

	trace_ssam_request_complete(rqst, 0);

	rtl_dbg(rtl, "rtl: completing request with response (rqid: %#06x)\n",
		ssh_request_get_rqid(rqst));

	rqst->ops->complete(rqst, cmd, data, 0);
}

static bool ssh_rtl_tx_can_process(struct ssh_request *rqst)
{
	struct ssh_rtl *rtl = ssh_request_rtl(rqst);

	if (test_bit(SSH_REQUEST_TY_FLUSH_BIT, &rqst->state))
		return !atomic_read(&rtl->pending.count);

	return atomic_read(&rtl->pending.count) < SSH_RTL_MAX_PENDING;
}

static struct ssh_request *ssh_rtl_tx_next(struct ssh_rtl *rtl)
{
	struct ssh_request *rqst = ERR_PTR(-ENOENT);
	struct ssh_request *p, *n;

	spin_lock(&rtl->queue.lock);

	/* Find first non-locked request and remove it. */
	list_for_each_entry_safe(p, n, &rtl->queue.head, node) {
		if (unlikely(test_bit(SSH_REQUEST_SF_LOCKED_BIT, &p->state)))
			continue;

		if (!ssh_rtl_tx_can_process(p)) {
			rqst = ERR_PTR(-EBUSY);
			break;
		}

		/* Remove from queue and mark as transmitting. */
		set_bit(SSH_REQUEST_SF_TRANSMITTING_BIT, &p->state);
		/* Ensure state never gets zero. */
		smp_mb__before_atomic();
		clear_bit(SSH_REQUEST_SF_QUEUED_BIT, &p->state);

		list_del(&p->node);

		rqst = p;
		break;
	}

	spin_unlock(&rtl->queue.lock);
	return rqst;
}

static int ssh_rtl_tx_try_process_one(struct ssh_rtl *rtl)
{
	struct ssh_request *rqst;
	int status;

	/* Get and prepare next request for transmit. */
	rqst = ssh_rtl_tx_next(rtl);
	if (IS_ERR(rqst))
		return PTR_ERR(rqst);

	/* Add it to/mark it as pending. */
	status = ssh_rtl_tx_pending_push(rqst);
	if (status) {
		ssh_request_put(rqst);
		return -EAGAIN;
	}

	/* Submit packet. */
	status = ssh_ptl_submit(&rtl->ptl, &rqst->packet);
	if (status == -ESHUTDOWN) {
		/*
		 * Packet has been refused due to the packet layer shutting
		 * down. Complete it here.
		 */
		set_bit(SSH_REQUEST_SF_LOCKED_BIT, &rqst->state);
		/*
		 * Note: A barrier is not required here, as there are only two
		 * references in the system at this point: The one that we have,
		 * and the other one that belongs to the pending set. Due to the
		 * request being marked as "transmitting", our process is the
		 * only one allowed to remove the pending node and change the
		 * state. Normally, the task would fall to the packet callback,
		 * but as this is a path where submission failed, this callback
		 * will never be executed.
		 */

		ssh_rtl_pending_remove(rqst);
		ssh_rtl_complete_with_status(rqst, -ESHUTDOWN);

		ssh_request_put(rqst);
		return -ESHUTDOWN;

	} else if (status) {
		/*
		 * If submitting the packet failed and the packet layer isn't
		 * shutting down, the packet has either been submitted/queued
		 * before (-EALREADY, which cannot happen as we have
		 * guaranteed that requests cannot be re-submitted), or the
		 * packet was marked as locked (-EINVAL). To mark the packet
		 * locked at this stage, the request, and thus the packets
		 * itself, had to have been canceled. Simply drop the
		 * reference. Cancellation itself will remove it from the set
		 * of pending requests.
		 */

		WARN_ON(status != -EINVAL);

		ssh_request_put(rqst);
		return -EAGAIN;
	}

	ssh_request_put(rqst);
	return 0;
}

static bool ssh_rtl_tx_schedule(struct ssh_rtl *rtl)
{
	if (atomic_read(&rtl->pending.count) >= SSH_RTL_MAX_PENDING)
		return false;

	if (ssh_rtl_queue_empty(rtl))
		return false;

	return schedule_work(&rtl->tx.work);
}

static void ssh_rtl_tx_work_fn(struct work_struct *work)
{
	struct ssh_rtl *rtl = to_ssh_rtl(work, tx.work);
	unsigned int iterations = SSH_RTL_TX_BATCH;
	int status;

	/*
	 * Try to be nice and not block/live-lock the workqueue: Run a maximum
	 * of 10 tries, then re-submit if necessary. This should not be
	 * necessary for normal execution, but guarantee it anyway.
	 */
	do {
		status = ssh_rtl_tx_try_process_one(rtl);
		if (status == -ENOENT || status == -EBUSY)
			return;		/* No more requests to process. */

		if (status == -ESHUTDOWN) {
			/*
			 * Packet system shutting down. No new packets can be
			 * transmitted. Return silently, the party initiating
			 * the shutdown should handle the rest.
			 */
			return;
		}

		WARN_ON(status != 0 && status != -EAGAIN);
	} while (--iterations);

	/* Out of tries, reschedule. */
	ssh_rtl_tx_schedule(rtl);
}

/**
 * ssh_rtl_submit() - Submit a request to the transport layer.
 * @rtl: The request transport layer.
 * @rqst: The request to submit.
 *
 * Submits a request to the transport layer. A single request may not be
 * submitted multiple times without reinitializing it.
 *
 * Return: Returns zero on success, %-EINVAL if the request type is invalid or
 * the request has been canceled prior to submission, %-EALREADY if the
 * request has already been submitted, or %-ESHUTDOWN in case the request
 * transport layer has been shut down.
 */
int ssh_rtl_submit(struct ssh_rtl *rtl, struct ssh_request *rqst)
{
	trace_ssam_request_submit(rqst);

	/*
	 * Ensure that requests expecting a response are sequenced. If this
	 * invariant ever changes, see the comment in ssh_rtl_complete() on what
	 * is required to be changed in the code.
	 */
	if (test_bit(SSH_REQUEST_TY_HAS_RESPONSE_BIT, &rqst->state))
		if (!test_bit(SSH_PACKET_TY_SEQUENCED_BIT, &rqst->packet.state))
			return -EINVAL;

	spin_lock(&rtl->queue.lock);

	/*
	 * Try to set ptl and check if this request has already been submitted.
	 *
	 * Must be inside lock as we might run into a lost update problem
	 * otherwise: If this were outside of the lock, cancellation in
	 * ssh_rtl_cancel_nonpending() may run after we've set the ptl
	 * reference but before we enter the lock. In that case, we'd detect
	 * that the request is being added to the queue and would try to remove
	 * it from that, but removal might fail because it hasn't actually been
	 * added yet. By putting this cmpxchg in the critical section, we
	 * ensure that the queuing detection only triggers when we are already
	 * in the critical section and the remove process will wait until the
	 * push operation has been completed (via lock) due to that. Only then,
	 * we can safely try to remove it.
	 */
	if (cmpxchg(&rqst->packet.ptl, NULL, &rtl->ptl)) {
		spin_unlock(&rtl->queue.lock);
		return -EALREADY;
	}

	/*
	 * Ensure that we set ptl reference before we continue modifying state.
	 * This is required for non-pending cancellation. This barrier is paired
	 * with the one in ssh_rtl_cancel_nonpending().
	 *
	 * By setting the ptl reference before we test for "locked", we can
	 * check if the "locked" test may have already run. See comments in
	 * ssh_rtl_cancel_nonpending() for more detail.
	 */
	smp_mb__after_atomic();

	if (test_bit(SSH_RTL_SF_SHUTDOWN_BIT, &rtl->state)) {
		spin_unlock(&rtl->queue.lock);
		return -ESHUTDOWN;
	}

	if (test_bit(SSH_REQUEST_SF_LOCKED_BIT, &rqst->state)) {
		spin_unlock(&rtl->queue.lock);
		return -EINVAL;
	}

	set_bit(SSH_REQUEST_SF_QUEUED_BIT, &rqst->state);
	list_add_tail(&ssh_request_get(rqst)->node, &rtl->queue.head);

	spin_unlock(&rtl->queue.lock);

	ssh_rtl_tx_schedule(rtl);
	return 0;
}

static void ssh_rtl_timeout_reaper_mod(struct ssh_rtl *rtl, ktime_t now,
				       ktime_t expires)
{
	unsigned long delta = msecs_to_jiffies(ktime_ms_delta(expires, now));
	ktime_t aexp = ktime_add(expires, SSH_RTL_REQUEST_TIMEOUT_RESOLUTION);

	spin_lock(&rtl->rtx_timeout.lock);

	/* Re-adjust / schedule reaper only if it is above resolution delta. */
	if (ktime_before(aexp, rtl->rtx_timeout.expires)) {
		rtl->rtx_timeout.expires = expires;
		mod_delayed_work(system_wq, &rtl->rtx_timeout.reaper, delta);
	}

	spin_unlock(&rtl->rtx_timeout.lock);
}

static void ssh_rtl_timeout_start(struct ssh_request *rqst)
{
	struct ssh_rtl *rtl = ssh_request_rtl(rqst);
	ktime_t timestamp = ktime_get_coarse_boottime();
	ktime_t timeout = rtl->rtx_timeout.timeout;

	if (test_bit(SSH_REQUEST_SF_LOCKED_BIT, &rqst->state))
		return;

	/*
	 * Note: The timestamp gets set only once. This happens on the packet
	 * callback. All other access to it is read-only.
	 */
	WRITE_ONCE(rqst->timestamp, timestamp);

	/*
	 * Ensure timestamp is set before starting the reaper. Paired with
	 * implicit barrier following check on ssh_request_get_expiration() in
	 * ssh_rtl_timeout_reap.
	 */
	smp_mb__after_atomic();

	ssh_rtl_timeout_reaper_mod(rtl, timestamp, timestamp + timeout);
}

static void ssh_rtl_complete(struct ssh_rtl *rtl,
			     const struct ssh_command *command,
			     const struct ssam_span *command_data)
{
	struct ssh_request *r = NULL;
	struct ssh_request *p, *n;
	u16 rqid = get_unaligned_le16(&command->rqid);

	trace_ssam_rx_response_received(command, command_data->len);

	/*
	 * Get request from pending based on request ID and mark it as response
	 * received and locked.
	 */
	spin_lock(&rtl->pending.lock);
	list_for_each_entry_safe(p, n, &rtl->pending.head, node) {
		/* We generally expect requests to be processed in order. */
		if (unlikely(ssh_request_get_rqid(p) != rqid))
			continue;

		/* Simulate response timeout. */
		if (ssh_rtl_should_drop_response()) {
			spin_unlock(&rtl->pending.lock);

			trace_ssam_ei_rx_drop_response(p);
			rtl_info(rtl, "request error injection: dropping response for request %p\n",
				 &p->packet);
			return;
		}

		/*
		 * Mark as "response received" and "locked" as we're going to
		 * complete it.
		 */
		set_bit(SSH_REQUEST_SF_LOCKED_BIT, &p->state);
		set_bit(SSH_REQUEST_SF_RSPRCVD_BIT, &p->state);
		/* Ensure state never gets zero. */
		smp_mb__before_atomic();
		clear_bit(SSH_REQUEST_SF_PENDING_BIT, &p->state);

		atomic_dec(&rtl->pending.count);
		list_del(&p->node);

		r = p;
		break;
	}
	spin_unlock(&rtl->pending.lock);

	if (!r) {
		rtl_warn(rtl, "rtl: dropping unexpected command message (rqid = %#06x)\n",
			 rqid);
		return;
	}

	/* If the request hasn't been completed yet, we will do this now. */
	if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state)) {
		ssh_request_put(r);
		ssh_rtl_tx_schedule(rtl);
		return;
	}

	/*
	 * Make sure the request has been transmitted. In case of a sequenced
	 * request, we are guaranteed that the completion callback will run on
	 * the receiver thread directly when the ACK for the packet has been
	 * received. Similarly, this function is guaranteed to run on the
	 * receiver thread. Thus we are guaranteed that if the packet has been
	 * successfully transmitted and received an ACK, the transmitted flag
	 * has been set and is visible here.
	 *
	 * We are currently not handling unsequenced packets here, as those
	 * should never expect a response as ensured in ssh_rtl_submit. If this
	 * ever changes, one would have to test for
	 *
	 *	(r->state & (transmitting | transmitted))
	 *
	 * on unsequenced packets to determine if they could have been
	 * transmitted. There are no synchronization guarantees as in the
	 * sequenced case, since, in this case, the callback function will not
	 * run on the same thread. Thus an exact determination is impossible.
	 */
	if (!test_bit(SSH_REQUEST_SF_TRANSMITTED_BIT, &r->state)) {
		rtl_err(rtl, "rtl: received response before ACK for request (rqid = %#06x)\n",
			rqid);

		/*
		 * NB: Timeout has already been canceled, request already been
		 * removed from pending and marked as locked and completed. As
		 * we receive a "false" response, the packet might still be
		 * queued though.
		 */
		ssh_rtl_queue_remove(r);

		ssh_rtl_complete_with_status(r, -EREMOTEIO);
		ssh_request_put(r);

		ssh_rtl_tx_schedule(rtl);
		return;
	}

	/*
	 * NB: Timeout has already been canceled, request already been
	 * removed from pending and marked as locked and completed. The request
	 * can also not be queued any more, as it has been marked as
	 * transmitting and later transmitted. Thus no need to remove it from
	 * anywhere.
	 */

	ssh_rtl_complete_with_rsp(r, command, command_data);
	ssh_request_put(r);

	ssh_rtl_tx_schedule(rtl);
}

static bool ssh_rtl_cancel_nonpending(struct ssh_request *r)
{
	struct ssh_rtl *rtl;
	unsigned long flags, fixed;
	bool remove;

	/*
	 * Handle unsubmitted request: Try to mark the packet as locked,
	 * expecting the state to be zero (i.e. unsubmitted). Note that, if
	 * setting the state worked, we might still be adding the packet to the
	 * queue in a currently executing submit call. In that case, however,
	 * ptl reference must have been set previously, as locked is checked
	 * after setting ptl. Furthermore, when the ptl reference is set, the
	 * submission process is guaranteed to have entered the critical
	 * section. Thus only if we successfully locked this request and ptl is
	 * NULL, we have successfully removed the request, i.e. we are
	 * guaranteed that, due to the "locked" check in ssh_rtl_submit(), the
	 * packet will never be added. Otherwise, we need to try and grab it
	 * from the queue, where we are now guaranteed that the packet is or has
	 * been due to the critical section.
	 *
	 * Note that if the cmpxchg() fails, we are guaranteed that ptl has
	 * been set and is non-NULL, as states can only be nonzero after this
	 * has been set. Also note that we need to fetch the static (type)
	 * flags to ensure that they don't cause the cmpxchg() to fail.
	 */
	fixed = READ_ONCE(r->state) & SSH_REQUEST_FLAGS_TY_MASK;
	flags = cmpxchg(&r->state, fixed, fixed | BIT(SSH_REQUEST_SF_LOCKED_BIT));

	/*
	 * Force correct ordering with regards to state and ptl reference access
	 * to safe-guard cancellation to concurrent submission against a
	 * lost-update problem. First try to exchange state, then also check
	 * ptl if that worked. This barrier is paired with the
	 * one in ssh_rtl_submit().
	 */
	smp_mb__after_atomic();

	if (flags == fixed && !READ_ONCE(r->packet.ptl)) {
		if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
			return true;

		ssh_rtl_complete_with_status(r, -ECANCELED);
		return true;
	}

	rtl = ssh_request_rtl(r);
	spin_lock(&rtl->queue.lock);

	/*
	 * Note: 1) Requests cannot be re-submitted. 2) If a request is
	 * queued, it cannot be "transmitting"/"pending" yet. Thus, if we
	 * successfully remove the request here, we have removed all its
	 * occurrences in the system.
	 */

	remove = test_and_clear_bit(SSH_REQUEST_SF_QUEUED_BIT, &r->state);
	if (!remove) {
		spin_unlock(&rtl->queue.lock);
		return false;
	}

	set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state);
	list_del(&r->node);

	spin_unlock(&rtl->queue.lock);

	ssh_request_put(r);	/* Drop reference obtained from queue. */

	if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
		return true;

	ssh_rtl_complete_with_status(r, -ECANCELED);
	return true;
}

static bool ssh_rtl_cancel_pending(struct ssh_request *r)
{
	/* If the packet is already locked, it's going to be removed shortly. */
	if (test_and_set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state))
		return true;

	/*
	 * Now that we have locked the packet, we have guaranteed that it can't
	 * be added to the system any more. If ptl is NULL, the locked
	 * check in ssh_rtl_submit() has not been run and any submission,
	 * currently in progress or called later, won't add the packet. Thus we
	 * can directly complete it.
	 *
	 * The implicit memory barrier of test_and_set_bit() should be enough
	 * to ensure that the correct order (first lock, then check ptl) is
	 * ensured. This is paired with the barrier in ssh_rtl_submit().
	 */
	if (!READ_ONCE(r->packet.ptl)) {
		if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
			return true;

		ssh_rtl_complete_with_status(r, -ECANCELED);
		return true;
	}

	/*
	 * Try to cancel the packet. If the packet has not been completed yet,
	 * this will subsequently (and synchronously) call the completion
	 * callback of the packet, which will complete the request.
	 */
	ssh_ptl_cancel(&r->packet);

	/*
	 * If the packet has been completed with success, i.e. has not been
	 * canceled by the above call, the request may not have been completed
	 * yet (may be waiting for a response). Check if we need to do this
	 * here.
	 */
	if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
		return true;

	ssh_rtl_queue_remove(r);
	ssh_rtl_pending_remove(r);
	ssh_rtl_complete_with_status(r, -ECANCELED);

	return true;
}

/**
 * ssh_rtl_cancel() - Cancel request.
 * @rqst: The request to cancel.
 * @pending: Whether to also cancel pending requests.
 *
 * Cancels the given request. If @pending is %false, this will not cancel
 * pending requests, i.e. requests that have already been submitted to the
 * packet layer but not been completed yet. If @pending is %true, this will
 * cancel the given request regardless of the state it is in.
 *
 * If the request has been canceled by calling this function, both completion
 * and release callbacks of the request will be executed in a reasonable
 * time-frame. This may happen during execution of this function, however,
 * there is no guarantee for this. For example, a request currently
 * transmitting will be canceled/completed only after transmission has
 * completed, and the respective callbacks will be executed on the transmitter
 * thread, which may happen during, but also some time after execution of the
 * cancel function.
 *
 * Return: Returns %true if the given request has been canceled or completed,
 * either by this function or prior to calling this function, %false
 * otherwise. If @pending is %true, this function will always return %true.
 */
bool ssh_rtl_cancel(struct ssh_request *rqst, bool pending)
{
	struct ssh_rtl *rtl;
	bool canceled;

	if (test_and_set_bit(SSH_REQUEST_SF_CANCELED_BIT, &rqst->state))
		return true;

	trace_ssam_request_cancel(rqst);

	if (pending)
		canceled = ssh_rtl_cancel_pending(rqst);
	else
		canceled = ssh_rtl_cancel_nonpending(rqst);

	/* Note: rtl may be NULL if request has not been submitted yet. */
	rtl = ssh_request_rtl(rqst);
	if (canceled && rtl)
		ssh_rtl_tx_schedule(rtl);

	return canceled;
}

static void ssh_rtl_packet_callback(struct ssh_packet *p, int status)
{
	struct ssh_request *r = to_ssh_request(p);

	if (unlikely(status)) {
		set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state);

		if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
			return;

		/*
		 * The packet may get canceled even though it has not been
		 * submitted yet. The request may still be queued. Check the
		 * queue and remove it if necessary. As the timeout would have
		 * been started in this function on success, there's no need
		 * to cancel it here.
		 */
		ssh_rtl_queue_remove(r);
		ssh_rtl_pending_remove(r);
		ssh_rtl_complete_with_status(r, status);

		ssh_rtl_tx_schedule(ssh_request_rtl(r));
		return;
	}

	/* Update state: Mark as transmitted and clear transmitting. */
	set_bit(SSH_REQUEST_SF_TRANSMITTED_BIT, &r->state);
	/* Ensure state never gets zero. */
	smp_mb__before_atomic();
	clear_bit(SSH_REQUEST_SF_TRANSMITTING_BIT, &r->state);

	/* If we expect a response, we just need to start the timeout. */
	if (test_bit(SSH_REQUEST_TY_HAS_RESPONSE_BIT, &r->state)) {
		/*
		 * Note: This is the only place where the timestamp gets set,
		 * all other access to it is read-only.
		 */
		ssh_rtl_timeout_start(r);
		return;
	}

	/*
	 * If we don't expect a response, lock, remove, and complete the
	 * request. Note that, at this point, the request is guaranteed to have
	 * left the queue and no timeout has been started. Thus we only need to
	 * remove it from pending. If the request has already been completed (it
	 * may have been canceled) return.
	 */

	set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state);
	if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
		return;

	ssh_rtl_pending_remove(r);
	ssh_rtl_complete_with_status(r, 0);

	ssh_rtl_tx_schedule(ssh_request_rtl(r));
}

static ktime_t ssh_request_get_expiration(struct ssh_request *r, ktime_t timeout)
{
	ktime_t timestamp = READ_ONCE(r->timestamp);

	if (timestamp != KTIME_MAX)
		return ktime_add(timestamp, timeout);
	else
		return KTIME_MAX;
}

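/*
 * A timestamp of KTIME_MAX means that the timestamp has not been set yet: it
 * is initialized to KTIME_MAX in ssh_request_init() and only ever written in
 * the packet callback path. Such requests report KTIME_MAX as expiration and
 * are therefore never reaped before their transmission has been acknowledged.
 */
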
static void ssh_rtl_timeout_reap(struct work_struct *work)
{
	struct ssh_rtl *rtl = to_ssh_rtl(work, rtx_timeout.reaper.work);
	struct ssh_request *r, *n;
	LIST_HEAD(claimed);
	ktime_t now = ktime_get_coarse_boottime();
	ktime_t timeout = rtl->rtx_timeout.timeout;
	ktime_t next = KTIME_MAX;

	trace_ssam_rtl_timeout_reap(atomic_read(&rtl->pending.count));

	/*
	 * Mark reaper as "not pending". This is done before checking any
	 * requests to avoid lost-update type problems.
	 */
	spin_lock(&rtl->rtx_timeout.lock);
	rtl->rtx_timeout.expires = KTIME_MAX;
	spin_unlock(&rtl->rtx_timeout.lock);

	spin_lock(&rtl->pending.lock);
	list_for_each_entry_safe(r, n, &rtl->pending.head, node) {
		ktime_t expires = ssh_request_get_expiration(r, timeout);

		/*
		 * Check if the timeout hasn't expired yet. Find out next
		 * expiration date to be handled after this run.
		 */
		if (ktime_after(expires, now)) {
			next = ktime_before(expires, next) ? expires : next;
			continue;
		}

		/* Avoid further transitions if locked. */
		if (test_and_set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state))
			continue;

		/*
		 * We have now marked the packet as locked. Thus it cannot be
		 * added to the pending or queued lists again after we've
		 * removed it here. We can therefore re-use the node of this
		 * packet temporarily.
		 */

		clear_bit(SSH_REQUEST_SF_PENDING_BIT, &r->state);

		atomic_dec(&rtl->pending.count);
		list_move_tail(&r->node, &claimed);
	}
	spin_unlock(&rtl->pending.lock);

	/* Cancel and complete the request. */
	list_for_each_entry_safe(r, n, &claimed, node) {
		trace_ssam_request_timeout(r);

		/*
		 * At this point we've removed the packet from pending. This
		 * means that we've obtained the last (only) reference of the
		 * system to it. Thus we can just complete it.
		 */
		if (!test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
			ssh_rtl_complete_with_status(r, -ETIMEDOUT);

		/*
		 * Drop the reference we've obtained by removing it from the
		 * pending set.
		 */
		list_del(&r->node);
		ssh_request_put(r);
	}

	/* Ensure that the reaper doesn't run again immediately. */
	next = max(next, ktime_add(now, SSH_RTL_REQUEST_TIMEOUT_RESOLUTION));
	if (next != KTIME_MAX)
		ssh_rtl_timeout_reaper_mod(rtl, now, next);

	ssh_rtl_tx_schedule(rtl);
}

static void ssh_rtl_rx_event(struct ssh_rtl *rtl, const struct ssh_command *cmd,
			     const struct ssam_span *data)
{
	trace_ssam_rx_event_received(cmd, data->len);

	rtl_dbg(rtl, "rtl: handling event (rqid: %#06x)\n",
		get_unaligned_le16(&cmd->rqid));

	rtl->ops.handle_event(rtl, cmd, data);
}

static void ssh_rtl_rx_command(struct ssh_ptl *p, const struct ssam_span *data)
{
	struct ssh_rtl *rtl = to_ssh_rtl(p, ptl);
	struct device *dev = &p->serdev->dev;
	struct ssh_command *command;
	struct ssam_span command_data;

	if (sshp_parse_command(dev, data, &command, &command_data))
		return;

	/*
	 * Check if the message was intended for us. If not, drop it.
	 *
	 * Note: We will need to change this to handle debug messages. On newer
	 * generation devices, these seem to be sent to SSAM_SSH_TID_DEBUG. We
	 * as host can still receive them as they can be forwarded via an
	 * override option on SAM, but doing so does not change the target ID
	 * to SSAM_SSH_TID_HOST.
	 */
	if (command->tid != SSAM_SSH_TID_HOST) {
		rtl_warn(rtl, "rtl: dropping message not intended for us (tid = %#04x)\n",
			 command->tid);
		return;
	}

	if (ssh_rqid_is_event(get_unaligned_le16(&command->rqid)))
		ssh_rtl_rx_event(rtl, command, &command_data);
	else
		ssh_rtl_complete(rtl, command, &command_data);
}

static void ssh_rtl_rx_data(struct ssh_ptl *p, const struct ssam_span *data)
{
	if (!data->len) {
		ptl_err(p, "rtl: rx: no data frame payload\n");
		return;
	}

	switch (data->ptr[0]) {
	case SSH_PLD_TYPE_CMD:
		ssh_rtl_rx_command(p, data);
		break;

	default:
		ptl_err(p, "rtl: rx: unknown frame payload type (type: %#04x)\n",
			data->ptr[0]);
		break;
	}
}

*p
)
961 struct ssh_request
*rqst
;
963 rqst
= to_ssh_request(p
);
964 rqst
->ops
->release(rqst
);
967 static const struct ssh_packet_ops ssh_rtl_packet_ops
= {
968 .complete
= ssh_rtl_packet_callback
,
969 .release
= ssh_rtl_packet_release
,
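/*
 * The request embeds its packet as a member, so the packet callbacks above
 * can recover the owning request via container_of(). A minimal sketch of the
 * relationship (to_ssh_request() is provided by the request layer header):
 *
 *	struct ssh_request *r = to_ssh_request(p);
 *	(i.e. container_of(p, struct ssh_request, packet))
 */
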
/**
 * ssh_request_init() - Initialize SSH request.
 * @rqst: The request to initialize.
 * @flags: Request flags, determining the type of the request.
 * @ops: Request operations.
 *
 * Initializes the given SSH request and underlying packet. Sets the message
 * buffer pointer to %NULL and the message buffer length to zero. This buffer
 * has to be set separately via ssh_request_set_data() before submission and
 * must contain a valid SSH request message.
 *
 * Return: Returns zero on success or %-EINVAL if the given flags are invalid.
 */
int ssh_request_init(struct ssh_request *rqst, enum ssam_request_flags flags,
		     const struct ssh_request_ops *ops)
{
	unsigned long type = BIT(SSH_PACKET_TY_BLOCKING_BIT);

	/* Unsequenced requests cannot have a response. */
	if (flags & SSAM_REQUEST_UNSEQUENCED && flags & SSAM_REQUEST_HAS_RESPONSE)
		return -EINVAL;

	if (!(flags & SSAM_REQUEST_UNSEQUENCED))
		type |= BIT(SSH_PACKET_TY_SEQUENCED_BIT);

	ssh_packet_init(&rqst->packet, type, SSH_PACKET_PRIORITY(DATA, 0),
			&ssh_rtl_packet_ops);

	INIT_LIST_HEAD(&rqst->node);

	rqst->state = 0;
	if (flags & SSAM_REQUEST_HAS_RESPONSE)
		rqst->state |= BIT(SSH_REQUEST_TY_HAS_RESPONSE_BIT);

	rqst->timestamp = KTIME_MAX;
	rqst->ops = ops;

	return 0;
}

/**
 * ssh_rtl_init() - Initialize request transport layer.
 * @rtl: The request transport layer to initialize.
 * @serdev: The underlying serial device, i.e. the lower-level transport.
 * @ops: Request transport layer operations.
 *
 * Initializes the given request transport layer and associated packet
 * transport layer. Transmitter and receiver threads must be started
 * separately via ssh_rtl_start(), after the request-layer has been
 * initialized and the lower-level serial device layer has been set up.
 *
 * Return: Returns zero on success and a nonzero error code on failure.
 */
int ssh_rtl_init(struct ssh_rtl *rtl, struct serdev_device *serdev,
		 const struct ssh_rtl_ops *ops)
{
	struct ssh_ptl_ops ptl_ops;
	int status;

	ptl_ops.data_received = ssh_rtl_rx_data;

	status = ssh_ptl_init(&rtl->ptl, serdev, &ptl_ops);
	if (status)
		return status;

	spin_lock_init(&rtl->queue.lock);
	INIT_LIST_HEAD(&rtl->queue.head);

	spin_lock_init(&rtl->pending.lock);
	INIT_LIST_HEAD(&rtl->pending.head);
	atomic_set_release(&rtl->pending.count, 0);

	INIT_WORK(&rtl->tx.work, ssh_rtl_tx_work_fn);

	spin_lock_init(&rtl->rtx_timeout.lock);
	rtl->rtx_timeout.timeout = SSH_RTL_REQUEST_TIMEOUT;
	rtl->rtx_timeout.expires = KTIME_MAX;
	INIT_DELAYED_WORK(&rtl->rtx_timeout.reaper, ssh_rtl_timeout_reap);

	rtl->ops = *ops;

	return 0;
}

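/*
 * Typical layer lifecycle, following the kernel-doc comments above and below
 * (a sketch, error handling omitted):
 *
 *	ssh_rtl_init(rtl, serdev, &ops);	(set up rtl and ptl state)
 *	ssh_rtl_start(rtl);			(start transmitter/receiver)
 *	...					(submit/cancel requests)
 *	ssh_rtl_shutdown(rtl);			(cancel everything, stop)
 *	ssh_rtl_destroy(rtl);			(free associated resources)
 */
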
/**
 * ssh_rtl_destroy() - Deinitialize request transport layer.
 * @rtl: The request transport layer to deinitialize.
 *
 * Deinitializes the given request transport layer and frees resources
 * associated with it. If receiver and/or transmitter threads have been
 * started, the layer must first be shut down via ssh_rtl_shutdown() before
 * this function can be called.
 */
void ssh_rtl_destroy(struct ssh_rtl *rtl)
{
	ssh_ptl_destroy(&rtl->ptl);
}

/**
 * ssh_rtl_start() - Start request transmitter and receiver.
 * @rtl: The request transport layer.
 *
 * Return: Returns zero on success, a negative error code on failure.
 */
int ssh_rtl_start(struct ssh_rtl *rtl)
{
	int status;

	status = ssh_ptl_tx_start(&rtl->ptl);
	if (status)
		return status;

	ssh_rtl_tx_schedule(rtl);

	status = ssh_ptl_rx_start(&rtl->ptl);
	if (status) {
		ssh_rtl_flush(rtl, msecs_to_jiffies(5000));
		ssh_ptl_tx_stop(&rtl->ptl);
		return status;
	}

	return 0;
}

struct ssh_flush_request {
	struct ssh_request base;
	struct completion completion;
	int status;
};

static void ssh_rtl_flush_request_complete(struct ssh_request *r,
					   const struct ssh_command *cmd,
					   const struct ssam_span *data,
					   int status)
{
	struct ssh_flush_request *rqst;

	rqst = container_of(r, struct ssh_flush_request, base);
	rqst->status = status;
}

static void ssh_rtl_flush_request_release(struct ssh_request *r)
{
	struct ssh_flush_request *rqst;

	rqst = container_of(r, struct ssh_flush_request, base);
	complete_all(&rqst->completion);
}

static const struct ssh_request_ops ssh_rtl_flush_request_ops = {
	.complete = ssh_rtl_flush_request_complete,
	.release = ssh_rtl_flush_request_release,
};

/**
 * ssh_rtl_flush() - Flush the request transport layer.
 * @rtl: request transport layer
 * @timeout: timeout for the flush operation in jiffies
 *
 * Queue a special flush request and wait for its completion. This request
 * will be completed after all other currently queued and pending requests
 * have been completed. Instead of a normal data packet, this request submits
 * a special flush packet, meaning that upon completion, also the underlying
 * packet transport layer has been flushed.
 *
 * Flushing the request layer guarantees that all previously submitted
 * requests have been fully completed before this call returns. Additionally,
 * flushing blocks execution of all later submitted requests until the flush
 * has been completed.
 *
 * If the caller ensures that no new requests are submitted after a call to
 * this function, the request transport layer is guaranteed to have no
 * remaining requests when this call returns. The same guarantee does not hold
 * for the packet layer, on which control packets may still be queued after
 * this call.
 *
 * Return: Returns zero on success, %-ETIMEDOUT if the flush timed out and has
 * been canceled as a result of the timeout, or %-ESHUTDOWN if the packet
 * and/or request transport layer has been shut down before this call. May
 * also return %-EINTR if the underlying packet transmission has been
 * interrupted.
 */
int ssh_rtl_flush(struct ssh_rtl *rtl, unsigned long timeout)
{
	const unsigned int init_flags = SSAM_REQUEST_UNSEQUENCED;
	struct ssh_flush_request rqst;
	int status;

	ssh_request_init(&rqst.base, init_flags, &ssh_rtl_flush_request_ops);
	rqst.base.packet.state |= BIT(SSH_PACKET_TY_FLUSH_BIT);
	rqst.base.packet.priority = SSH_PACKET_PRIORITY(FLUSH, 0);
	rqst.base.state |= BIT(SSH_REQUEST_TY_FLUSH_BIT);

	init_completion(&rqst.completion);

	status = ssh_rtl_submit(rtl, &rqst.base);
	if (status)
		return status;

	ssh_request_put(&rqst.base);

	if (!wait_for_completion_timeout(&rqst.completion, timeout)) {
		ssh_rtl_cancel(&rqst.base, true);
		wait_for_completion(&rqst.completion);
	}

	WARN_ON(rqst.status != 0 && rqst.status != -ECANCELED &&
		rqst.status != -ESHUTDOWN && rqst.status != -EINTR);

	return rqst.status == -ECANCELED ? -ETIMEDOUT : rqst.status;
}

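/*
 * Note on the on-stack flush request above: this is safe because the release
 * callback, which only fires once all references to the request have been
 * dropped, is what signals the completion that ssh_rtl_flush() blocks on.
 * The function thus cannot return (and invalidate its stack frame) while the
 * layer still holds a reference to the request.
 */
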
/**
 * ssh_rtl_shutdown() - Shut down request transport layer.
 * @rtl: The request transport layer.
 *
 * Shuts down the request transport layer, removing and canceling all queued
 * and pending requests. Requests canceled by this operation will be completed
 * with %-ESHUTDOWN as status. Receiver and transmitter threads will be
 * stopped, the lower-level packet layer will be shutdown.
 *
 * As a result of this function, the transport layer will be marked as shut
 * down. Submission of requests after the transport layer has been shut down
 * will fail with %-ESHUTDOWN.
 */
void ssh_rtl_shutdown(struct ssh_rtl *rtl)
{
	struct ssh_request *r, *n;
	LIST_HEAD(claimed);
	int pending;

	set_bit(SSH_RTL_SF_SHUTDOWN_BIT, &rtl->state);
	/*
	 * Ensure that the layer gets marked as shut-down before actually
	 * stopping it. In combination with the check in ssh_rtl_submit(),
	 * this guarantees that no new requests can be added and all already
	 * queued requests are properly canceled.
	 */
	smp_mb__after_atomic();

	/* Remove requests from queue. */
	spin_lock(&rtl->queue.lock);
	list_for_each_entry_safe(r, n, &rtl->queue.head, node) {
		set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state);
		/* Ensure state never gets zero. */
		smp_mb__before_atomic();
		clear_bit(SSH_REQUEST_SF_QUEUED_BIT, &r->state);

		list_move_tail(&r->node, &claimed);
	}
	spin_unlock(&rtl->queue.lock);

	/*
	 * We have now guaranteed that the queue is empty and no more new
	 * requests can be submitted (i.e. it will stay empty). This means that
	 * calling ssh_rtl_tx_schedule() will not schedule tx.work any more. So
	 * we can simply call cancel_work_sync() on tx.work here and when that
	 * returns, we've locked it down. This also means that after this call,
	 * we don't submit any more packets to the underlying packet layer, so
	 * we can also shut that down.
	 */

	cancel_work_sync(&rtl->tx.work);
	ssh_ptl_shutdown(&rtl->ptl);
	cancel_delayed_work_sync(&rtl->rtx_timeout.reaper);

	/*
	 * Shutting down the packet layer should also have canceled all
	 * requests. Thus the pending set should be empty. Attempt to handle
	 * this gracefully anyways, even though this should be dead code.
	 */

	pending = atomic_read(&rtl->pending.count);
	if (WARN_ON(pending)) {
		spin_lock(&rtl->pending.lock);
		list_for_each_entry_safe(r, n, &rtl->pending.head, node) {
			set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state);
			/* Ensure state never gets zero. */
			smp_mb__before_atomic();
			clear_bit(SSH_REQUEST_SF_PENDING_BIT, &r->state);

			list_move_tail(&r->node, &claimed);
		}
		spin_unlock(&rtl->pending.lock);
	}

	/* Finally, cancel and complete the requests we claimed before. */
	list_for_each_entry_safe(r, n, &claimed, node) {
		/*
		 * We need test_and_set() because we still might compete with
		 * cancellation.
		 */
		if (!test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
			ssh_rtl_complete_with_status(r, -ESHUTDOWN);

		/*
		 * Drop the reference we've obtained by removing it from the
		 * lists.
		 */
		list_del(&r->node);
		ssh_request_put(r);
	}
}