// SPDX-License-Identifier: GPL-2.0+
/*
 * SSH packet transport layer.
 *
 * Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com>
 */

#include <linux/unaligned.h>

#include <linux/atomic.h>
#include <linux/error-injection.h>
#include <linux/jiffies.h>
#include <linux/kfifo.h>
#include <linux/kref.h>
#include <linux/kthread.h>
#include <linux/ktime.h>
#include <linux/limits.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/serdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#include <linux/surface_aggregator/serial_hub.h>

#include "ssh_msgb.h"
#include "ssh_packet_layer.h"
#include "ssh_parser.h"

#include "trace.h"

/*
 * To simplify reasoning about the code below, we define a few concepts. The
 * system below is similar to a state-machine for packets, however, there are
 * too many states to explicitly write them down. To (somewhat) manage the
 * states and packets we rely on flags, reference counting, and some simple
 * concepts. State transitions are triggered by actions.
 *
 * >> Actions <<
 *
 * - submit
 * - transmission start (process next item in queue)
 * - transmission finished (guaranteed to never be parallel to transmission
 *   start)
 * - ACK received
 * - NAK received (this is equivalent to issuing re-submit for all pending
 *   packets)
 * - timeout (this is equivalent to re-issuing a submit or canceling)
 * - cancel (non-pending and pending)
 *
 * >> Data Structures, Packet Ownership, General Overview <<
 *
 * The code below employs two main data structures: The packet queue,
 * containing all packets scheduled for transmission, and the set of pending
 * packets, containing all packets awaiting an ACK.
 *
 * Shared ownership of a packet is controlled via reference counting. Inside
 * the transport system are a total of five packet owners:
 *
 * - the packet queue,
 * - the pending set,
 * - the transmitter thread,
 * - the receiver thread (via ACKing), and
 * - the timeout work item.
 *
 * Normal operation is as follows: The initial reference of the packet is
 * obtained by submitting the packet and queuing it. The transmitter thread
 * takes packets from the queue. By doing so, it does not increment the
 * refcount but takes over the reference (removing it from the queue). If the
 * packet is sequenced (i.e. needs to be ACKed by the client), the transmitter
 * thread sets up the timeout and adds the packet to the pending set before
 * starting to transmit it. As the timeout is handled by a reaper task, no
 * additional reference for it is needed. After the transmit is done, the
 * reference held by the transmitter thread is dropped. If the packet is
 * unsequenced (i.e. does not need an ACK), the packet is completed by the
 * transmitter thread before dropping that reference.
 *
 * On receival of an ACK, the receiver thread removes and obtains the
 * reference to the packet from the pending set. The receiver thread will then
 * complete the packet and drop its reference.
 *
 * On receival of a NAK, the receiver thread re-submits all currently pending
 * packets.
 *
 * Packet timeouts are detected by the timeout reaper. This is a task,
 * scheduled depending on the earliest packet timeout expiration date,
 * checking all currently pending packets if their timeout has expired. If the
 * timeout of a packet has expired, it is re-submitted and the number of tries
 * of this packet is incremented. If this number reaches its limit, the packet
 * will be completed with a failure.
 *
 * On transmission failure (such as repeated packet timeouts), the completion
 * callback is immediately run on the thread on which the error was detected.
 *
 * To ensure that a packet eventually leaves the system it is marked as
 * "locked" directly before it is going to be completed or when it is
 * canceled. Marking a packet as "locked" has the effect that passing and
 * creating new references of the packet is disallowed. This means that the
 * packet cannot be added to the queue, the pending set, and the timeout, or
 * be picked up by the transmitter thread or receiver thread. To remove a
 * packet from the system it has to be marked as locked and subsequently all
 * references from the data structures (queue, pending) have to be removed.
 * References held by threads will eventually be dropped automatically as
 * their execution progresses.
 *
 * Note that the packet completion callback is, in case of success and for a
 * sequenced packet, guaranteed to run on the receiver thread, thus providing
 * a way to reliably identify responses to the packet. The packet completion
 * callback is only run once and it does not indicate that the packet has
 * fully left the system (for this, one should rely on the release method,
 * triggered when the reference count of the packet reaches zero). In case of
 * re-submission (and with somewhat unlikely timing), it may be possible that
 * the packet is being re-transmitted while the completion callback runs.
 * Completion will occur both on success and internal error, as well as when
 * the packet is canceled.
 *
 * >> Flags <<
 *
 * Flags are used to indicate the state and progression of a packet. Some flags
 * have stricter guarantees than others:
 *
 * - locked
 *   Indicates if the packet is locked. If the packet is locked, passing and/or
 *   creating additional references to the packet is forbidden. The packet thus
 *   may not be queued, dequeued, or removed or added to the pending set. Note
 *   that the packet state flags may still change (e.g. it may be marked as
 *   ACKed, transmitted, ...).
 *
 * - completed
 *   Indicates if the packet completion callback has been executed or is about
 *   to be executed. This flag is used to ensure that the packet completion
 *   callback is only run once.
 *
 * - queued
 *   Indicates if a packet is present in the submission queue or not. This flag
 *   must only be modified with the queue lock held, and must be coherent to the
 *   presence of the packet in the queue.
 *
 * - pending
 *   Indicates if a packet is present in the set of pending packets or not.
 *   This flag must only be modified with the pending lock held, and must be
 *   coherent to the presence of the packet in the pending set.
 *
 * - transmitting
 *   Indicates if the packet is currently transmitting. In case of
 *   re-transmissions, it is only safe to wait on the "transmitted" completion
 *   after this flag has been set. The completion will be set both in success
 *   and error case.
 *
 * - transmitted
 *   Indicates if the packet has been transmitted. This flag is not cleared by
 *   the system, thus it indicates the first transmission only.
 *
 * - acked
 *   Indicates if the packet has been acknowledged by the client. There are no
 *   other guarantees given. For example, the packet may still be canceled
 *   and/or the completion may be triggered with an error even though this bit
 *   is set. Rely on the status provided to the completion callback instead.
 *
 * - canceled
 *   Indicates if the packet has been canceled from the outside. There are no
 *   other guarantees given. Specifically, the packet may be completed by
 *   another part of the system before the cancellation attempts to complete it.
 *
 * >> General Notes <<
 *
 * - To avoid deadlocks, if both queue and pending locks are required, the
 *   pending lock must be acquired before the queue lock.
 *
 * - The packet priority must be accessed only while holding the queue lock.
 *
 * - The packet timestamp must be accessed only while holding the pending
 *   lock.
 *
 * An illustrative life-cycle sketch follows directly below these notes.
 */
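
/*
 * Illustrative sketch (not used by the driver itself): the normal packet
 * life-cycle described above, as seen from a layer sitting on top of the
 * packet layer. All ptl_example_* names are hypothetical; the packet is
 * assumed to be embedded in a caller-owned, kmalloc()'d container and the
 * message buffer to already contain a valid SSH message.
 */
struct ptl_example_packet {
	struct ssh_packet packet;
	u8 buf[SSH_MSG_LEN_CTRL];
};

static void ptl_example_release(struct ssh_packet *p)
{
	/* Last reference dropped: free the containing allocation. */
	kfree(container_of(p, struct ptl_example_packet, packet));
}

static void ptl_example_complete(struct ssh_packet *p, int status)
{
	/* Runs exactly once: on ACK (status == 0), error, or cancellation. */
	ptl_dbg_cond(p->ptl, "example: packet %p completed: %d\n", p, status);
}

static const struct ssh_packet_ops ptl_example_packet_ops = {
	.release = ptl_example_release,
	.complete = ptl_example_complete,
};

static inline int ptl_example_send(struct ssh_ptl *ptl, const u8 *msg, size_t len)
{
	struct ptl_example_packet *ep;
	int status;

	if (len > sizeof(ep->buf))
		return -EINVAL;

	ep = kzalloc(sizeof(*ep), GFP_KERNEL);
	if (!ep)
		return -ENOMEM;

	/* Sequenced, blocking data packet; base priority DATA, try count 0. */
	ssh_packet_init(&ep->packet,
			BIT(SSH_PACKET_TY_SEQUENCED_BIT) | BIT(SSH_PACKET_TY_BLOCKING_BIT),
			SSH_PACKET_PRIORITY(DATA, 0), &ptl_example_packet_ops);

	memcpy(ep->buf, msg, len);
	ssh_packet_set_data(&ep->packet, ep->buf, len);

	/* The submission queue takes its own reference; drop the initial one. */
	status = ssh_ptl_submit(ptl, &ep->packet);
	ssh_packet_put(&ep->packet);

	return status;
}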

/*
 * SSH_PTL_MAX_PACKET_TRIES - Maximum transmission attempts for packet.
 *
 * Maximum number of transmission attempts per sequenced packet in case of
 * time-outs. Must be smaller than 16. If the packet times out after this
 * amount of tries, the packet will be completed with %-ETIMEDOUT as status
 * code.
 */
#define SSH_PTL_MAX_PACKET_TRIES		3

/*
 * SSH_PTL_TX_TIMEOUT - Packet transmission timeout.
 *
 * Timeout in jiffies for packet transmission via the underlying serial
 * device. If transmitting the packet takes longer than this timeout, the
 * packet will be completed with -ETIMEDOUT. It will not be re-submitted.
 */
#define SSH_PTL_TX_TIMEOUT			HZ

/*
 * SSH_PTL_PACKET_TIMEOUT - Packet response timeout.
 *
 * Timeout as ktime_t delta for ACKs. If we have not received an ACK in this
 * time-frame after starting transmission, the packet will be re-submitted.
 */
#define SSH_PTL_PACKET_TIMEOUT			ms_to_ktime(1000)

/*
 * SSH_PTL_PACKET_TIMEOUT_RESOLUTION - Packet timeout granularity.
 *
 * Time-resolution for timeouts. Should be larger than one jiffy to avoid
 * direct re-scheduling of reaper work_struct.
 */
#define SSH_PTL_PACKET_TIMEOUT_RESOLUTION	ms_to_ktime(max(2000 / HZ, 50))

/*
 * SSH_PTL_MAX_PENDING - Maximum number of pending packets.
 *
 * Maximum number of sequenced packets concurrently waiting for an ACK.
 * Packets marked as blocking will not be transmitted while this limit is
 * reached.
 */
#define SSH_PTL_MAX_PENDING			1

/*
 * SSH_PTL_RX_BUF_LEN - Evaluation-buffer size in bytes.
 */
#define SSH_PTL_RX_BUF_LEN			4096

/*
 * SSH_PTL_RX_FIFO_LEN - Fifo input-buffer size in bytes.
 */
#define SSH_PTL_RX_FIFO_LEN			4096

#ifdef CONFIG_SURFACE_AGGREGATOR_ERROR_INJECTION

/**
 * ssh_ptl_should_drop_ack_packet() - Error injection hook to drop ACK packets.
 *
 * Useful to test detection and handling of automated re-transmits by the EC.
 * Specifically of packets that the EC considers not-ACKed but the driver
 * already considers ACKed (due to dropped ACK). In this case, the EC
 * re-transmits the packet-to-be-ACKed and the driver should detect it as
 * duplicate/already handled. Note that the driver should still send an ACK
 * for the re-transmitted packet.
 */
static noinline bool ssh_ptl_should_drop_ack_packet(void)
{
	return false;
}
ALLOW_ERROR_INJECTION(ssh_ptl_should_drop_ack_packet, TRUE);

/**
 * ssh_ptl_should_drop_nak_packet() - Error injection hook to drop NAK packets.
 *
 * Useful to test/force automated (timeout-based) re-transmit by the EC.
 * Specifically, packets that have not reached the driver completely/with valid
 * checksums. Only useful in combination with receival of (injected) bad data.
 */
static noinline bool ssh_ptl_should_drop_nak_packet(void)
{
	return false;
}
ALLOW_ERROR_INJECTION(ssh_ptl_should_drop_nak_packet, TRUE);

/**
 * ssh_ptl_should_drop_dsq_packet() - Error injection hook to drop sequenced
 * data packets.
 *
 * Useful to test re-transmit timeout of the driver. If the data packet has not
 * been ACKed after a certain time, the driver should re-transmit the packet up
 * to the limited number of times defined in SSH_PTL_MAX_PACKET_TRIES.
 */
static noinline bool ssh_ptl_should_drop_dsq_packet(void)
{
	return false;
}
ALLOW_ERROR_INJECTION(ssh_ptl_should_drop_dsq_packet, TRUE);

/**
 * ssh_ptl_should_fail_write() - Error injection hook to make
 * serdev_device_write() fail.
 *
 * Hook to simulate errors in serdev_device_write when transmitting packets.
 */
static noinline int ssh_ptl_should_fail_write(void)
{
	return 0;
}
ALLOW_ERROR_INJECTION(ssh_ptl_should_fail_write, ERRNO);

/**
 * ssh_ptl_should_corrupt_tx_data() - Error injection hook to simulate invalid
 * data being sent to the EC.
 *
 * Hook to simulate corrupt/invalid data being sent from host (driver) to EC.
 * Causes the packet data to be actively corrupted by overwriting it with
 * pre-defined values, such that it becomes invalid, causing the EC to respond
 * with a NAK packet. Useful to test handling of NAK packets received by the
 * driver.
 */
static noinline bool ssh_ptl_should_corrupt_tx_data(void)
{
	return false;
}
ALLOW_ERROR_INJECTION(ssh_ptl_should_corrupt_tx_data, TRUE);

/**
 * ssh_ptl_should_corrupt_rx_syn() - Error injection hook to simulate invalid
 * data being sent by the EC.
 *
 * Hook to simulate invalid SYN bytes, i.e. an invalid start of messages and
 * test handling thereof in the driver.
 */
static noinline bool ssh_ptl_should_corrupt_rx_syn(void)
{
	return false;
}
ALLOW_ERROR_INJECTION(ssh_ptl_should_corrupt_rx_syn, TRUE);

/**
 * ssh_ptl_should_corrupt_rx_data() - Error injection hook to simulate invalid
 * data being sent by the EC.
 *
 * Hook to simulate invalid data/checksum of the message frame and test handling
 * thereof in the driver.
 */
static noinline bool ssh_ptl_should_corrupt_rx_data(void)
{
	return false;
}
ALLOW_ERROR_INJECTION(ssh_ptl_should_corrupt_rx_data, TRUE);

static bool __ssh_ptl_should_drop_ack_packet(struct ssh_packet *packet)
{
	if (likely(!ssh_ptl_should_drop_ack_packet()))
		return false;

	trace_ssam_ei_tx_drop_ack_packet(packet);
	ptl_info(packet->ptl, "packet error injection: dropping ACK packet %p\n",
		 packet);

	return true;
}

static bool __ssh_ptl_should_drop_nak_packet(struct ssh_packet *packet)
{
	if (likely(!ssh_ptl_should_drop_nak_packet()))
		return false;

	trace_ssam_ei_tx_drop_nak_packet(packet);
	ptl_info(packet->ptl, "packet error injection: dropping NAK packet %p\n",
		 packet);

	return true;
}

static bool __ssh_ptl_should_drop_dsq_packet(struct ssh_packet *packet)
{
	if (likely(!ssh_ptl_should_drop_dsq_packet()))
		return false;

	trace_ssam_ei_tx_drop_dsq_packet(packet);
	ptl_info(packet->ptl,
		 "packet error injection: dropping sequenced data packet %p\n",
		 packet);

	return true;
}

static bool ssh_ptl_should_drop_packet(struct ssh_packet *packet)
{
	/* Ignore packets that don't carry any data (i.e. flush). */
	if (!packet->data.ptr || !packet->data.len)
		return false;

	switch (packet->data.ptr[SSH_MSGOFFSET_FRAME(type)]) {
	case SSH_FRAME_TYPE_ACK:
		return __ssh_ptl_should_drop_ack_packet(packet);

	case SSH_FRAME_TYPE_NAK:
		return __ssh_ptl_should_drop_nak_packet(packet);

	case SSH_FRAME_TYPE_DATA_SEQ:
		return __ssh_ptl_should_drop_dsq_packet(packet);

	default:
		return false;
	}
}

static int ssh_ptl_write_buf(struct ssh_ptl *ptl, struct ssh_packet *packet,
			     const unsigned char *buf, size_t count)
{
	int status;

	status = ssh_ptl_should_fail_write();
	if (unlikely(status)) {
		trace_ssam_ei_tx_fail_write(packet, status);
		ptl_info(packet->ptl,
			 "packet error injection: simulating transmit error %d, packet %p\n",
			 status, packet);

		return status;
	}

	return serdev_device_write_buf(ptl->serdev, buf, count);
}

static void ssh_ptl_tx_inject_invalid_data(struct ssh_packet *packet)
{
	/* Ignore packets that don't carry any data (i.e. flush). */
	if (!packet->data.ptr || !packet->data.len)
		return;

	/* Only allow sequenced data packets to be modified. */
	if (packet->data.ptr[SSH_MSGOFFSET_FRAME(type)] != SSH_FRAME_TYPE_DATA_SEQ)
		return;

	if (likely(!ssh_ptl_should_corrupt_tx_data()))
		return;

	trace_ssam_ei_tx_corrupt_data(packet);
	ptl_info(packet->ptl,
		 "packet error injection: simulating invalid transmit data on packet %p\n",
		 packet);

	/*
	 * NB: The value 0xb3 has been chosen more or less randomly so that it
	 * doesn't have any (major) overlap with the SYN bytes (aa 55) and is
	 * non-trivial (i.e. non-zero, non-0xff).
	 */
	memset(packet->data.ptr, 0xb3, packet->data.len);
}

static void ssh_ptl_rx_inject_invalid_syn(struct ssh_ptl *ptl,
					  struct ssam_span *data)
{
	struct ssam_span frame;

	/* Check if there actually is something to corrupt. */
	if (!sshp_find_syn(data, &frame))
		return;

	if (likely(!ssh_ptl_should_corrupt_rx_syn()))
		return;

	trace_ssam_ei_rx_corrupt_syn(data->len);

	data->ptr[1] = 0xb3;	/* Set second byte of SYN to "random" value. */
}

static void ssh_ptl_rx_inject_invalid_data(struct ssh_ptl *ptl,
					   struct ssam_span *frame)
{
	size_t payload_len, message_len;
	struct ssh_frame *sshf;

	/* Ignore incomplete messages, will get handled once it's complete. */
	if (frame->len < SSH_MESSAGE_LENGTH(0))
		return;

	/* Ignore incomplete messages, part 2. */
	payload_len = get_unaligned_le16(&frame->ptr[SSH_MSGOFFSET_FRAME(len)]);
	message_len = SSH_MESSAGE_LENGTH(payload_len);
	if (frame->len < message_len)
		return;

	if (likely(!ssh_ptl_should_corrupt_rx_data()))
		return;

	sshf = (struct ssh_frame *)&frame->ptr[SSH_MSGOFFSET_FRAME(type)];
	trace_ssam_ei_rx_corrupt_data(sshf);

	/*
	 * Flip bits in first byte of payload checksum. This is basically
	 * equivalent to a payload/frame data error without us having to worry
	 * about (the, arguably pretty small, probability of) accidental
	 * checksum collisions.
	 */
	frame->ptr[frame->len - 2] = ~frame->ptr[frame->len - 2];
}

#else /* CONFIG_SURFACE_AGGREGATOR_ERROR_INJECTION */

static inline bool ssh_ptl_should_drop_packet(struct ssh_packet *packet)
{
	return false;
}

static inline int ssh_ptl_write_buf(struct ssh_ptl *ptl,
				    struct ssh_packet *packet,
				    const unsigned char *buf,
				    size_t count)
{
	return serdev_device_write_buf(ptl->serdev, buf, count);
}

static inline void ssh_ptl_tx_inject_invalid_data(struct ssh_packet *packet)
{
}

static inline void ssh_ptl_rx_inject_invalid_syn(struct ssh_ptl *ptl,
						 struct ssam_span *data)
{
}

static inline void ssh_ptl_rx_inject_invalid_data(struct ssh_ptl *ptl,
						  struct ssam_span *frame)
{
}

#endif /* CONFIG_SURFACE_AGGREGATOR_ERROR_INJECTION */

static void __ssh_ptl_packet_release(struct kref *kref)
{
	struct ssh_packet *p = container_of(kref, struct ssh_packet, refcnt);

	trace_ssam_packet_release(p);

	ptl_dbg_cond(p->ptl, "ptl: releasing packet %p\n", p);
	p->ops->release(p);
}

/**
 * ssh_packet_get() - Increment reference count of packet.
 * @packet: The packet to increment the reference count of.
 *
 * Increments the reference count of the given packet. See ssh_packet_put()
 * for the counter-part of this function.
 *
 * Return: Returns the packet provided as input.
 */
struct ssh_packet *ssh_packet_get(struct ssh_packet *packet)
{
	if (packet)
		kref_get(&packet->refcnt);
	return packet;
}
EXPORT_SYMBOL_GPL(ssh_packet_get);

/**
 * ssh_packet_put() - Decrement reference count of packet.
 * @packet: The packet to decrement the reference count of.
 *
 * If the reference count reaches zero, the ``release`` callback specified in
 * the packet's &struct ssh_packet_ops, i.e. ``packet->ops->release``, will be
 * called.
 *
 * See ssh_packet_get() for the counter-part of this function.
 */
void ssh_packet_put(struct ssh_packet *packet)
{
	if (packet)
		kref_put(&packet->refcnt, __ssh_ptl_packet_release);
}
EXPORT_SYMBOL_GPL(ssh_packet_put);

static u8 ssh_packet_get_seq(struct ssh_packet *packet)
{
	return packet->data.ptr[SSH_MSGOFFSET_FRAME(seq)];
}

/**
 * ssh_packet_init() - Initialize SSH packet.
 * @packet: The packet to initialize.
 * @type: Type-flags of the packet.
 * @priority: Priority of the packet. See SSH_PACKET_PRIORITY() for details.
 * @ops: Packet operations.
 *
 * Initializes the given SSH packet. Sets the transmission buffer pointer to
 * %NULL and the transmission buffer length to zero. For data-type packets,
 * this buffer has to be set separately via ssh_packet_set_data() before
 * submission, and must contain a valid SSH message, i.e. frame with optional
 * payload of any type.
 */
void ssh_packet_init(struct ssh_packet *packet, unsigned long type,
		     u8 priority, const struct ssh_packet_ops *ops)
{
	kref_init(&packet->refcnt);

	packet->ptl = NULL;
	INIT_LIST_HEAD(&packet->queue_node);
	INIT_LIST_HEAD(&packet->pending_node);

	packet->state = type & SSH_PACKET_FLAGS_TY_MASK;
	packet->priority = priority;
	packet->timestamp = KTIME_MAX;

	packet->data.ptr = NULL;
	packet->data.len = 0;

	packet->ops = ops;
}

static struct kmem_cache *ssh_ctrl_packet_cache;

/**
 * ssh_ctrl_packet_cache_init() - Initialize the control packet cache.
 */
int ssh_ctrl_packet_cache_init(void)
{
	const unsigned int size = sizeof(struct ssh_packet) + SSH_MSG_LEN_CTRL;
	const unsigned int align = __alignof__(struct ssh_packet);
	struct kmem_cache *cache;

	cache = kmem_cache_create("ssam_ctrl_packet", size, align, 0, NULL);
	if (!cache)
		return -ENOMEM;

	ssh_ctrl_packet_cache = cache;
	return 0;
}

/**
 * ssh_ctrl_packet_cache_destroy() - Deinitialize the control packet cache.
 */
void ssh_ctrl_packet_cache_destroy(void)
{
	kmem_cache_destroy(ssh_ctrl_packet_cache);
	ssh_ctrl_packet_cache = NULL;
}

/**
 * ssh_ctrl_packet_alloc() - Allocate packet from control packet cache.
 * @packet: Where the pointer to the newly allocated packet should be stored.
 * @buffer: The buffer corresponding to this packet.
 * @flags: Flags used for allocation.
 *
 * Allocates a packet and corresponding transport buffer from the control
 * packet cache. Sets the packet's buffer reference to the allocated buffer.
 * The packet must be freed via ssh_ctrl_packet_free(), which will also free
 * the corresponding buffer. The corresponding buffer must not be freed
 * separately. Intended to be used with %ssh_ptl_ctrl_packet_ops as packet
 * operations.
 *
 * Return: Returns zero on success, %-ENOMEM if the allocation failed.
 */
static int ssh_ctrl_packet_alloc(struct ssh_packet **packet,
				 struct ssam_span *buffer, gfp_t flags)
{
	*packet = kmem_cache_alloc(ssh_ctrl_packet_cache, flags);
	if (!*packet)
		return -ENOMEM;

	buffer->ptr = (u8 *)(*packet + 1);
	buffer->len = SSH_MSG_LEN_CTRL;

	trace_ssam_ctrl_packet_alloc(*packet, buffer->len);
	return 0;
}

/**
 * ssh_ctrl_packet_free() - Free packet allocated from control packet cache.
 * @p: The packet to free.
 */
static void ssh_ctrl_packet_free(struct ssh_packet *p)
{
	trace_ssam_ctrl_packet_free(p);
	kmem_cache_free(ssh_ctrl_packet_cache, p);
}

static const struct ssh_packet_ops ssh_ptl_ctrl_packet_ops = {
	.complete = NULL,
	.release = ssh_ctrl_packet_free,
};

static void ssh_ptl_timeout_reaper_mod(struct ssh_ptl *ptl, ktime_t now,
				       ktime_t expires)
{
	unsigned long delta = msecs_to_jiffies(ktime_ms_delta(expires, now));
	ktime_t aexp = ktime_add(expires, SSH_PTL_PACKET_TIMEOUT_RESOLUTION);

	spin_lock(&ptl->rtx_timeout.lock);

	/* Re-adjust / schedule reaper only if it is above resolution delta. */
	if (ktime_before(aexp, ptl->rtx_timeout.expires)) {
		ptl->rtx_timeout.expires = expires;
		mod_delayed_work(system_wq, &ptl->rtx_timeout.reaper, delta);
	}

	spin_unlock(&ptl->rtx_timeout.lock);
}

/* Must be called with queue lock held. */
static void ssh_packet_next_try(struct ssh_packet *p)
{
	u8 base = ssh_packet_priority_get_base(p->priority);
	u8 try = ssh_packet_priority_get_try(p->priority);

	lockdep_assert_held(&p->ptl->queue.lock);

	/*
	 * Ensure that we write the priority in one go via WRITE_ONCE() so we
	 * can access it via READ_ONCE() for tracing. Note that other access
	 * is guarded by the queue lock, so no need to use READ_ONCE() there.
	 */
	WRITE_ONCE(p->priority, __SSH_PACKET_PRIORITY(base, try + 1));
}
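
/*
 * Illustrative sketch (never called): how the priority byte handled above is
 * composed. The base priority orders packets in the submission queue, while
 * the try counter is bumped by ssh_packet_next_try() on each transmission
 * and checked against SSH_PTL_MAX_PACKET_TRIES on re-submission.
 */
static inline void ssh_ptl_example_priority_layout(void)
{
	u8 prio = SSH_PACKET_PRIORITY(DATA, 0);

	/* Equivalent to what ssh_packet_next_try() does under the queue lock. */
	prio = __SSH_PACKET_PRIORITY(ssh_packet_priority_get_base(prio),
				     ssh_packet_priority_get_try(prio) + 1);

	/* Base is still DATA; only the try count advanced to 1. */
	WARN_ON(ssh_packet_priority_get_base(prio) !=
		ssh_packet_priority_get_base(SSH_PACKET_PRIORITY(DATA, 0)));
	WARN_ON(ssh_packet_priority_get_try(prio) != 1);
}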

/* Must be called with queue lock held. */
static struct list_head *__ssh_ptl_queue_find_entrypoint(struct ssh_packet *p)
{
	struct list_head *head;
	struct ssh_packet *q;

	lockdep_assert_held(&p->ptl->queue.lock);

	/*
	 * We generally assume that there are fewer control (ACK/NAK) packets
	 * and re-submitted data packets than there are normal data packets (at
	 * least in situations in which many packets are queued; if there
	 * aren't many packets queued the decision on how to iterate should be
	 * basically irrelevant; the number of control/data packets is more or
	 * less limited via the maximum number of pending packets). Thus, when
	 * inserting a control or re-submitted data packet (determined by
	 * their priority), we search from front to back. Normal data packets
	 * are usually queued directly at the tail of the queue, so for those
	 * search from back to front.
	 */

	if (p->priority > SSH_PACKET_PRIORITY(DATA, 0)) {
		list_for_each(head, &p->ptl->queue.head) {
			q = list_entry(head, struct ssh_packet, queue_node);

			if (q->priority < p->priority)
				break;
		}
	} else {
		list_for_each_prev(head, &p->ptl->queue.head) {
			q = list_entry(head, struct ssh_packet, queue_node);

			if (q->priority >= p->priority) {
				head = head->next;
				break;
			}
		}
	}

	return head;
}

/* Must be called with queue lock held. */
static int __ssh_ptl_queue_push(struct ssh_packet *packet)
{
	struct ssh_ptl *ptl = packet->ptl;
	struct list_head *head;

	lockdep_assert_held(&ptl->queue.lock);

	if (test_bit(SSH_PTL_SF_SHUTDOWN_BIT, &ptl->state))
		return -ESHUTDOWN;

	/* Avoid further transitions when canceling/completing. */
	if (test_bit(SSH_PACKET_SF_LOCKED_BIT, &packet->state))
		return -EINVAL;

	/* If this packet has already been queued, do not add it. */
	if (test_and_set_bit(SSH_PACKET_SF_QUEUED_BIT, &packet->state))
		return -EALREADY;

	head = __ssh_ptl_queue_find_entrypoint(packet);

	list_add_tail(&ssh_packet_get(packet)->queue_node, head);
	return 0;
}

static int ssh_ptl_queue_push(struct ssh_packet *packet)
{
	int status;

	spin_lock(&packet->ptl->queue.lock);
	status = __ssh_ptl_queue_push(packet);
	spin_unlock(&packet->ptl->queue.lock);

	return status;
}

static void ssh_ptl_queue_remove(struct ssh_packet *packet)
{
	struct ssh_ptl *ptl = packet->ptl;

	spin_lock(&ptl->queue.lock);

	if (!test_and_clear_bit(SSH_PACKET_SF_QUEUED_BIT, &packet->state)) {
		spin_unlock(&ptl->queue.lock);
		return;
	}

	list_del(&packet->queue_node);

	spin_unlock(&ptl->queue.lock);
	ssh_packet_put(packet);
}

static void ssh_ptl_pending_push(struct ssh_packet *p)
{
	struct ssh_ptl *ptl = p->ptl;
	const ktime_t timestamp = ktime_get_coarse_boottime();
	const ktime_t timeout = ptl->rtx_timeout.timeout;

	/*
	 * Note: We can get the time for the timestamp before acquiring the
	 * lock as this is the only place we're setting it and this function
	 * is called only from the transmitter thread. Thus it is not possible
	 * to overwrite the timestamp with an outdated value below.
	 */

	spin_lock(&ptl->pending.lock);

	/* If we are canceling/completing this packet, do not add it. */
	if (test_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state)) {
		spin_unlock(&ptl->pending.lock);
		return;
	}

	/*
	 * On re-submission, the packet has already been added to the pending
	 * set. We still need to update the timestamp as the packet timeout is
	 * reset for each (re-)submission.
	 */
	p->timestamp = timestamp;

	/* In case it is already pending (e.g. re-submission), do not add it. */
	if (!test_and_set_bit(SSH_PACKET_SF_PENDING_BIT, &p->state)) {
		atomic_inc(&ptl->pending.count);
		list_add_tail(&ssh_packet_get(p)->pending_node, &ptl->pending.head);
	}

	spin_unlock(&ptl->pending.lock);

	/* Arm/update timeout reaper. */
	ssh_ptl_timeout_reaper_mod(ptl, timestamp, timestamp + timeout);
}

static void ssh_ptl_pending_remove(struct ssh_packet *packet)
{
	struct ssh_ptl *ptl = packet->ptl;

	spin_lock(&ptl->pending.lock);

	if (!test_and_clear_bit(SSH_PACKET_SF_PENDING_BIT, &packet->state)) {
		spin_unlock(&ptl->pending.lock);
		return;
	}

	list_del(&packet->pending_node);
	atomic_dec(&ptl->pending.count);

	spin_unlock(&ptl->pending.lock);

	ssh_packet_put(packet);
}

/* Warning: Does not check/set "completed" bit. */
static void __ssh_ptl_complete(struct ssh_packet *p, int status)
{
	struct ssh_ptl *ptl = READ_ONCE(p->ptl);

	trace_ssam_packet_complete(p, status);
	ptl_dbg_cond(ptl, "ptl: completing packet %p (status: %d)\n", p, status);

	if (p->ops->complete)
		p->ops->complete(p, status);
}

static void ssh_ptl_remove_and_complete(struct ssh_packet *p, int status)
{
	/*
	 * A call to this function should in general be preceded by
	 * set_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state) to avoid re-adding the
	 * packet to the structures it's going to be removed from.
	 *
	 * The set_bit call does not need explicit memory barriers as the
	 * implicit barrier of the test_and_set_bit() call below ensures that
	 * the flag is visible before we actually attempt to remove the packet.
	 */

	if (test_and_set_bit(SSH_PACKET_SF_COMPLETED_BIT, &p->state))
		return;

	ssh_ptl_queue_remove(p);
	ssh_ptl_pending_remove(p);

	__ssh_ptl_complete(p, status);
}

static bool ssh_ptl_tx_can_process(struct ssh_packet *packet)
{
	struct ssh_ptl *ptl = packet->ptl;

	if (test_bit(SSH_PACKET_TY_FLUSH_BIT, &packet->state))
		return !atomic_read(&ptl->pending.count);

	/* We can always process non-blocking packets. */
	if (!test_bit(SSH_PACKET_TY_BLOCKING_BIT, &packet->state))
		return true;

	/* If we are already waiting for this packet, send it again. */
	if (test_bit(SSH_PACKET_SF_PENDING_BIT, &packet->state))
		return true;

	/* Otherwise: Check if we have the capacity to send. */
	return atomic_read(&ptl->pending.count) < SSH_PTL_MAX_PENDING;
}

static struct ssh_packet *ssh_ptl_tx_pop(struct ssh_ptl *ptl)
{
	struct ssh_packet *packet = ERR_PTR(-ENOENT);
	struct ssh_packet *p, *n;

	spin_lock(&ptl->queue.lock);
	list_for_each_entry_safe(p, n, &ptl->queue.head, queue_node) {
		/*
		 * If we are canceling or completing this packet, ignore it.
		 * It's going to be removed from this queue shortly.
		 */
		if (test_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state))
			continue;

		/*
		 * Packets should be ordered non-blocking/to-be-resent first.
		 * If we cannot process this packet, assume that we can't
		 * process any following packet either and abort.
		 */
		if (!ssh_ptl_tx_can_process(p)) {
			packet = ERR_PTR(-EBUSY);
			break;
		}

		/*
		 * We are allowed to change the state now. Remove it from the
		 * queue and mark it as being transmitted.
		 */

		list_del(&p->queue_node);

		set_bit(SSH_PACKET_SF_TRANSMITTING_BIT, &p->state);
		/* Ensure that state never gets zero. */
		smp_mb__before_atomic();
		clear_bit(SSH_PACKET_SF_QUEUED_BIT, &p->state);

		/*
		 * Update number of tries. This directly influences the
		 * priority in case the packet is re-submitted (e.g. via
		 * timeout/NAK). Note that all reads and writes to the
		 * priority after the first submission are guarded by the
		 * queue lock.
		 */
		ssh_packet_next_try(p);

		packet = p;
		break;
	}
	spin_unlock(&ptl->queue.lock);

	return packet;
}

static struct ssh_packet *ssh_ptl_tx_next(struct ssh_ptl *ptl)
{
	struct ssh_packet *p;

	p = ssh_ptl_tx_pop(ptl);
	if (IS_ERR(p))
		return p;

	if (test_bit(SSH_PACKET_TY_SEQUENCED_BIT, &p->state)) {
		ptl_dbg(ptl, "ptl: transmitting sequenced packet %p\n", p);
		ssh_ptl_pending_push(p);
	} else {
		ptl_dbg(ptl, "ptl: transmitting non-sequenced packet %p\n", p);
	}

	return p;
}

static void ssh_ptl_tx_compl_success(struct ssh_packet *packet)
{
	struct ssh_ptl *ptl = packet->ptl;

	ptl_dbg(ptl, "ptl: successfully transmitted packet %p\n", packet);

	/* Transition state to "transmitted". */
	set_bit(SSH_PACKET_SF_TRANSMITTED_BIT, &packet->state);
	/* Ensure that state never gets zero. */
	smp_mb__before_atomic();
	clear_bit(SSH_PACKET_SF_TRANSMITTING_BIT, &packet->state);

	/* If the packet is unsequenced, we're done: Lock and complete. */
	if (!test_bit(SSH_PACKET_TY_SEQUENCED_BIT, &packet->state)) {
		set_bit(SSH_PACKET_SF_LOCKED_BIT, &packet->state);
		ssh_ptl_remove_and_complete(packet, 0);
	}

	/*
	 * Notify that a packet transmission has finished. In general we're only
	 * waiting for one packet (if any), so wake_up_all should be fine.
	 */
	wake_up_all(&ptl->tx.packet_wq);
}

static void ssh_ptl_tx_compl_error(struct ssh_packet *packet, int status)
{
	/* Transmission failure: Lock the packet and try to complete it. */
	set_bit(SSH_PACKET_SF_LOCKED_BIT, &packet->state);
	/* Ensure that state never gets zero. */
	smp_mb__before_atomic();
	clear_bit(SSH_PACKET_SF_TRANSMITTING_BIT, &packet->state);

	ptl_err(packet->ptl, "ptl: transmission error: %d\n", status);
	ptl_dbg(packet->ptl, "ptl: failed to transmit packet: %p\n", packet);

	ssh_ptl_remove_and_complete(packet, status);

	/*
	 * Notify that a packet transmission has finished. In general we're only
	 * waiting for one packet (if any), so wake_up_all should be fine.
	 */
	wake_up_all(&packet->ptl->tx.packet_wq);
}

static long ssh_ptl_tx_wait_packet(struct ssh_ptl *ptl)
{
	long status;

	status = wait_for_completion_interruptible(&ptl->tx.thread_cplt_pkt);
	reinit_completion(&ptl->tx.thread_cplt_pkt);

	/*
	 * Ensure completion is cleared before continuing to avoid lost update
	 * problems.
	 */
	smp_mb__after_atomic();

	return status;
}

static long ssh_ptl_tx_wait_transfer(struct ssh_ptl *ptl, long timeout)
{
	long status;

	status = wait_for_completion_interruptible_timeout(&ptl->tx.thread_cplt_tx,
							   timeout);
	reinit_completion(&ptl->tx.thread_cplt_tx);

	/*
	 * Ensure completion is cleared before continuing to avoid lost update
	 * problems.
	 */
	smp_mb__after_atomic();

	return status;
}

static int ssh_ptl_tx_packet(struct ssh_ptl *ptl, struct ssh_packet *packet)
{
	long timeout = SSH_PTL_TX_TIMEOUT;
	size_t offset = 0;

	/* Note: Flush-packets don't have any data. */
	if (unlikely(!packet->data.ptr))
		return 0;

	/* Error injection: drop packet to simulate transmission problem. */
	if (ssh_ptl_should_drop_packet(packet))
		return 0;

	/* Error injection: simulate invalid packet data. */
	ssh_ptl_tx_inject_invalid_data(packet);

	ptl_dbg(ptl, "tx: sending data (length: %zu)\n", packet->data.len);
	print_hex_dump_debug("tx: ", DUMP_PREFIX_OFFSET, 16, 1,
			     packet->data.ptr, packet->data.len, false);

	do {
		ssize_t status, len;
		u8 *buf;

		buf = packet->data.ptr + offset;
		len = packet->data.len - offset;

		status = ssh_ptl_write_buf(ptl, packet, buf, len);
		if (status < 0)
			return status;

		if (status == len)
			return 0;

		offset += status;

		timeout = ssh_ptl_tx_wait_transfer(ptl, timeout);
		if (kthread_should_stop() || !atomic_read(&ptl->tx.running))
			return -ESHUTDOWN;

		if (timeout < 0)
			return -EINTR;

		if (timeout == 0)
			return -ETIMEDOUT;
	} while (true);
}

static int ssh_ptl_tx_threadfn(void *data)
{
	struct ssh_ptl *ptl = data;

	while (!kthread_should_stop() && atomic_read(&ptl->tx.running)) {
		struct ssh_packet *packet;
		int status;

		/* Try to get the next packet. */
		packet = ssh_ptl_tx_next(ptl);

		/* If no packet can be processed, we are done. */
		if (IS_ERR(packet)) {
			ssh_ptl_tx_wait_packet(ptl);
			continue;
		}

		/* Transfer and complete packet. */
		status = ssh_ptl_tx_packet(ptl, packet);
		if (status)
			ssh_ptl_tx_compl_error(packet, status);
		else
			ssh_ptl_tx_compl_success(packet);

		ssh_packet_put(packet);
	}

	return 0;
}

/**
 * ssh_ptl_tx_wakeup_packet() - Wake up packet transmitter thread for new
 * packet.
 * @ptl: The packet transport layer.
 *
 * Wakes up the packet transmitter thread, notifying it that a new packet has
 * arrived and is ready for transfer. If the packet transport layer has been
 * shut down, calls to this function will be ignored.
 */
static void ssh_ptl_tx_wakeup_packet(struct ssh_ptl *ptl)
{
	if (test_bit(SSH_PTL_SF_SHUTDOWN_BIT, &ptl->state))
		return;

	complete(&ptl->tx.thread_cplt_pkt);
}

/**
 * ssh_ptl_tx_start() - Start packet transmitter thread.
 * @ptl: The packet transport layer.
 *
 * Return: Returns zero on success, a negative error code on failure.
 */
int ssh_ptl_tx_start(struct ssh_ptl *ptl)
{
	atomic_set_release(&ptl->tx.running, 1);

	ptl->tx.thread = kthread_run(ssh_ptl_tx_threadfn, ptl, "ssam_serial_hub-tx");
	if (IS_ERR(ptl->tx.thread))
		return PTR_ERR(ptl->tx.thread);

	return 0;
}

/**
 * ssh_ptl_tx_stop() - Stop packet transmitter thread.
 * @ptl: The packet transport layer.
 *
 * Return: Returns zero on success, a negative error code on failure.
 */
int ssh_ptl_tx_stop(struct ssh_ptl *ptl)
{
	int status = 0;

	if (!IS_ERR_OR_NULL(ptl->tx.thread)) {
		/* Tell thread to stop. */
		atomic_set_release(&ptl->tx.running, 0);

		/*
		 * Wake up thread in case it is paused. Do not use wakeup
		 * helpers as this may be called when the shutdown bit has
		 * already been set.
		 */
		complete(&ptl->tx.thread_cplt_pkt);
		complete(&ptl->tx.thread_cplt_tx);

		/* Finally, wait for thread to stop. */
		status = kthread_stop(ptl->tx.thread);
		ptl->tx.thread = NULL;
	}

	return status;
}

static struct ssh_packet *ssh_ptl_ack_pop(struct ssh_ptl *ptl, u8 seq_id)
{
	struct ssh_packet *packet = ERR_PTR(-ENOENT);
	struct ssh_packet *p, *n;

	spin_lock(&ptl->pending.lock);
	list_for_each_entry_safe(p, n, &ptl->pending.head, pending_node) {
		/*
		 * We generally expect packets to be in order, so first packet
		 * to be added to pending is first to be sent, is first to be
		 * ACKed.
		 */
		if (unlikely(ssh_packet_get_seq(p) != seq_id))
			continue;

		/*
		 * In case we receive an ACK while handling a transmission
		 * error completion. The packet will be removed shortly.
		 */
		if (unlikely(test_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state))) {
			packet = ERR_PTR(-EPERM);
			break;
		}

		/*
		 * Mark the packet as ACKed and remove it from pending by
		 * removing its node and decrementing the pending counter.
		 */
		set_bit(SSH_PACKET_SF_ACKED_BIT, &p->state);
		/* Ensure that state never gets zero. */
		smp_mb__before_atomic();
		clear_bit(SSH_PACKET_SF_PENDING_BIT, &p->state);

		atomic_dec(&ptl->pending.count);
		list_del(&p->pending_node);

		packet = p;
		break;
	}
	spin_unlock(&ptl->pending.lock);

	return packet;
}

static void ssh_ptl_wait_until_transmitted(struct ssh_packet *packet)
{
	wait_event(packet->ptl->tx.packet_wq,
		   test_bit(SSH_PACKET_SF_TRANSMITTED_BIT, &packet->state) ||
		   test_bit(SSH_PACKET_SF_LOCKED_BIT, &packet->state));
}

static void ssh_ptl_acknowledge(struct ssh_ptl *ptl, u8 seq)
{
	struct ssh_packet *p;

	p = ssh_ptl_ack_pop(ptl, seq);
	if (IS_ERR(p)) {
		if (PTR_ERR(p) == -ENOENT) {
			/*
			 * The packet has not been found in the set of pending
			 * packets.
			 */
			ptl_warn(ptl, "ptl: received ACK for non-pending packet\n");
		} else {
			/*
			 * The packet is pending, but we are not allowed to take
			 * it because it has been locked.
			 */
			WARN_ON(PTR_ERR(p) != -EPERM);
		}
		return;
	}

	ptl_dbg(ptl, "ptl: received ACK for packet %p\n", p);

	/*
	 * It is possible that the packet has been transmitted, but the state
	 * has not been updated from "transmitting" to "transmitted" yet.
	 * In that case, we need to wait for this transition to occur in order
	 * to distinguish between success and failure.
	 *
	 * On transmission failure, the packet will be locked after this call.
	 * On success, the transmitted bit will be set.
	 */
	ssh_ptl_wait_until_transmitted(p);

	/*
	 * The packet will already be locked in case of a transmission error or
	 * cancellation. Let the transmitter or cancellation issuer complete the
	 * packet.
	 */
	if (unlikely(test_and_set_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state))) {
		if (unlikely(!test_bit(SSH_PACKET_SF_TRANSMITTED_BIT, &p->state)))
			ptl_err(ptl, "ptl: received ACK before packet had been fully transmitted\n");

		ssh_packet_put(p);
		return;
	}

	ssh_ptl_remove_and_complete(p, 0);
	ssh_packet_put(p);

	if (atomic_read(&ptl->pending.count) < SSH_PTL_MAX_PENDING)
		ssh_ptl_tx_wakeup_packet(ptl);
}

/**
 * ssh_ptl_submit() - Submit a packet to the transport layer.
 * @ptl: The packet transport layer to submit the packet to.
 * @p: The packet to submit.
 *
 * Submits a new packet to the transport layer, queuing it to be sent. This
 * function should not be used for re-submission.
 *
 * Return: Returns zero on success, %-EINVAL if a packet field is invalid or
 * the packet has been canceled prior to submission, %-EALREADY if the packet
 * has already been submitted, or %-ESHUTDOWN if the packet transport layer
 * has been shut down.
 */
int ssh_ptl_submit(struct ssh_ptl *ptl, struct ssh_packet *p)
{
	struct ssh_ptl *ptl_old;
	int status;

	trace_ssam_packet_submit(p);

	/* Validate packet fields. */
	if (test_bit(SSH_PACKET_TY_FLUSH_BIT, &p->state)) {
		if (p->data.ptr || test_bit(SSH_PACKET_TY_SEQUENCED_BIT, &p->state))
			return -EINVAL;
	} else if (!p->data.ptr) {
		return -EINVAL;
	}

	/*
	 * The ptl reference only gets set on or before the first submission.
	 * After the first submission, it has to be read-only.
	 *
	 * Note that ptl may already be set from upper-layer request
	 * submission, thus we cannot expect it to be NULL.
	 */
	ptl_old = READ_ONCE(p->ptl);
	if (!ptl_old)
		WRITE_ONCE(p->ptl, ptl);
	else if (WARN_ON(ptl_old != ptl))
		return -EALREADY;	/* Submitted on different PTL. */

	status = ssh_ptl_queue_push(p);
	if (status)
		return status;

	if (!test_bit(SSH_PACKET_TY_BLOCKING_BIT, &p->state) ||
	    (atomic_read(&ptl->pending.count) < SSH_PTL_MAX_PENDING))
		ssh_ptl_tx_wakeup_packet(ptl);

	return 0;
}

/**
 * __ssh_ptl_resubmit() - Re-submit a packet to the transport layer.
 * @packet: The packet to re-submit.
 *
 * Re-submits the given packet: Checks if it can be re-submitted and queues it
 * if it can, resetting the packet timestamp in the process. Must be called
 * with the pending lock held.
 *
 * Return: Returns %-ECANCELED if the packet has exceeded its number of tries,
 * %-EINVAL if the packet has been locked, %-EALREADY if the packet is already
 * on the queue, and %-ESHUTDOWN if the transmission layer has been shut down.
 */
static int __ssh_ptl_resubmit(struct ssh_packet *packet)
{
	int status;
	u8 try;

	lockdep_assert_held(&packet->ptl->pending.lock);

	trace_ssam_packet_resubmit(packet);

	spin_lock(&packet->ptl->queue.lock);

	/* Check if the packet is out of tries. */
	try = ssh_packet_priority_get_try(packet->priority);
	if (try >= SSH_PTL_MAX_PACKET_TRIES) {
		spin_unlock(&packet->ptl->queue.lock);
		return -ECANCELED;
	}

	status = __ssh_ptl_queue_push(packet);
	if (status) {
		/*
		 * An error here indicates that the packet has either already
		 * been queued, been locked, or the transport layer is being
		 * shut down. In all cases: Ignore the error.
		 */
		spin_unlock(&packet->ptl->queue.lock);
		return status;
	}

	packet->timestamp = KTIME_MAX;

	spin_unlock(&packet->ptl->queue.lock);
	return 0;
}

static void ssh_ptl_resubmit_pending(struct ssh_ptl *ptl)
{
	struct ssh_packet *p;
	bool resub = false;

	/*
	 * Note: We deliberately do not remove/attempt to cancel and complete
	 * packets that are out of tries in this function. The packet will be
	 * eventually canceled and completed by the timeout. Removing the packet
	 * here could lead to overly eager cancellation if the packet has not
	 * been re-transmitted yet but the tries-counter already updated (i.e.
	 * ssh_ptl_tx_next() removed the packet from the queue and updated the
	 * counter, but re-transmission for the last try has not actually
	 * started yet).
	 */

	spin_lock(&ptl->pending.lock);

	/* Re-queue all pending packets. */
	list_for_each_entry(p, &ptl->pending.head, pending_node) {
		/*
		 * Re-submission fails if the packet is out of tries, has been
		 * locked, is already queued, or the layer is being shut down.
		 * No need to re-schedule tx-thread in those cases.
		 */
		if (!__ssh_ptl_resubmit(p))
			resub = true;
	}

	spin_unlock(&ptl->pending.lock);

	if (resub)
		ssh_ptl_tx_wakeup_packet(ptl);
}

/**
 * ssh_ptl_cancel() - Cancel a packet.
 * @p: The packet to cancel.
 *
 * Cancels a packet. There are no guarantees on when completion and release
 * callbacks will be called. This may occur during execution of this function
 * or may occur at any point later.
 *
 * Note that it is not guaranteed that the packet will actually be canceled if
 * the packet is concurrently completed by another process. The only guarantee
 * of this function is that the packet will be completed (with success,
 * failure, or cancellation) and released from the transport layer in a
 * reasonable time-frame.
 *
 * May be called before the packet has been submitted, in which case any later
 * packet submission fails.
 */
void ssh_ptl_cancel(struct ssh_packet *p)
{
	if (test_and_set_bit(SSH_PACKET_SF_CANCELED_BIT, &p->state))
		return;

	trace_ssam_packet_cancel(p);

	/*
	 * Lock packet and commit with memory barrier. If this packet has
	 * already been locked, it's going to be removed and completed by
	 * another party, which should have precedence.
	 */
	if (test_and_set_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state))
		return;

	/*
	 * By marking the packet as locked and employing the implicit memory
	 * barrier of test_and_set_bit, we have guaranteed that, at this point,
	 * the packet cannot be added to the queue any more.
	 *
	 * In case the packet has never been submitted, packet->ptl is NULL. If
	 * the packet is currently being submitted, packet->ptl may be NULL or
	 * non-NULL. Due to marking the packet as locked above and committing
	 * with the memory barrier, we have guaranteed that, if packet->ptl is
	 * NULL, the packet will never be added to the queue. If packet->ptl is
	 * non-NULL, we don't have any guarantees.
	 */

	if (READ_ONCE(p->ptl)) {
		ssh_ptl_remove_and_complete(p, -ECANCELED);

		if (atomic_read(&p->ptl->pending.count) < SSH_PTL_MAX_PENDING)
			ssh_ptl_tx_wakeup_packet(p->ptl);

	} else if (!test_and_set_bit(SSH_PACKET_SF_COMPLETED_BIT, &p->state)) {
		__ssh_ptl_complete(p, -ECANCELED);
	}
}
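
/*
 * Illustrative sketch (hypothetical caller, not part of the driver): aborting
 * a packet that an upper layer still holds a reference to. ssh_ptl_cancel()
 * only guarantees completion in a reasonable time-frame; the caller's own
 * reference must still be dropped separately.
 */
static inline void ptl_example_abort(struct ssh_packet *p)
{
	/* Completion (-ECANCELED) may run during this call or at any point later. */
	ssh_ptl_cancel(p);

	/* Drop the caller's reference once it no longer needs the packet. */
	ssh_packet_put(p);
}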

/* Must be called with pending lock held. */
static ktime_t ssh_packet_get_expiration(struct ssh_packet *p, ktime_t timeout)
{
	lockdep_assert_held(&p->ptl->pending.lock);

	if (p->timestamp != KTIME_MAX)
		return ktime_add(p->timestamp, timeout);
	else
		return KTIME_MAX;
}

static void ssh_ptl_timeout_reap(struct work_struct *work)
{
	struct ssh_ptl *ptl = to_ssh_ptl(work, rtx_timeout.reaper.work);
	struct ssh_packet *p, *n;
	LIST_HEAD(claimed);
	ktime_t now = ktime_get_coarse_boottime();
	ktime_t timeout = ptl->rtx_timeout.timeout;
	ktime_t next = KTIME_MAX;
	bool resub = false;
	int status;

	trace_ssam_ptl_timeout_reap(atomic_read(&ptl->pending.count));

	/*
	 * Mark reaper as "not pending". This is done before checking any
	 * packets to avoid lost-update type problems.
	 */
	spin_lock(&ptl->rtx_timeout.lock);
	ptl->rtx_timeout.expires = KTIME_MAX;
	spin_unlock(&ptl->rtx_timeout.lock);

	spin_lock(&ptl->pending.lock);

	list_for_each_entry_safe(p, n, &ptl->pending.head, pending_node) {
		ktime_t expires = ssh_packet_get_expiration(p, timeout);

		/*
		 * Check if the timeout hasn't expired yet. Find out next
		 * expiration date to be handled after this run.
		 */
		if (ktime_after(expires, now)) {
			next = ktime_before(expires, next) ? expires : next;
			continue;
		}

		trace_ssam_packet_timeout(p);

		status = __ssh_ptl_resubmit(p);

		/*
		 * Re-submission fails if the packet is out of tries, has been
		 * locked, is already queued, or the layer is being shut down.
		 * No need to re-schedule tx-thread in those cases.
		 */
		if (!status)
			resub = true;

		/* Go to next packet if this packet is not out of tries. */
		if (status != -ECANCELED)
			continue;

		/* No more tries left: Cancel the packet. */

		/*
		 * If someone else has locked the packet already, don't use it
		 * and let the other party complete it.
		 */
		if (test_and_set_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state))
			continue;

		/*
		 * We have now marked the packet as locked. Thus it cannot be
		 * added to the pending list again after we've removed it here.
		 * We can therefore re-use the pending_node of this packet
		 * temporarily.
		 */

		clear_bit(SSH_PACKET_SF_PENDING_BIT, &p->state);

		atomic_dec(&ptl->pending.count);
		list_move_tail(&p->pending_node, &claimed);
	}

	spin_unlock(&ptl->pending.lock);

	/* Cancel and complete the packet. */
	list_for_each_entry_safe(p, n, &claimed, pending_node) {
		if (!test_and_set_bit(SSH_PACKET_SF_COMPLETED_BIT, &p->state)) {
			ssh_ptl_queue_remove(p);
			__ssh_ptl_complete(p, -ETIMEDOUT);
		}

		/*
		 * Drop the reference we've obtained by removing it from
		 * the pending set.
		 */
		list_del(&p->pending_node);
		ssh_packet_put(p);
	}

	/* Ensure that reaper doesn't run again immediately. */
	next = max(next, ktime_add(now, SSH_PTL_PACKET_TIMEOUT_RESOLUTION));
	if (next != KTIME_MAX)
		ssh_ptl_timeout_reaper_mod(ptl, now, next);

	if (resub)
		ssh_ptl_tx_wakeup_packet(ptl);
}

static bool ssh_ptl_rx_retransmit_check(struct ssh_ptl *ptl, const struct ssh_frame *frame)
{
	int i;

	/*
	 * Ignore unsequenced packets. On some devices (notably Surface Pro 9),
	 * unsequenced events will always be sent with SEQ=0x00. Attempting to
	 * detect retransmission would thus just block all events.
	 *
	 * While sequence numbers would also allow detection of retransmitted
	 * packets in unsequenced communication, they have only ever been used
	 * to cover edge-cases in sequenced transmission. In particular, the
	 * only instance of packets being retransmitted (that we are aware of)
	 * is due to an ACK timeout. As this does not happen in unsequenced
	 * communication, skip the retransmission check for those packets
	 * entirely.
	 */
	if (frame->type == SSH_FRAME_TYPE_DATA_NSQ)
		return false;

	/*
	 * Check if SEQ has been seen recently (i.e. packet was
	 * re-transmitted and we should ignore it).
	 */
	for (i = 0; i < ARRAY_SIZE(ptl->rx.blocked.seqs); i++) {
		if (likely(ptl->rx.blocked.seqs[i] != frame->seq))
			continue;

		ptl_dbg(ptl, "ptl: ignoring repeated data packet\n");
		return true;
	}

	/* Update list of blocked sequence IDs. */
	ptl->rx.blocked.seqs[ptl->rx.blocked.offset] = frame->seq;
	ptl->rx.blocked.offset = (ptl->rx.blocked.offset + 1)
				  % ARRAY_SIZE(ptl->rx.blocked.seqs);

	return false;
}

static void ssh_ptl_rx_dataframe(struct ssh_ptl *ptl,
				 const struct ssh_frame *frame,
				 const struct ssam_span *payload)
{
	if (ssh_ptl_rx_retransmit_check(ptl, frame))
		return;

	ptl->ops.data_received(ptl, payload);
}

static void ssh_ptl_send_ack(struct ssh_ptl *ptl, u8 seq)
{
	struct ssh_packet *packet;
	struct ssam_span buf;
	struct msgbuf msgb;
	int status;

	status = ssh_ctrl_packet_alloc(&packet, &buf, GFP_KERNEL);
	if (status) {
		ptl_err(ptl, "ptl: failed to allocate ACK packet\n");
		return;
	}

	ssh_packet_init(packet, 0, SSH_PACKET_PRIORITY(ACK, 0),
			&ssh_ptl_ctrl_packet_ops);

	msgb_init(&msgb, buf.ptr, buf.len);
	msgb_push_ack(&msgb, seq);
	ssh_packet_set_data(packet, msgb.begin, msgb_bytes_used(&msgb));

	ssh_ptl_submit(ptl, packet);
	ssh_packet_put(packet);
}

static void ssh_ptl_send_nak(struct ssh_ptl *ptl)
{
	struct ssh_packet *packet;
	struct ssam_span buf;
	struct msgbuf msgb;
	int status;

	status = ssh_ctrl_packet_alloc(&packet, &buf, GFP_KERNEL);
	if (status) {
		ptl_err(ptl, "ptl: failed to allocate NAK packet\n");
		return;
	}

	ssh_packet_init(packet, 0, SSH_PACKET_PRIORITY(NAK, 0),
			&ssh_ptl_ctrl_packet_ops);

	msgb_init(&msgb, buf.ptr, buf.len);
	msgb_push_nak(&msgb);
	ssh_packet_set_data(packet, msgb.begin, msgb_bytes_used(&msgb));

	ssh_ptl_submit(ptl, packet);
	ssh_packet_put(packet);
}

static size_t ssh_ptl_rx_eval(struct ssh_ptl *ptl, struct ssam_span *source)
{
	struct ssh_frame *frame;
	struct ssam_span payload;
	struct ssam_span aligned;
	bool syn_found;
	int status;

	/* Error injection: Modify data to simulate corrupt SYN bytes. */
	ssh_ptl_rx_inject_invalid_syn(ptl, source);

	/* Find SYN. */
	syn_found = sshp_find_syn(source, &aligned);

	if (unlikely(aligned.ptr != source->ptr)) {
		/*
		 * We expect aligned.ptr == source->ptr. If this is not the
		 * case, then aligned.ptr > source->ptr and we've encountered
		 * some unexpected data where we'd expect the start of a new
		 * message (i.e. the SYN sequence).
		 *
		 * This can happen when a CRC check for the previous message
		 * failed and we start actively searching for the next one
		 * (via the call to sshp_find_syn() above), or the first bytes
		 * of a message got dropped or corrupted.
		 *
		 * In any case, we issue a warning, send a NAK to the EC to
		 * request re-transmission of any data we haven't acknowledged
		 * yet, and finally, skip everything up to the next SYN
		 * sequence.
		 */

		ptl_warn(ptl, "rx: parser: invalid start of frame, skipping\n");

		/*
		 * Notes:
		 * - This might send multiple NAKs in case the communication
		 *   starts with an invalid SYN and is broken down into multiple
		 *   pieces. This should generally be handled fine, we just
		 *   might receive duplicate data in this case, which is
		 *   detected when handling data frames.
		 * - This path will also be executed on invalid CRCs: When an
		 *   invalid CRC is encountered, the code below will skip data
		 *   until directly after the SYN. This causes the search for
		 *   the next SYN, which is generally not placed directly after
		 *   the last one.
		 *
		 * Open question: Should we send this in case of invalid
		 * payload CRCs if the frame-type is non-sequential (current
		 * implementation) or should we drop that frame without
		 * telling the EC?
		 */
		ssh_ptl_send_nak(ptl);
	}

	if (unlikely(!syn_found))
		return aligned.ptr - source->ptr;

	/* Error injection: Modify data to simulate corruption. */
	ssh_ptl_rx_inject_invalid_data(ptl, &aligned);

	/* Parse and validate frame. */
	status = sshp_parse_frame(&ptl->serdev->dev, &aligned, &frame, &payload,
				  SSH_PTL_RX_BUF_LEN);
	if (status)	/* Invalid frame: skip to next SYN. */
		return aligned.ptr - source->ptr + sizeof(u16);
	if (!frame)	/* Not enough data. */
		return aligned.ptr - source->ptr;

	trace_ssam_rx_frame_received(frame);

	switch (frame->type) {
	case SSH_FRAME_TYPE_ACK:
		ssh_ptl_acknowledge(ptl, frame->seq);
		break;

	case SSH_FRAME_TYPE_NAK:
		ssh_ptl_resubmit_pending(ptl);
		break;

	case SSH_FRAME_TYPE_DATA_SEQ:
		ssh_ptl_send_ack(ptl, frame->seq);
		fallthrough;

	case SSH_FRAME_TYPE_DATA_NSQ:
		ssh_ptl_rx_dataframe(ptl, frame, &payload);
		break;

	default:
		ptl_warn(ptl, "ptl: received frame with unknown type %#04x\n",
			 frame->type);
		break;
	}

	return aligned.ptr - source->ptr + SSH_MESSAGE_LENGTH(payload.len);
}

static int ssh_ptl_rx_threadfn(void *data)
{
	struct ssh_ptl *ptl = data;

	while (true) {
		struct ssam_span span;
		size_t offs = 0;
		size_t n;

		wait_event_interruptible(ptl->rx.wq,
					 !kfifo_is_empty(&ptl->rx.fifo) ||
					 kthread_should_stop());
		if (kthread_should_stop())
			break;

		/* Copy from fifo to evaluation buffer. */
		n = sshp_buf_read_from_fifo(&ptl->rx.buf, &ptl->rx.fifo);

		ptl_dbg(ptl, "rx: received data (size: %zu)\n", n);
		print_hex_dump_debug("rx: ", DUMP_PREFIX_OFFSET, 16, 1,
				     ptl->rx.buf.ptr + ptl->rx.buf.len - n,
				     n, false);

		/* Parse until we need more bytes or buffer is empty. */
		while (offs < ptl->rx.buf.len) {
			sshp_buf_span_from(&ptl->rx.buf, offs, &span);
			n = ssh_ptl_rx_eval(ptl, &span);
			if (n == 0)
				break;	/* Need more bytes. */

			offs += n;
		}

		/* Throw away the evaluated parts. */
		sshp_buf_drop(&ptl->rx.buf, offs);
	}

	return 0;
}

static void ssh_ptl_rx_wakeup(struct ssh_ptl *ptl)
{
	wake_up(&ptl->rx.wq);
}

/**
 * ssh_ptl_rx_start() - Start packet transport layer receiver thread.
 * @ptl: The packet transport layer.
 *
 * Return: Returns zero on success, a negative error code on failure.
 */
int ssh_ptl_rx_start(struct ssh_ptl *ptl)
{
	if (ptl->rx.thread)
		return 0;

	ptl->rx.thread = kthread_run(ssh_ptl_rx_threadfn, ptl,
				     "ssam_serial_hub-rx");
	if (IS_ERR(ptl->rx.thread))
		return PTR_ERR(ptl->rx.thread);

	return 0;
}

/**
 * ssh_ptl_rx_stop() - Stop packet transport layer receiver thread.
 * @ptl: The packet transport layer.
 *
 * Return: Returns zero on success, a negative error code on failure.
 */
int ssh_ptl_rx_stop(struct ssh_ptl *ptl)
{
	int status = 0;

	if (ptl->rx.thread) {
		status = kthread_stop(ptl->rx.thread);
		ptl->rx.thread = NULL;
	}

	return status;
}

/**
 * ssh_ptl_rx_rcvbuf() - Push data from lower-layer transport to the packet
 *                       layer.
 * @ptl: The packet transport layer.
 * @buf: Pointer to the data to push to the layer.
 * @n:   Size of the data to push to the layer, in bytes.
 *
 * Pushes data from a lower-layer transport to the receiver fifo buffer of the
 * packet layer and notifies the receiver thread. Calls to this function are
 * ignored once the packet layer has been shut down.
 *
 * Return: Returns the number of bytes transferred (positive or zero) on
 * success. Returns %-ESHUTDOWN if the packet layer has been shut down.
 */
ssize_t ssh_ptl_rx_rcvbuf(struct ssh_ptl *ptl, const u8 *buf, size_t n)
{
	size_t used;

	if (test_bit(SSH_PTL_SF_SHUTDOWN_BIT, &ptl->state))
		return -ESHUTDOWN;

	used = kfifo_in(&ptl->rx.fifo, buf, n);
	if (used)
		ssh_ptl_rx_wakeup(ptl);

	return used;
}
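
/*
 * Illustrative sketch (hypothetical lower layer, not part of the driver):
 * feeding received serial data into the packet layer. This assumes a serdev
 * receive callback that reports the number of consumed bytes and that the
 * packet layer pointer has been stored as serdev driver data; the
 * ssam_example_* name is made up for this sketch.
 */
static inline size_t ssam_example_receive_buf(struct serdev_device *dev,
					      const u8 *buf, size_t n)
{
	struct ssh_ptl *ptl = serdev_device_get_drvdata(dev);
	ssize_t ret;

	/* Returns the number of bytes queued for the receiver thread, or -ESHUTDOWN. */
	ret = ssh_ptl_rx_rcvbuf(ptl, buf, n);

	return ret < 0 ? 0 : ret;
}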

/**
 * ssh_ptl_shutdown() - Shut down the packet transport layer.
 * @ptl: The packet transport layer.
 *
 * Shuts down the packet transport layer, removing and canceling all queued
 * and pending packets. Packets canceled by this operation will be completed
 * with %-ESHUTDOWN as status. Receiver and transmitter threads will be
 * stopped.
 *
 * As a result of this function, the transport layer will be marked as shut
 * down. Submission of packets after the transport layer has been shut down
 * will fail with %-ESHUTDOWN.
 */
void ssh_ptl_shutdown(struct ssh_ptl *ptl)
{
	LIST_HEAD(complete_q);
	LIST_HEAD(complete_p);
	struct ssh_packet *p, *n;
	int status;

	/* Ensure that no new packets (including ACK/NAK) can be submitted. */
	set_bit(SSH_PTL_SF_SHUTDOWN_BIT, &ptl->state);
	/*
	 * Ensure that the layer gets marked as shut-down before actually
	 * stopping it. In combination with the check in ssh_ptl_queue_push(),
	 * this guarantees that no new packets can be added and all already
	 * queued packets are properly canceled. In combination with the check
	 * in ssh_ptl_rx_rcvbuf(), this guarantees that received data is
	 * properly cut off.
	 */
	smp_mb__after_atomic();

	status = ssh_ptl_rx_stop(ptl);
	if (status)
		ptl_err(ptl, "ptl: failed to stop receiver thread\n");

	status = ssh_ptl_tx_stop(ptl);
	if (status)
		ptl_err(ptl, "ptl: failed to stop transmitter thread\n");

	cancel_delayed_work_sync(&ptl->rtx_timeout.reaper);

	/*
	 * At this point, all threads have been stopped. This means that the
	 * only references to packets from inside the system are in the queue
	 * and pending set.
	 *
	 * Note: We still need locks here because someone could still be
	 * canceling packets.
	 *
	 * Note 2: We can re-use queue_node (or pending_node) if we mark the
	 * packet as locked and then remove it from the queue (or pending set
	 * respectively). Marking the packet as locked avoids re-queuing
	 * (which should already be prevented by having stopped the threads...)
	 * and not setting QUEUED_BIT (or PENDING_BIT) prevents removal from a
	 * new list via other threads (e.g. cancellation).
	 *
	 * Note 3: There may be overlap between complete_p and complete_q.
	 * This is handled via test_and_set_bit() on the "completed" flag
	 * (also handles cancellation).
	 */

	/* Mark queued packets as locked and move them to complete_q. */
	spin_lock(&ptl->queue.lock);
	list_for_each_entry_safe(p, n, &ptl->queue.head, queue_node) {
		set_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state);
		/* Ensure that state does not get zero. */
		smp_mb__before_atomic();
		clear_bit(SSH_PACKET_SF_QUEUED_BIT, &p->state);

		list_move_tail(&p->queue_node, &complete_q);
	}
	spin_unlock(&ptl->queue.lock);

	/* Mark pending packets as locked and move them to complete_p. */
	spin_lock(&ptl->pending.lock);
	list_for_each_entry_safe(p, n, &ptl->pending.head, pending_node) {
		set_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state);
		/* Ensure that state does not get zero. */
		smp_mb__before_atomic();
		clear_bit(SSH_PACKET_SF_PENDING_BIT, &p->state);

		list_move_tail(&p->pending_node, &complete_p);
	}
	atomic_set(&ptl->pending.count, 0);
	spin_unlock(&ptl->pending.lock);

	/* Complete and drop packets on complete_q. */
	list_for_each_entry(p, &complete_q, queue_node) {
		if (!test_and_set_bit(SSH_PACKET_SF_COMPLETED_BIT, &p->state))
			__ssh_ptl_complete(p, -ESHUTDOWN);

		ssh_packet_put(p);
	}

	/* Complete and drop packets on complete_p. */
	list_for_each_entry(p, &complete_p, pending_node) {
		if (!test_and_set_bit(SSH_PACKET_SF_COMPLETED_BIT, &p->state))
			__ssh_ptl_complete(p, -ESHUTDOWN);

		ssh_packet_put(p);
	}

	/*
	 * At this point we have guaranteed that the system doesn't reference
	 * any packets any more.
	 */
}

/**
 * ssh_ptl_init() - Initialize packet transport layer.
 * @ptl: The packet transport layer to initialize.
 * @serdev: The underlying serial device, i.e. the lower-level transport.
 * @ops: Packet layer operations.
 *
 * Initializes the given packet transport layer. Transmitter and receiver
 * threads must be started separately via ssh_ptl_tx_start() and
 * ssh_ptl_rx_start(), after the packet-layer has been initialized and the
 * lower-level transport layer has been set up.
 *
 * Return: Returns zero on success and a nonzero error code on failure.
 */
int ssh_ptl_init(struct ssh_ptl *ptl, struct serdev_device *serdev,
		 struct ssh_ptl_ops *ops)
{
	int i, status;

	ptl->serdev = serdev;
	ptl->state = 0;

	spin_lock_init(&ptl->queue.lock);
	INIT_LIST_HEAD(&ptl->queue.head);

	spin_lock_init(&ptl->pending.lock);
	INIT_LIST_HEAD(&ptl->pending.head);
	atomic_set_release(&ptl->pending.count, 0);

	ptl->tx.thread = NULL;
	atomic_set(&ptl->tx.running, 0);
	init_completion(&ptl->tx.thread_cplt_pkt);
	init_completion(&ptl->tx.thread_cplt_tx);
	init_waitqueue_head(&ptl->tx.packet_wq);

	ptl->rx.thread = NULL;
	init_waitqueue_head(&ptl->rx.wq);

	spin_lock_init(&ptl->rtx_timeout.lock);
	ptl->rtx_timeout.timeout = SSH_PTL_PACKET_TIMEOUT;
	ptl->rtx_timeout.expires = KTIME_MAX;
	INIT_DELAYED_WORK(&ptl->rtx_timeout.reaper, ssh_ptl_timeout_reap);

	ptl->ops = *ops;

	/* Initialize list of recent/blocked SEQs with invalid sequence IDs. */
	for (i = 0; i < ARRAY_SIZE(ptl->rx.blocked.seqs); i++)
		ptl->rx.blocked.seqs[i] = U16_MAX;
	ptl->rx.blocked.offset = 0;

	status = kfifo_alloc(&ptl->rx.fifo, SSH_PTL_RX_FIFO_LEN, GFP_KERNEL);
	if (status)
		return status;

	status = sshp_buf_alloc(&ptl->rx.buf, SSH_PTL_RX_BUF_LEN, GFP_KERNEL);
	if (status)
		kfifo_free(&ptl->rx.fifo);

	return status;
}

/**
 * ssh_ptl_destroy() - Deinitialize packet transport layer.
 * @ptl: The packet transport layer to deinitialize.
 *
 * Deinitializes the given packet transport layer and frees resources
 * associated with it. If receiver and/or transmitter threads have been
 * started, the layer must first be shut down via ssh_ptl_shutdown() before
 * this function can be called.
 */
void ssh_ptl_destroy(struct ssh_ptl *ptl)
{
	kfifo_free(&ptl->rx.fifo);
	sshp_buf_free(&ptl->rx.buf);
}