// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
 */

#include <soc/tegra/ivc.h>

#define TEGRA_IVC_ALIGN 64

/*
 * IVC channel reset protocol.
 *
 * Each end uses its tx_channel.state to indicate its synchronization state.
 */
enum tegra_ivc_state {
	/*
	 * This value is zero for backwards compatibility with services that
	 * assume channels to be initially zeroed. Such channels are in an
	 * initially valid state, but cannot be asynchronously reset, and must
	 * maintain a valid state at all times.
	 *
	 * The transmitting end can enter the established state from the sync
	 * or ack state when it observes the receiving endpoint in the ack or
	 * established state, indicating that it has cleared the counters in
	 * our rx_channel.
	 */
	TEGRA_IVC_STATE_ESTABLISHED = 0,

	/*
	 * If an endpoint is observed in the sync state, the remote endpoint is
	 * allowed to clear the counters it owns asynchronously with respect to
	 * the current endpoint. Therefore, the current endpoint is no longer
	 * allowed to communicate.
	 */
	TEGRA_IVC_STATE_SYNC,

	/*
	 * When the transmitting end observes the receiving end in the sync
	 * state, it can clear the w_count and r_count and transition to the ack
	 * state. If the remote endpoint observes us in the ack state, it can
	 * return to the established state once it has cleared its counters.
	 */
	TEGRA_IVC_STATE_ACK
};

/*
 * This structure is divided into two cache-aligned parts, the first is only
 * written through the tx.channel pointer, while the second is only written
 * through the rx.channel pointer. This delineates ownership of the cache
 * lines, which is critical to performance and necessary in non-cache coherent
 * implementations.
 */
struct tegra_ivc_header {
	union {
		struct {
			/* fields owned by the transmitting end */
			u32 count;
			u32 state;
		};

		u8 pad[TEGRA_IVC_ALIGN];
	} tx;

	union {
		/* fields owned by the receiving end */
		u32 count;

		u8 pad[TEGRA_IVC_ALIGN];
	} rx;
};

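/*
 * Illustrative layout (an assumption derived from the structure above, not
 * text from the original source): with TEGRA_IVC_ALIGN == 64 the header
 * occupies 128 bytes and the frames follow it contiguously, e.g. for a
 * channel with four 64-byte frames:
 *
 *	offset 0x000: tx.count, tx.state (padded to 64 bytes)
 *	offset 0x040: rx.count           (padded to 64 bytes)
 *	offset 0x080: frame 0
 *	offset 0x0c0: frame 1
 *	offset 0x100: frame 2
 *	offset 0x140: frame 3
 *
 * This is the layout that tegra_ivc_frame_virt() and tegra_ivc_frame_phys()
 * below assume when they compute (header + 1) plus a frame_size multiple.
 */
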
static inline void tegra_ivc_invalidate(struct tegra_ivc *ivc, dma_addr_t phys)
{
	if (!ivc->peer)
		return;

	dma_sync_single_for_cpu(ivc->peer, phys, TEGRA_IVC_ALIGN,
				DMA_FROM_DEVICE);
}

static inline void tegra_ivc_flush(struct tegra_ivc *ivc, dma_addr_t phys)
{
	if (!ivc->peer)
		return;

	dma_sync_single_for_device(ivc->peer, phys, TEGRA_IVC_ALIGN,
				   DMA_TO_DEVICE);
}

static inline bool tegra_ivc_empty(struct tegra_ivc *ivc,
				   struct tegra_ivc_header *header)
{
	/*
	 * This function performs multiple checks on the same values with
	 * security implications, so create snapshots with READ_ONCE() to
	 * ensure that these checks use the same values.
	 */
	u32 tx = READ_ONCE(header->tx.count);
	u32 rx = READ_ONCE(header->rx.count);

	/*
	 * Perform an over-full check to prevent denial of service attacks
	 * where a server could be easily fooled into believing that there's
	 * an extremely large number of frames ready, since receivers are not
	 * expected to check for full or over-full conditions.
	 *
	 * Although the channel isn't empty, this is an invalid case caused by
	 * a potentially malicious peer, so returning empty is safer, because
	 * it gives the impression that the channel has gone silent.
	 */
	if (tx - rx > ivc->num_frames)
		return true;

	return tx == rx;
}

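/*
 * Worked example (illustrative, relying only on standard C unsigned
 * arithmetic): the counters are never masked, so they eventually wrap, but
 * the u32 subtraction still yields the correct fill level. With
 * num_frames == 4, tx == 0x00000001 and rx == 0xffffffff give tx - rx == 2,
 * i.e. two frames pending. A malicious peer advertising tx == 0x80000000
 * with rx == 0 would make tx - rx == 0x80000000 > 4, which the over-full
 * check above deliberately reports as empty.
 */
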
static inline bool tegra_ivc_full(struct tegra_ivc *ivc,
				  struct tegra_ivc_header *header)
{
	u32 tx = READ_ONCE(header->tx.count);
	u32 rx = READ_ONCE(header->rx.count);

	/*
	 * Invalid cases where the counters indicate that the queue is over
	 * capacity also appear full.
	 */
	return tx - rx >= ivc->num_frames;
}

static inline u32 tegra_ivc_available(struct tegra_ivc *ivc,
				      struct tegra_ivc_header *header)
{
	u32 tx = READ_ONCE(header->tx.count);
	u32 rx = READ_ONCE(header->rx.count);

	/*
	 * This function isn't expected to be used in scenarios where an
	 * over-full situation can lead to denial of service attacks. See the
	 * comment in tegra_ivc_empty() for an explanation about special
	 * over-full considerations.
	 */
	return tx - rx;
}

static inline void tegra_ivc_advance_tx(struct tegra_ivc *ivc)
{
	WRITE_ONCE(ivc->tx.channel->tx.count,
		   READ_ONCE(ivc->tx.channel->tx.count) + 1);

	if (ivc->tx.position == ivc->num_frames - 1)
		ivc->tx.position = 0;
	else
		ivc->tx.position++;
}

static inline void tegra_ivc_advance_rx(struct tegra_ivc *ivc)
{
	WRITE_ONCE(ivc->rx.channel->rx.count,
		   READ_ONCE(ivc->rx.channel->rx.count) + 1);

	if (ivc->rx.position == ivc->num_frames - 1)
		ivc->rx.position = 0;
	else
		ivc->rx.position++;
}

static inline int tegra_ivc_check_read(struct tegra_ivc *ivc)
{
	unsigned int offset = offsetof(struct tegra_ivc_header, tx.count);

	/*
	 * tx.channel->state is set locally, so it is not synchronized with
	 * state from the remote peer. The remote peer cannot reset its
	 * transmit counters until we've acknowledged its synchronization
	 * request, so no additional synchronization is required because an
	 * asynchronous transition of rx.channel->state to
	 * TEGRA_IVC_STATE_ACK is not allowed.
	 */
	if (ivc->tx.channel->tx.state != TEGRA_IVC_STATE_ESTABLISHED)
		return -ECONNRESET;

	/*
	 * Avoid unnecessary invalidations when performing repeated accesses
	 * to an IVC channel by checking the old queue pointers first.
	 *
	 * Synchronization is only necessary when these pointers indicate
	 * empty or full.
	 */
	if (!tegra_ivc_empty(ivc, ivc->rx.channel))
		return 0;

	tegra_ivc_invalidate(ivc, ivc->rx.phys + offset);

	if (tegra_ivc_empty(ivc, ivc->rx.channel))
		return -ENOSPC;

	return 0;
}

static inline int tegra_ivc_check_write(struct tegra_ivc *ivc)
{
	unsigned int offset = offsetof(struct tegra_ivc_header, rx.count);

	if (ivc->tx.channel->tx.state != TEGRA_IVC_STATE_ESTABLISHED)
		return -ECONNRESET;

	if (!tegra_ivc_full(ivc, ivc->tx.channel))
		return 0;

	tegra_ivc_invalidate(ivc, ivc->tx.phys + offset);

	if (tegra_ivc_full(ivc, ivc->tx.channel))
		return -ENOSPC;

	return 0;
}

static void *tegra_ivc_frame_virt(struct tegra_ivc *ivc,
				  struct tegra_ivc_header *header,
				  unsigned int frame)
{
	if (WARN_ON(frame >= ivc->num_frames))
		return ERR_PTR(-EINVAL);

	return (void *)(header + 1) + ivc->frame_size * frame;
}

static inline dma_addr_t tegra_ivc_frame_phys(struct tegra_ivc *ivc,
					      dma_addr_t phys,
					      unsigned int frame)
{
	unsigned long offset;

	offset = sizeof(struct tegra_ivc_header) + ivc->frame_size * frame;

	return phys + offset;
}

static inline void tegra_ivc_invalidate_frame(struct tegra_ivc *ivc,
					      dma_addr_t phys,
					      unsigned int frame,
					      unsigned int offset,
					      size_t size)
{
	if (!ivc->peer || WARN_ON(frame >= ivc->num_frames))
		return;

	phys = tegra_ivc_frame_phys(ivc, phys, frame) + offset;

	dma_sync_single_for_cpu(ivc->peer, phys, size, DMA_FROM_DEVICE);
}

static inline void tegra_ivc_flush_frame(struct tegra_ivc *ivc,
					 dma_addr_t phys,
					 unsigned int frame,
					 unsigned int offset,
					 size_t size)
{
	if (!ivc->peer || WARN_ON(frame >= ivc->num_frames))
		return;

	phys = tegra_ivc_frame_phys(ivc, phys, frame) + offset;

	dma_sync_single_for_device(ivc->peer, phys, size, DMA_TO_DEVICE);
}

/* directly peek at the next frame rx'ed */
void *tegra_ivc_read_get_next_frame(struct tegra_ivc *ivc)
{
	int err;

	if (WARN_ON(ivc == NULL))
		return ERR_PTR(-EINVAL);

	err = tegra_ivc_check_read(ivc);
	if (err < 0)
		return ERR_PTR(err);

	/*
	 * Order observation of ivc->rx.position potentially indicating new
	 * data before data read.
	 */
	smp_rmb();

	tegra_ivc_invalidate_frame(ivc, ivc->rx.phys, ivc->rx.position, 0,
				   ivc->frame_size);

	return tegra_ivc_frame_virt(ivc, ivc->rx.channel, ivc->rx.position);
}
EXPORT_SYMBOL(tegra_ivc_read_get_next_frame);

int tegra_ivc_read_advance(struct tegra_ivc *ivc)
{
	unsigned int rx = offsetof(struct tegra_ivc_header, rx.count);
	unsigned int tx = offsetof(struct tegra_ivc_header, tx.count);
	int err;

	/*
	 * No read barriers or synchronization here: the caller is expected to
	 * have already observed the channel non-empty. This check is just to
	 * catch programming errors.
	 */
	err = tegra_ivc_check_read(ivc);
	if (err < 0)
		return err;

	tegra_ivc_advance_rx(ivc);

	tegra_ivc_flush(ivc, ivc->rx.phys + rx);

	/*
	 * Ensure our write to ivc->rx.position occurs before our read from
	 * ivc->tx.position.
	 */
	smp_mb();

	/*
	 * Notify only upon transition from full to non-full. The available
	 * count can only asynchronously increase, so the worst possible
	 * side-effect will be a spurious notification.
	 */
	tegra_ivc_invalidate(ivc, ivc->rx.phys + tx);

	if (tegra_ivc_available(ivc, ivc->rx.channel) == ivc->num_frames - 1)
		ivc->notify(ivc, ivc->notify_data);

	return 0;
}
EXPORT_SYMBOL(tegra_ivc_read_advance);

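/*
 * Example receive path (a minimal sketch, not part of this driver; it
 * assumes an already established channel and a hypothetical
 * process_message() consumer):
 *
 *	void *frame = tegra_ivc_read_get_next_frame(ivc);
 *
 *	if (!IS_ERR(frame)) {
 *		process_message(frame, msg_len);
 *		tegra_ivc_read_advance(ivc);
 *	}
 *
 * The frame must be consumed (or copied out) before calling
 * tegra_ivc_read_advance(), since advancing releases the frame back to the
 * transmitting end.
 */
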
/* directly poke at the next frame to be tx'ed */
void *tegra_ivc_write_get_next_frame(struct tegra_ivc *ivc)
{
	int err;

	err = tegra_ivc_check_write(ivc);
	if (err < 0)
		return ERR_PTR(err);

	return tegra_ivc_frame_virt(ivc, ivc->tx.channel, ivc->tx.position);
}
EXPORT_SYMBOL(tegra_ivc_write_get_next_frame);

/* advance the tx buffer */
int tegra_ivc_write_advance(struct tegra_ivc *ivc)
{
	unsigned int tx = offsetof(struct tegra_ivc_header, tx.count);
	unsigned int rx = offsetof(struct tegra_ivc_header, rx.count);
	int err;

	err = tegra_ivc_check_write(ivc);
	if (err < 0)
		return err;

	tegra_ivc_flush_frame(ivc, ivc->tx.phys, ivc->tx.position, 0,
			      ivc->frame_size);

	/*
	 * Order any possible stores to the frame before update of
	 * ivc->tx.position.
	 */
	smp_wmb();

	tegra_ivc_advance_tx(ivc);
	tegra_ivc_flush(ivc, ivc->tx.phys + tx);

	/*
	 * Ensure our write to ivc->tx.position occurs before our read from
	 * ivc->rx.position.
	 */
	smp_mb();

	/*
	 * Notify only upon transition from empty to non-empty. The available
	 * count can only asynchronously decrease, so the worst possible
	 * side-effect will be a spurious notification.
	 */
	tegra_ivc_invalidate(ivc, ivc->tx.phys + rx);

	if (tegra_ivc_available(ivc, ivc->tx.channel) == 1)
		ivc->notify(ivc, ivc->notify_data);

	return 0;
}
EXPORT_SYMBOL(tegra_ivc_write_advance);

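/*
 * Example transmit path (a minimal sketch, not part of this driver; the
 * msg buffer and msg_len are hypothetical, and msg_len must not exceed the
 * channel's frame_size):
 *
 *	void *frame = tegra_ivc_write_get_next_frame(ivc);
 *
 *	if (!IS_ERR(frame)) {
 *		memcpy(frame, msg, msg_len);
 *		tegra_ivc_write_advance(ivc);
 *	}
 *
 * All stores to the frame must happen before tegra_ivc_write_advance() is
 * called, since it flushes the frame and publishes the new tx.count.
 */
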
void tegra_ivc_reset(struct tegra_ivc *ivc)
{
	unsigned int offset = offsetof(struct tegra_ivc_header, tx.count);

	ivc->tx.channel->tx.state = TEGRA_IVC_STATE_SYNC;
	tegra_ivc_flush(ivc, ivc->tx.phys + offset);
	ivc->notify(ivc, ivc->notify_data);
}
EXPORT_SYMBOL(tegra_ivc_reset);

/*
 * =======================================================
 *  IVC State Transition Table - see tegra_ivc_notified()
 * =======================================================
 *
 *	local	remote	action
 *	-----	------	-----------------------------------
 *	SYNC	EST	<none>
 *	SYNC	ACK	reset counters; move to EST; notify
 *	SYNC	SYNC	reset counters; move to ACK; notify
 *	ACK	EST	move to EST; notify
 *	ACK	ACK	move to EST; notify
 *	ACK	SYNC	reset counters; move to ACK; notify
 *	EST	EST	<none>
 *	EST	ACK	<none>
 *	EST	SYNC	reset counters; move to ACK; notify
 *
 * ===============================================================
 */
int tegra_ivc_notified(struct tegra_ivc *ivc)
{
	unsigned int offset = offsetof(struct tegra_ivc_header, tx.count);
	enum tegra_ivc_state state;

	/* Copy the receiver's state out of shared memory. */
	tegra_ivc_invalidate(ivc, ivc->rx.phys + offset);
	state = READ_ONCE(ivc->rx.channel->tx.state);

	if (state == TEGRA_IVC_STATE_SYNC) {
		offset = offsetof(struct tegra_ivc_header, tx.count);

		/*
		 * Order observation of TEGRA_IVC_STATE_SYNC before stores
		 * clearing tx.channel.
		 */
		smp_rmb();

		/*
		 * Reset tx.channel counters. The remote end is in the SYNC
		 * state and won't make progress until we change our state,
		 * so the counters are not in use at this time.
		 */
		ivc->tx.channel->tx.count = 0;
		ivc->rx.channel->rx.count = 0;

		ivc->tx.position = 0;
		ivc->rx.position = 0;

		/*
		 * Ensure that counters appear cleared before new state can be
		 * observed.
		 */
		smp_wmb();

		/*
		 * Move to ACK state. We have just cleared our counters, so it
		 * is now safe for the remote end to start using these values.
		 */
		ivc->tx.channel->tx.state = TEGRA_IVC_STATE_ACK;
		tegra_ivc_flush(ivc, ivc->tx.phys + offset);

		/*
		 * Notify remote end to observe state transition.
		 */
		ivc->notify(ivc, ivc->notify_data);

	} else if (ivc->tx.channel->tx.state == TEGRA_IVC_STATE_SYNC &&
		   state == TEGRA_IVC_STATE_ACK) {
		offset = offsetof(struct tegra_ivc_header, tx.count);

		/*
		 * Order observation of TEGRA_IVC_STATE_ACK before stores
		 * clearing tx.channel.
		 */
		smp_rmb();

		/*
		 * Reset tx.channel counters. The remote end is in the ACK
		 * state and won't make progress until we change our state,
		 * so the counters are not in use at this time.
		 */
		ivc->tx.channel->tx.count = 0;
		ivc->rx.channel->rx.count = 0;

		ivc->tx.position = 0;
		ivc->rx.position = 0;

		/*
		 * Ensure that counters appear cleared before new state can be
		 * observed.
		 */
		smp_wmb();

		/*
		 * Move to ESTABLISHED state. We know that the remote end has
		 * already cleared its counters, so it is safe to start
		 * writing/reading on this channel.
		 */
		ivc->tx.channel->tx.state = TEGRA_IVC_STATE_ESTABLISHED;
		tegra_ivc_flush(ivc, ivc->tx.phys + offset);

		/*
		 * Notify remote end to observe state transition.
		 */
		ivc->notify(ivc, ivc->notify_data);

	} else if (ivc->tx.channel->tx.state == TEGRA_IVC_STATE_ACK) {
		offset = offsetof(struct tegra_ivc_header, tx.count);

		/*
		 * At this point, we have observed the peer to be in either
		 * the ACK or ESTABLISHED state. Next, order observation of
		 * peer state before storing to tx.channel.
		 */
		smp_rmb();

		/*
		 * Move to ESTABLISHED state. We know that we have previously
		 * cleared our counters, and we know that the remote end has
		 * cleared its counters, so it is safe to start writing/reading
		 * on this channel.
		 */
		ivc->tx.channel->tx.state = TEGRA_IVC_STATE_ESTABLISHED;
		tegra_ivc_flush(ivc, ivc->tx.phys + offset);

		/*
		 * Notify remote end to observe state transition.
		 */
		ivc->notify(ivc, ivc->notify_data);

	} else {
		/*
		 * There is no need to handle any further action. Either the
		 * channel is already fully established, or we are waiting for
		 * the remote end to catch up with our current state. Refer
		 * to the diagram in "IVC State Transition Table" above.
		 */
	}

	if (ivc->tx.channel->tx.state != TEGRA_IVC_STATE_ESTABLISHED)
		return -EAGAIN;

	return 0;
}
EXPORT_SYMBOL(tegra_ivc_notified);

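/*
 * Example reset handshake (a minimal sketch, not part of this driver): one
 * end initiates a reset, and both ends then call tegra_ivc_notified() from
 * their notification handlers (e.g. a mailbox interrupt) until it returns 0:
 *
 *	tegra_ivc_reset(ivc);
 *
 *	// in the notification handler:
 *	if (tegra_ivc_notified(ivc) == 0) {
 *		// channel established; reads and writes may proceed
 *	}
 *
 * While the handshake is in progress, tegra_ivc_notified() returns -EAGAIN
 * and the read/write paths above fail with -ECONNRESET.
 */
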
size_t tegra_ivc_align(size_t size)
{
	return ALIGN(size, TEGRA_IVC_ALIGN);
}
EXPORT_SYMBOL(tegra_ivc_align);

unsigned tegra_ivc_total_queue_size(unsigned queue_size)
{
	if (!IS_ALIGNED(queue_size, TEGRA_IVC_ALIGN)) {
		pr_err("%s: queue_size (%u) must be %u-byte aligned\n",
		       __func__, queue_size, TEGRA_IVC_ALIGN);
		return 0;
	}

	return queue_size + sizeof(struct tegra_ivc_header);
}
EXPORT_SYMBOL(tegra_ivc_total_queue_size);

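/*
 * Example (illustrative arithmetic): sixteen 128-byte frames occupy
 * 16 * 128 = 2048 bytes, so tegra_ivc_total_queue_size(2048) returns
 * 2048 + sizeof(struct tegra_ivc_header) = 2048 + 128 = 2176 bytes for one
 * direction of the channel. Callers would size each of the rx and tx
 * shared-memory regions this way before calling tegra_ivc_init().
 */
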
static int tegra_ivc_check_params(unsigned long rx, unsigned long tx,
				  unsigned int num_frames, size_t frame_size)
{
	BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct tegra_ivc_header, tx.count),
				 TEGRA_IVC_ALIGN));
	BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct tegra_ivc_header, rx.count),
				 TEGRA_IVC_ALIGN));
	BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct tegra_ivc_header),
				 TEGRA_IVC_ALIGN));

	if ((uint64_t)num_frames * (uint64_t)frame_size >= 0x100000000UL) {
		pr_err("num_frames * frame_size overflows\n");
		return -EINVAL;
	}

	if (!IS_ALIGNED(frame_size, TEGRA_IVC_ALIGN)) {
		pr_err("frame size not adequately aligned: %zu\n", frame_size);
		return -EINVAL;
	}

	/*
	 * The headers must at least be aligned enough for counters
	 * to be accessed atomically.
	 */
	if (!IS_ALIGNED(rx, TEGRA_IVC_ALIGN)) {
		pr_err("IVC channel start not aligned: %#lx\n", rx);
		return -EINVAL;
	}

	if (!IS_ALIGNED(tx, TEGRA_IVC_ALIGN)) {
		pr_err("IVC channel start not aligned: %#lx\n", tx);
		return -EINVAL;
	}

	if (rx < tx) {
		if (rx + frame_size * num_frames > tx) {
			pr_err("queue regions overlap: %#lx + %zx > %#lx\n",
			       rx, frame_size * num_frames, tx);
			return -EINVAL;
		}
	} else {
		if (tx + frame_size * num_frames > rx) {
			pr_err("queue regions overlap: %#lx + %zx > %#lx\n",
			       tx, frame_size * num_frames, rx);
			return -EINVAL;
		}
	}

	return 0;
}

int tegra_ivc_init(struct tegra_ivc *ivc, struct device *peer, void *rx,
		   dma_addr_t rx_phys, void *tx, dma_addr_t tx_phys,
		   unsigned int num_frames, size_t frame_size,
		   void (*notify)(struct tegra_ivc *ivc, void *data),
		   void *data)
{
	size_t queue_size;
	int err;

	if (WARN_ON(!ivc || !notify))
		return -EINVAL;

	/*
	 * All sizes that can be returned by communication functions should
	 * fit in an int.
	 */
	if (frame_size > INT_MAX)
		return -E2BIG;

	err = tegra_ivc_check_params((unsigned long)rx, (unsigned long)tx,
				     num_frames, frame_size);
	if (err < 0)
		return err;

	queue_size = tegra_ivc_total_queue_size(num_frames * frame_size);

	if (peer) {
		ivc->rx.phys = dma_map_single(peer, rx, queue_size,
					      DMA_BIDIRECTIONAL);
		if (dma_mapping_error(peer, ivc->rx.phys))
			return -ENOMEM;

		ivc->tx.phys = dma_map_single(peer, tx, queue_size,
					      DMA_BIDIRECTIONAL);
		if (dma_mapping_error(peer, ivc->tx.phys)) {
			dma_unmap_single(peer, ivc->rx.phys, queue_size,
					 DMA_BIDIRECTIONAL);
			return -ENOMEM;
		}
	} else {
		ivc->rx.phys = rx_phys;
		ivc->tx.phys = tx_phys;
	}

	ivc->rx.channel = rx;
	ivc->tx.channel = tx;
	ivc->peer = peer;
	ivc->notify = notify;
	ivc->notify_data = data;
	ivc->frame_size = frame_size;
	ivc->num_frames = num_frames;

	/*
	 * These values aren't necessarily correct until the channel has been
	 * reset.
	 */
	ivc->tx.position = 0;
	ivc->rx.position = 0;

	return 0;
}
EXPORT_SYMBOL(tegra_ivc_init);

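/*
 * Example setup (a minimal sketch, not part of this driver; the shared
 * memory pointers/addresses and the my_notify() callback are hypothetical):
 *
 *	int err;
 *
 *	err = tegra_ivc_init(ivc, NULL, rx_base, rx_phys, tx_base, tx_phys,
 *			     16, 128, my_notify, NULL);
 *	if (err < 0)
 *		return err;
 *
 *	tegra_ivc_reset(ivc);
 *
 * Passing a NULL peer device skips the dma_map_single() path and uses
 * rx_phys/tx_phys as-is, which suits statically reserved shared memory.
 * Note that one end's rx region must be the other end's tx region and
 * vice versa.
 */
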
void tegra_ivc_cleanup(struct tegra_ivc *ivc)
{
	if (ivc->peer) {
		size_t size = tegra_ivc_total_queue_size(ivc->num_frames *
							 ivc->frame_size);

		dma_unmap_single(ivc->peer, ivc->rx.phys, size,
				 DMA_BIDIRECTIONAL);
		dma_unmap_single(ivc->peer, ivc->tx.phys, size,
				 DMA_BIDIRECTIONAL);
	}
}
EXPORT_SYMBOL(tegra_ivc_cleanup);