/*
 * Copyright (c) 2014-2016, NVIDIA CORPORATION.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <soc/tegra/ivc.h>

#define TEGRA_IVC_ALIGN 64
/*
 * IVC channel reset protocol.
 *
 * Each end uses its tx_channel.state to indicate its synchronization state.
 */
enum tegra_ivc_state {
	/*
	 * This value is zero for backwards compatibility with services that
	 * assume channels to be initially zeroed. Such channels are in an
	 * initially valid state, but cannot be asynchronously reset, and must
	 * maintain a valid state at all times.
	 *
	 * The transmitting end can enter the established state from the sync
	 * or ack state when it observes the receiving endpoint in the ack or
	 * established state, indicating that it has cleared the counters in
	 * our rx_channel.
	 */
	TEGRA_IVC_STATE_ESTABLISHED = 0,

	/*
	 * If an endpoint is observed in the sync state, the remote endpoint is
	 * allowed to clear the counters it owns asynchronously with respect to
	 * the current endpoint. Therefore, the current endpoint is no longer
	 * allowed to communicate.
	 */
	TEGRA_IVC_STATE_SYNC,

	/*
	 * When the transmitting end observes the receiving end in the sync
	 * state, it can clear the w_count and r_count and transition to the
	 * ack state. If the remote endpoint observes us in the ack state, it
	 * can return to the established state once it has cleared its
	 * counters.
	 */
	TEGRA_IVC_STATE_ACK
};
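/*
 * To illustrate the protocol described above, a typical reset handshake
 * between endpoints A and B progresses roughly as follows (a sketch only;
 * the authoritative rules are the state transition table further down):
 *
 *	A: tx.state = SYNC, notify B
 *	B: sees A in SYNC, clears its counters, tx.state = ACK, notify A
 *	A: sees B in ACK, clears its counters, tx.state = ESTABLISHED, notify B
 *	B: sees A in ESTABLISHED, tx.state = ESTABLISHED, notify A
 *
 * From that point on both ends are in TEGRA_IVC_STATE_ESTABLISHED and may
 * exchange frames.
 */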
/*
 * This structure is divided into two-cache aligned parts, the first is only
 * written through the tx.channel pointer, while the second is only written
 * through the rx.channel pointer. This delineates ownership of the cache
 * lines, which is critical to performance and necessary in non-cache coherent
 * implementations.
 */
struct tegra_ivc_header {
	union {
		struct {
			/* fields owned by the transmitting end */
			u32 count;
			u32 state;
		};

		u8 pad[TEGRA_IVC_ALIGN];
	} tx;

	union {
		/* fields owned by the receiving end */
		u32 count;

		u8 pad[TEGRA_IVC_ALIGN];
	} rx;
};
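/*
 * For reference, each queue is laid out in shared memory as the header above
 * immediately followed by the frame data, which is how tegra_ivc_frame_virt()
 * and tegra_ivc_frame_phys() below compute their offsets:
 *
 *	+--------------------------+  base (TEGRA_IVC_ALIGN aligned)
 *	| struct tegra_ivc_header  |
 *	+--------------------------+  base + sizeof(struct tegra_ivc_header)
 *	| frame 0                  |
 *	| frame 1                  |
 *	| ...                      |
 *	| frame num_frames - 1     |
 *	+--------------------------+
 *
 * Each frame is frame_size bytes, and frame_size itself must be a multiple
 * of TEGRA_IVC_ALIGN (see tegra_ivc_check_params()).
 */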
static inline void tegra_ivc_invalidate(struct tegra_ivc *ivc, dma_addr_t phys)
{
	if (!ivc->peer)
		return;

	dma_sync_single_for_cpu(ivc->peer, phys, TEGRA_IVC_ALIGN,
				DMA_FROM_DEVICE);
}
static inline void tegra_ivc_flush(struct tegra_ivc *ivc, dma_addr_t phys)
{
	if (!ivc->peer)
		return;

	dma_sync_single_for_device(ivc->peer, phys, TEGRA_IVC_ALIGN,
				   DMA_TO_DEVICE);
}
static inline bool tegra_ivc_empty(struct tegra_ivc *ivc,
				   struct tegra_ivc_header *header)
{
	/*
	 * This function performs multiple checks on the same values with
	 * security implications, so create snapshots with ACCESS_ONCE() to
	 * ensure that these checks use the same values.
	 */
	u32 tx = ACCESS_ONCE(header->tx.count);
	u32 rx = ACCESS_ONCE(header->rx.count);

	/*
	 * Perform an over-full check to prevent denial of service attacks
	 * where a server could be easily fooled into believing that there's
	 * an extremely large number of frames ready, since receivers are not
	 * expected to check for full or over-full conditions.
	 *
	 * Although the channel isn't empty, this is an invalid case caused by
	 * a potentially malicious peer, so returning empty is safer, because
	 * it gives the impression that the channel has gone silent.
	 */
	if (tx - rx > ivc->num_frames)
		return true;

	return tx == rx;
}
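/*
 * The tx.count and rx.count fields are free-running 32-bit counters, so the
 * number of pending frames is always the unsigned difference tx - rx, which
 * remains correct across wrap-around. A worked example (illustrative only,
 * assuming num_frames == 4):
 *
 *	tx = 0xfffffffe, rx = 0xfffffffd  ->  tx - rx = 1  (one frame pending)
 *	tx = 0x00000001, rx = 0xffffffff  ->  tx - rx = 2  (wrapped, still valid)
 *	tx = 0x00000009, rx = 0x00000001  ->  tx - rx = 8  (> num_frames, so
 *	                                       invalid/over-full; reported empty)
 */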
static inline bool tegra_ivc_full(struct tegra_ivc *ivc,
				  struct tegra_ivc_header *header)
{
	u32 tx = ACCESS_ONCE(header->tx.count);
	u32 rx = ACCESS_ONCE(header->rx.count);

	/*
	 * Invalid cases where the counters indicate that the queue is over
	 * capacity also appear full.
	 */
	return tx - rx >= ivc->num_frames;
}
static inline u32 tegra_ivc_available(struct tegra_ivc *ivc,
				      struct tegra_ivc_header *header)
{
	u32 tx = ACCESS_ONCE(header->tx.count);
	u32 rx = ACCESS_ONCE(header->rx.count);

	/*
	 * This function isn't expected to be used in scenarios where an
	 * over-full situation can lead to denial of service attacks. See the
	 * comment in tegra_ivc_empty() for an explanation about special
	 * over-full considerations.
	 */
	return tx - rx;
}
static inline void tegra_ivc_advance_tx(struct tegra_ivc *ivc)
{
	ACCESS_ONCE(ivc->tx.channel->tx.count) =
		ACCESS_ONCE(ivc->tx.channel->tx.count) + 1;

	if (ivc->tx.position == ivc->num_frames - 1)
		ivc->tx.position = 0;
	else
		ivc->tx.position++;
}
static inline void tegra_ivc_advance_rx(struct tegra_ivc *ivc)
{
	ACCESS_ONCE(ivc->rx.channel->rx.count) =
		ACCESS_ONCE(ivc->rx.channel->rx.count) + 1;

	if (ivc->rx.position == ivc->num_frames - 1)
		ivc->rx.position = 0;
	else
		ivc->rx.position++;
}
static inline int tegra_ivc_check_read(struct tegra_ivc *ivc)
{
	unsigned int offset = offsetof(struct tegra_ivc_header, tx.count);

	/*
	 * tx.channel->state is set locally, so it is not synchronized with
	 * state from the remote peer. The remote peer cannot reset its
	 * transmit counters until we've acknowledged its synchronization
	 * request, so no additional synchronization is required because an
	 * asynchronous transition of rx.channel->state to
	 * TEGRA_IVC_STATE_ACK is not allowed.
	 */
	if (ivc->tx.channel->tx.state != TEGRA_IVC_STATE_ESTABLISHED)
		return -ECONNRESET;

	/*
	 * Avoid unnecessary invalidations when performing repeated accesses
	 * to an IVC channel by checking the old queue pointers first.
	 *
	 * Synchronization is only necessary when these pointers indicate
	 * empty or full.
	 */
	if (!tegra_ivc_empty(ivc, ivc->rx.channel))
		return 0;

	tegra_ivc_invalidate(ivc, ivc->rx.phys + offset);

	if (tegra_ivc_empty(ivc, ivc->rx.channel))
		return -ENOSPC;

	return 0;
}
static inline int tegra_ivc_check_write(struct tegra_ivc *ivc)
{
	unsigned int offset = offsetof(struct tegra_ivc_header, rx.count);

	if (ivc->tx.channel->tx.state != TEGRA_IVC_STATE_ESTABLISHED)
		return -ECONNRESET;

	if (!tegra_ivc_full(ivc, ivc->tx.channel))
		return 0;

	tegra_ivc_invalidate(ivc, ivc->tx.phys + offset);

	if (tegra_ivc_full(ivc, ivc->tx.channel))
		return -ENOSPC;

	return 0;
}
static void *tegra_ivc_frame_virt(struct tegra_ivc *ivc,
				  struct tegra_ivc_header *header,
				  unsigned int frame)
{
	if (WARN_ON(frame >= ivc->num_frames))
		return ERR_PTR(-EINVAL);

	return (void *)(header + 1) + ivc->frame_size * frame;
}
static inline dma_addr_t tegra_ivc_frame_phys(struct tegra_ivc *ivc,
					      dma_addr_t phys,
					      unsigned int frame)
{
	unsigned long offset;

	offset = sizeof(struct tegra_ivc_header) + ivc->frame_size * frame;

	return phys + offset;
}
static inline void tegra_ivc_invalidate_frame(struct tegra_ivc *ivc,
					      dma_addr_t phys,
					      unsigned int frame,
					      unsigned int offset,
					      size_t size)
{
	if (!ivc->peer || WARN_ON(frame >= ivc->num_frames))
		return;

	phys = tegra_ivc_frame_phys(ivc, phys, frame) + offset;

	dma_sync_single_for_cpu(ivc->peer, phys, size, DMA_FROM_DEVICE);
}
static inline void tegra_ivc_flush_frame(struct tegra_ivc *ivc,
					 dma_addr_t phys,
					 unsigned int frame,
					 unsigned int offset,
					 size_t size)
{
	if (!ivc->peer || WARN_ON(frame >= ivc->num_frames))
		return;

	phys = tegra_ivc_frame_phys(ivc, phys, frame) + offset;

	dma_sync_single_for_device(ivc->peer, phys, size, DMA_TO_DEVICE);
}
/* directly peek at the next frame rx'ed */
void *tegra_ivc_read_get_next_frame(struct tegra_ivc *ivc)
{
	int err;

	if (WARN_ON(ivc == NULL))
		return ERR_PTR(-EINVAL);

	err = tegra_ivc_check_read(ivc);
	if (err < 0)
		return ERR_PTR(err);

	/*
	 * Order observation of ivc->rx.position potentially indicating new
	 * data before data read.
	 */
	smp_rmb();

	tegra_ivc_invalidate_frame(ivc, ivc->rx.phys, ivc->rx.position, 0,
				   ivc->frame_size);

	return tegra_ivc_frame_virt(ivc, ivc->rx.channel, ivc->rx.position);
}
EXPORT_SYMBOL(tegra_ivc_read_get_next_frame);
int tegra_ivc_read_advance(struct tegra_ivc *ivc)
{
	unsigned int rx = offsetof(struct tegra_ivc_header, rx.count);
	unsigned int tx = offsetof(struct tegra_ivc_header, tx.count);
	int err;

	/*
	 * No read barriers or synchronization here: the caller is expected to
	 * have already observed the channel non-empty. This check is just to
	 * catch programming errors.
	 */
	err = tegra_ivc_check_read(ivc);
	if (err < 0)
		return err;

	tegra_ivc_advance_rx(ivc);

	tegra_ivc_flush(ivc, ivc->rx.phys + rx);

	/*
	 * Ensure our write to ivc->rx.position occurs before our read from
	 * ivc->tx.position.
	 */
	smp_mb();

	/*
	 * Notify only upon transition from full to non-full. The available
	 * count can only asynchronously increase, so the worst possible
	 * side-effect will be a spurious notification.
	 */
	tegra_ivc_invalidate(ivc, ivc->rx.phys + tx);

	if (tegra_ivc_available(ivc, ivc->rx.channel) == ivc->num_frames - 1)
		ivc->notify(ivc, ivc->notify_data);

	return 0;
}
EXPORT_SYMBOL(tegra_ivc_read_advance);
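/*
 * Taken together, tegra_ivc_read_get_next_frame() and
 * tegra_ivc_read_advance() form the receive path. A minimal sketch of a
 * hypothetical consumer (the struct my_message type and the surrounding
 * driver are assumptions for illustration, not part of this API):
 *
 *	static int my_receive(struct tegra_ivc *ivc, struct my_message *msg)
 *	{
 *		void *frame;
 *
 *		frame = tegra_ivc_read_get_next_frame(ivc);
 *		if (IS_ERR(frame))
 *			return PTR_ERR(frame);
 *
 *		memcpy(msg, frame, sizeof(*msg));
 *
 *		return tegra_ivc_read_advance(ivc);
 *	}
 *
 * The copy must happen before tegra_ivc_read_advance(), since advancing
 * hands the frame back to the transmitting end for reuse.
 */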
/* directly poke at the next frame to be tx'ed */
void *tegra_ivc_write_get_next_frame(struct tegra_ivc *ivc)
{
	int err;

	err = tegra_ivc_check_write(ivc);
	if (err < 0)
		return ERR_PTR(err);

	return tegra_ivc_frame_virt(ivc, ivc->tx.channel, ivc->tx.position);
}
EXPORT_SYMBOL(tegra_ivc_write_get_next_frame);
/* advance the tx buffer */
int tegra_ivc_write_advance(struct tegra_ivc *ivc)
{
	unsigned int tx = offsetof(struct tegra_ivc_header, tx.count);
	unsigned int rx = offsetof(struct tegra_ivc_header, rx.count);
	int err;

	err = tegra_ivc_check_write(ivc);
	if (err < 0)
		return err;

	tegra_ivc_flush_frame(ivc, ivc->tx.phys, ivc->tx.position, 0,
			      ivc->frame_size);

	/*
	 * Order any possible stores to the frame before update of
	 * ivc->tx.position.
	 */
	smp_wmb();

	tegra_ivc_advance_tx(ivc);
	tegra_ivc_flush(ivc, ivc->tx.phys + tx);

	/*
	 * Ensure our write to ivc->tx.position occurs before our read from
	 * ivc->rx.position.
	 */
	smp_mb();

	/*
	 * Notify only upon transition from empty to non-empty. The available
	 * count can only asynchronously decrease, so the worst possible
	 * side-effect will be a spurious notification.
	 */
	tegra_ivc_invalidate(ivc, ivc->tx.phys + rx);

	if (tegra_ivc_available(ivc, ivc->tx.channel) == 1)
		ivc->notify(ivc, ivc->notify_data);

	return 0;
}
EXPORT_SYMBOL(tegra_ivc_write_advance);
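/*
 * The transmit path mirrors the receive path above. A minimal sketch of a
 * hypothetical producer (struct my_message is again an assumption used only
 * for illustration):
 *
 *	static int my_send(struct tegra_ivc *ivc, const struct my_message *msg)
 *	{
 *		void *frame;
 *
 *		frame = tegra_ivc_write_get_next_frame(ivc);
 *		if (IS_ERR(frame))
 *			return PTR_ERR(frame);
 *
 *		memcpy(frame, msg, sizeof(*msg));
 *
 *		return tegra_ivc_write_advance(ivc);
 *	}
 *
 * All stores to the frame must be issued before tegra_ivc_write_advance(),
 * which publishes the frame to the receiving end.
 */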
void tegra_ivc_reset(struct tegra_ivc *ivc)
{
	unsigned int offset = offsetof(struct tegra_ivc_header, tx.count);

	ivc->tx.channel->tx.state = TEGRA_IVC_STATE_SYNC;
	tegra_ivc_flush(ivc, ivc->tx.phys + offset);
	ivc->notify(ivc, ivc->notify_data);
}
EXPORT_SYMBOL(tegra_ivc_reset);
/*
 * =======================================================
 *  IVC State Transition Table - see tegra_ivc_notified()
 * =======================================================
 *
 *	local	remote	action
 *	-----	------	-----------------------------------
 *	SYNC	EST	<none>
 *	SYNC	ACK	reset counters; move to EST; notify
 *	SYNC	SYNC	reset counters; move to ACK; notify
 *	ACK	EST	move to EST; notify
 *	ACK	ACK	move to EST; notify
 *	ACK	SYNC	reset counters; move to ACK; notify
 *	EST	EST	<none>
 *	EST	ACK	<none>
 *	EST	SYNC	reset counters; move to ACK; notify
 *
 * ===============================================================
 */
int tegra_ivc_notified(struct tegra_ivc *ivc)
{
	unsigned int offset = offsetof(struct tegra_ivc_header, tx.count);
	enum tegra_ivc_state state;

	/* Copy the receiver's state out of shared memory. */
	tegra_ivc_invalidate(ivc, ivc->rx.phys + offset);
	state = ACCESS_ONCE(ivc->rx.channel->tx.state);
	if (state == TEGRA_IVC_STATE_SYNC) {
		offset = offsetof(struct tegra_ivc_header, tx.count);

		/*
		 * Order observation of TEGRA_IVC_STATE_SYNC before stores
		 * clearing tx.channel.
		 */
		smp_rmb();

		/*
		 * Reset tx.channel counters. The remote end is in the SYNC
		 * state and won't make progress until we change our state,
		 * so the counters are not in use at this time.
		 */
		ivc->tx.channel->tx.count = 0;
		ivc->rx.channel->rx.count = 0;

		ivc->tx.position = 0;
		ivc->rx.position = 0;

		/*
		 * Ensure that counters appear cleared before new state can be
		 * observed.
		 */
		smp_wmb();

		/*
		 * Move to ACK state. We have just cleared our counters, so it
		 * is now safe for the remote end to start using these values.
		 */
		ivc->tx.channel->tx.state = TEGRA_IVC_STATE_ACK;
		tegra_ivc_flush(ivc, ivc->tx.phys + offset);

		/*
		 * Notify remote end to observe state transition.
		 */
		ivc->notify(ivc, ivc->notify_data);
	} else if (ivc->tx.channel->tx.state == TEGRA_IVC_STATE_SYNC &&
		   state == TEGRA_IVC_STATE_ACK) {
		offset = offsetof(struct tegra_ivc_header, tx.count);

		/*
		 * Order observation of ivc_state_sync before stores clearing
		 * tx_channel.
		 */
		smp_rmb();

		/*
		 * Reset tx.channel counters. The remote end is in the ACK
		 * state and won't make progress until we change our state,
		 * so the counters are not in use at this time.
		 */
		ivc->tx.channel->tx.count = 0;
		ivc->rx.channel->rx.count = 0;

		ivc->tx.position = 0;
		ivc->rx.position = 0;

		/*
		 * Ensure that counters appear cleared before new state can be
		 * observed.
		 */
		smp_wmb();

		/*
		 * Move to ESTABLISHED state. We know that the remote end has
		 * already cleared its counters, so it is safe to start
		 * writing/reading on this channel.
		 */
		ivc->tx.channel->tx.state = TEGRA_IVC_STATE_ESTABLISHED;
		tegra_ivc_flush(ivc, ivc->tx.phys + offset);

		/*
		 * Notify remote end to observe state transition.
		 */
		ivc->notify(ivc, ivc->notify_data);
	} else if (ivc->tx.channel->tx.state == TEGRA_IVC_STATE_ACK) {
		offset = offsetof(struct tegra_ivc_header, tx.count);

		/*
		 * At this point, we have observed the peer to be in either
		 * the ACK or ESTABLISHED state. Next, order observation of
		 * peer state before storing to tx.channel.
		 */
		smp_rmb();

		/*
		 * Move to ESTABLISHED state. We know that we have previously
		 * cleared our counters, and we know that the remote end has
		 * cleared its counters, so it is safe to start writing/reading
		 * on this channel.
		 */
		ivc->tx.channel->tx.state = TEGRA_IVC_STATE_ESTABLISHED;
		tegra_ivc_flush(ivc, ivc->tx.phys + offset);

		/*
		 * Notify remote end to observe state transition.
		 */
		ivc->notify(ivc, ivc->notify_data);
	} else {
		/*
		 * There is no need to handle any further action. Either the
		 * channel is already fully established, or we are waiting for
		 * the remote end to catch up with our current state. Refer
		 * to the diagram in "IVC State Transition Table" above.
		 */
	}

	if (ivc->tx.channel->tx.state != TEGRA_IVC_STATE_ESTABLISHED)
		return -EAGAIN;

	return 0;
}
EXPORT_SYMBOL(tegra_ivc_notified);
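/*
 * To establish a channel, the owner calls tegra_ivc_reset() once and then
 * calls tegra_ivc_notified() from its notification handler (for example a
 * doorbell or mailbox interrupt) until it returns 0. A hypothetical sketch
 * of that bring-up loop, with the interrupt machinery left out:
 *
 *	tegra_ivc_reset(ivc);
 *
 *	do {
 *		wait_for_peer_notification();	// assumed helper, not a real API
 *	} while (tegra_ivc_notified(ivc) != 0);
 *
 *	// the channel is now in TEGRA_IVC_STATE_ESTABLISHED on this end
 *
 * tegra_ivc_notified() must also keep being called for later notifications,
 * since the peer may reset the channel again at any time.
 */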
size_t tegra_ivc_align(size_t size)
{
	return ALIGN(size, TEGRA_IVC_ALIGN);
}
EXPORT_SYMBOL(tegra_ivc_align);
unsigned tegra_ivc_total_queue_size(unsigned queue_size)
{
	if (!IS_ALIGNED(queue_size, TEGRA_IVC_ALIGN)) {
		pr_err("%s: queue_size (%u) must be %u-byte aligned\n",
		       __func__, queue_size, TEGRA_IVC_ALIGN);
		return 0;
	}

	return queue_size + sizeof(struct tegra_ivc_header);
}
EXPORT_SYMBOL(tegra_ivc_total_queue_size);
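/*
 * As an illustration of the sizing helpers, a channel with 16 frames of 128
 * bytes each needs, per direction (values shown only as a worked example):
 *
 *	frame_size = tegra_ivc_align(128);                    // 128, already aligned
 *	queue_size = 16 * frame_size;                         // 2048
 *	total_size = tegra_ivc_total_queue_size(queue_size);  // 2048 + sizeof(header)
 *
 * The same amount of memory must be provided for both the RX and TX queues
 * passed to tegra_ivc_init().
 */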
static int tegra_ivc_check_params(unsigned long rx, unsigned long tx,
				  unsigned int num_frames, size_t frame_size)
{
	BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct tegra_ivc_header, tx.count),
				 TEGRA_IVC_ALIGN));
	BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct tegra_ivc_header, rx.count),
				 TEGRA_IVC_ALIGN));
	BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct tegra_ivc_header),
				 TEGRA_IVC_ALIGN));

	if ((uint64_t)num_frames * (uint64_t)frame_size >= 0x100000000UL) {
		pr_err("num_frames * frame_size overflows\n");
		return -EINVAL;
	}

	if (!IS_ALIGNED(frame_size, TEGRA_IVC_ALIGN)) {
		pr_err("frame size not adequately aligned: %zu\n", frame_size);
		return -EINVAL;
	}

	/*
	 * The headers must at least be aligned enough for counters
	 * to be accessed atomically.
	 */
	if (!IS_ALIGNED(rx, TEGRA_IVC_ALIGN)) {
		pr_err("IVC channel start not aligned: %#lx\n", rx);
		return -EINVAL;
	}

	if (!IS_ALIGNED(tx, TEGRA_IVC_ALIGN)) {
		pr_err("IVC channel start not aligned: %#lx\n", tx);
		return -EINVAL;
	}

	if (rx < tx) {
		if (rx + frame_size * num_frames > tx) {
			pr_err("queue regions overlap: %#lx + %zx > %#lx\n",
			       rx, frame_size * num_frames, tx);
			return -EINVAL;
		}
	} else {
		if (tx + frame_size * num_frames > rx) {
			pr_err("queue regions overlap: %#lx + %zx > %#lx\n",
			       tx, frame_size * num_frames, rx);
			return -EINVAL;
		}
	}

	return 0;
}
int tegra_ivc_init(struct tegra_ivc *ivc, struct device *peer, void *rx,
		   dma_addr_t rx_phys, void *tx, dma_addr_t tx_phys,
		   unsigned int num_frames, size_t frame_size,
		   void (*notify)(struct tegra_ivc *ivc, void *data),
		   void *data)
{
	size_t queue_size;
	int err;

	if (WARN_ON(!ivc || !notify))
		return -EINVAL;

	/*
	 * All sizes that can be returned by communication functions should
	 * fit in an int.
	 */
	if (frame_size > INT_MAX)
		return -E2BIG;

	err = tegra_ivc_check_params((unsigned long)rx, (unsigned long)tx,
				     num_frames, frame_size);
	if (err < 0)
		return err;

	queue_size = tegra_ivc_total_queue_size(num_frames * frame_size);

	if (peer) {
		ivc->rx.phys = dma_map_single(peer, rx, queue_size,
					      DMA_BIDIRECTIONAL);
		if (ivc->rx.phys == DMA_ERROR_CODE)
			return -ENOMEM;

		ivc->tx.phys = dma_map_single(peer, tx, queue_size,
					      DMA_BIDIRECTIONAL);
		if (ivc->tx.phys == DMA_ERROR_CODE) {
			dma_unmap_single(peer, ivc->rx.phys, queue_size,
					 DMA_BIDIRECTIONAL);
			return -ENOMEM;
		}
	} else {
		ivc->rx.phys = rx_phys;
		ivc->tx.phys = tx_phys;
	}

	ivc->rx.channel = rx;
	ivc->tx.channel = tx;
	ivc->peer = peer;
	ivc->notify = notify;
	ivc->notify_data = data;
	ivc->frame_size = frame_size;
	ivc->num_frames = num_frames;

	/*
	 * These values aren't necessarily correct until the channel has been
	 * reset.
	 */
	ivc->tx.position = 0;
	ivc->rx.position = 0;

	return 0;
}
EXPORT_SYMBOL(tegra_ivc_init);
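/*
 * Putting the pieces together, a hypothetical setup for a channel shared
 * with a remote processor might look roughly like the sketch below, somewhere
 * in the owning driver's probe path. The my_notify() callback, the
 * shared-memory pointers and the sizes are all assumptions made for
 * illustration; only the tegra_ivc_* calls are real.
 *
 *	static void my_notify(struct tegra_ivc *ivc, void *data)
 *	{
 *		// ring the doorbell/mailbox that interrupts the remote end
 *	}
 *
 *	err = tegra_ivc_init(ivc, peer_dev, rx_base, 0, tx_base, 0,
 *			     16, tegra_ivc_align(128), my_notify, NULL);
 *	if (err < 0)
 *		return err;
 *
 *	tegra_ivc_reset(ivc);
 *
 * When peer_dev is non-NULL the queues are DMA-mapped here, so they must be
 * released again with tegra_ivc_cleanup() when the channel is torn down.
 */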
void tegra_ivc_cleanup(struct tegra_ivc *ivc)
{
	if (ivc->peer) {
		size_t size = tegra_ivc_total_queue_size(ivc->num_frames *
							 ivc->frame_size);

		dma_unmap_single(ivc->peer, ivc->rx.phys, size,
				 DMA_BIDIRECTIONAL);
		dma_unmap_single(ivc->peer, ivc->tx.phys, size,
				 DMA_BIDIRECTIONAL);
	}
}
EXPORT_SYMBOL(tegra_ivc_cleanup);