/*
 * Copyright 2012 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */
#include <linux/atomic.h>
#include <linux/module.h>
#include <gxio/dma_queue.h>
/* Wait for a memory read to complete. */
#define wait_for_value(val)				\
	__asm__ __volatile__("move %0, %0" :: "r"(val))

/* The index is in the low 16. */
#define DMA_QUEUE_INDEX_MASK ((1 << 16) - 1)
/*
 * The hardware descriptor-ring type.
 * This matches the types used by mpipe (MPIPE_EDMA_POST_REGION_VAL_t)
 * and trio (TRIO_PUSH_DMA_REGION_VAL_t or TRIO_PULL_DMA_REGION_VAL_t).
 * See those types for more documentation on the individual fields.
 */
typedef union {
	struct {
#ifndef __BIG_ENDIAN__
		uint64_t ring_idx:16;
		uint64_t count:16;
		uint64_t gen:1;
		uint64_t __reserved:31;
#else
		uint64_t __reserved:31;
		uint64_t gen:1;
		uint64_t count:16;
		uint64_t ring_idx:16;
#endif
	};
	uint64_t word;
} __gxio_ring_t;
void __gxio_dma_queue_init(__gxio_dma_queue_t *dma_queue,
			   void *post_region_addr, unsigned int num_entries)
{
	/*
	 * Limit 65536 entry rings to 65535 credits because we only have a
	 * 16 bit completion counter.
	 */
	int64_t credits = (num_entries < 65536) ? num_entries : 65535;

	memset(dma_queue, 0, sizeof(*dma_queue));

	dma_queue->post_region_addr = post_region_addr;
	dma_queue->hw_complete_count = 0;
	dma_queue->credits_and_next_index = credits << DMA_QUEUE_CREDIT_SHIFT;
}

EXPORT_SYMBOL_GPL(__gxio_dma_queue_init);
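
/*
 * Illustrative note (added for clarity; not part of the original driver):
 * credits_and_next_index packs the remaining credit count and the next
 * descriptor index into one 64-bit word so both can be maintained with a
 * single atomic operation.  The index lives in the low 16 bits (see
 * DMA_QUEUE_INDEX_MASK above) and the credits sit at
 * DMA_QUEUE_CREDIT_SHIFT, so a freshly initialized 512-entry ring holds
 *
 *	(512 << DMA_QUEUE_CREDIT_SHIFT) | 0
 *
 * i.e. 512 credits and a next index of 0.
 */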
void __gxio_dma_queue_update_credits(__gxio_dma_queue_t *dma_queue)
{
	__gxio_ring_t val;
	uint64_t count;
	uint64_t delta;
	uint64_t new_count;

	/*
	 * Read the 64-bit completion count without touching the cache, so
	 * we later avoid having to evict any sharers of this cache line
	 * when we update it below.
	 */
	uint64_t orig_hw_complete_count =
		cmpxchg(&dma_queue->hw_complete_count,
			-1, -1);

	/* Make sure the load completes before we access the hardware. */
	wait_for_value(orig_hw_complete_count);

	/* Read the 16-bit count of how many packets it has completed. */
	val.word = __gxio_mmio_read(dma_queue->post_region_addr);
	count = val.count;

	/*
	 * Calculate the number of completions since we last updated the
	 * 64-bit counter.  It's safe to ignore the high bits because the
	 * maximum credit value is 65535.
	 */
	delta = (count - orig_hw_complete_count) & 0xffff;
	if (delta == 0)
		return;

	/*
	 * Try to write back the count, advanced by delta.  If we race with
	 * another thread, this might fail, in which case we return
	 * immediately on the assumption that some credits are (or at least
	 * were) available.
	 */
	new_count = orig_hw_complete_count + delta;
	if (cmpxchg(&dma_queue->hw_complete_count,
		    orig_hw_complete_count,
		    new_count) != orig_hw_complete_count)
		return;

	/*
	 * We succeeded in advancing the completion count; add back the
	 * corresponding number of egress credits.
	 */
	__insn_fetchadd(&dma_queue->credits_and_next_index,
			(delta << DMA_QUEUE_CREDIT_SHIFT));
}

EXPORT_SYMBOL_GPL(__gxio_dma_queue_update_credits);
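
/*
 * Worked example (added for clarity; not part of the original driver):
 * the hardware exposes only a 16-bit completion counter, so the 64-bit
 * hw_complete_count is advanced by the 16-bit difference computed above.
 * If hw_complete_count is 131070 (0x1fffe) and the hardware counter has
 * wrapped around to 3, then
 *
 *	delta = (3 - 131070) & 0xffff == 5
 *
 * and the 64-bit count becomes 131075, whose low 16 bits (3) again match
 * the hardware.  This stays correct as long as fewer than 65536
 * completions happen between updates, which the 65535-credit limit in
 * __gxio_dma_queue_init() guarantees.
 */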
/*
 * A separate 'blocked' method for put() so that backtraces and
 * profiles will clearly indicate that we're wasting time spinning on
 * egress availability rather than actually posting commands.
 */
int64_t __gxio_dma_queue_wait_for_credits(__gxio_dma_queue_t *dma_queue,
					  int64_t modifier)
{
	int backoff = 16;
	int64_t old;

	do {
		int i;

		/* Back off to avoid spamming memory networks. */
		for (i = backoff; i > 0; i--)
			__insn_mfspr(SPR_PASS);

		/* Check credits again. */
		__gxio_dma_queue_update_credits(dma_queue);
		old = __insn_fetchaddgez(&dma_queue->credits_and_next_index,
					 modifier);

		/* Calculate bounded exponential backoff for next iteration. */
		if (backoff < 256)
			backoff *= 2;
	} while (old + modifier < 0);

	return old;
}

EXPORT_SYMBOL_GPL(__gxio_dma_queue_wait_for_credits);
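
/*
 * Illustrative note (added for clarity; not part of the original driver):
 * __insn_fetchaddgez() is the Tile "fetch and add if greater than or
 * equal to zero" atomic: it returns the old value, but only stores
 * old + modifier when that sum is non-negative.  Because the credits
 * occupy the high bits of credits_and_next_index, a modifier that tries
 * to take more credits than remain drives the 64-bit sum negative, the
 * store is suppressed, and the (old + modifier < 0) test above sends us
 * around the backoff loop again.
 */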
int64_t __gxio_dma_queue_reserve_aux(__gxio_dma_queue_t *dma_queue,
				     unsigned int num, int wait)
{
	return __gxio_dma_queue_reserve(dma_queue, num, wait != 0, true);
}

EXPORT_SYMBOL_GPL(__gxio_dma_queue_reserve_aux);
int __gxio_dma_queue_is_complete(__gxio_dma_queue_t *dma_queue,
				 int64_t completion_slot, int update)
{
	if (update) {
		if (ACCESS_ONCE(dma_queue->hw_complete_count) >
		    completion_slot)
			return 1;

		__gxio_dma_queue_update_credits(dma_queue);
	}

	return ACCESS_ONCE(dma_queue->hw_complete_count) > completion_slot;
}

EXPORT_SYMBOL_GPL(__gxio_dma_queue_is_complete);
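
/*
 * Usage sketch (added for illustration; not part of the original driver.
 * The descriptor-posting step is hypothetical pseudocode; real callers go
 * through the mpipe/trio wrappers built on top of this file.)
 *
 *	int64_t slot = __gxio_dma_queue_reserve_aux(queue, 1, 0);
 *	if (slot < 0)
 *		return slot;	(no credits; a nonzero 'wait' would spin)
 *	... post a descriptor for 'slot' via the hardware post region ...
 *	while (!__gxio_dma_queue_is_complete(queue, slot, 1))
 *		cpu_relax();	(poll, requesting a credit update each time)
 */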