4 * Derived from ivtv-queue.c
6 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
7 * Copyright (C) 2008 Andy Walls <awalls@radix.net>
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
25 #include "cx18-driver.h"
26 #include "cx18-queue.h"
27 #include "cx18-streams.h"
/*
 * cx18_buf_swap() - byte-swap the used payload of a buffer, one 32-bit
 * word at a time, in place via swab32s().
 *
 * Walks buf->buf in 4-byte steps over the first buf->bytesused bytes.
 *
 * NOTE(review): this extract is garbled; the original lines between the
 * prototype and the loop (opening brace and the declaration of 'i') and
 * the closing brace are not visible here - verify against pristine file.
 */
31 void cx18_buf_swap(struct cx18_buffer
*buf
)
35 for (i
= 0; i
< buf
->bytesused
; i
+= 4)
36 swab32s((u32
*)(buf
->buf
+ i
));
/*
 * _cx18_mdl_swap() - byte-swap the payload of every used buffer on an MDL.
 *
 * Iterates mdl->buf_list; a buffer with bytesused == 0 is tested in the
 * loop body (the statements following that test, and the closing braces,
 * are missing from this extract - presumably the loop stops at the first
 * empty buffer and swaps each used one via cx18_buf_swap(); confirm
 * against the pristine file).
 */
39 void _cx18_mdl_swap(struct cx18_mdl
*mdl
)
41 struct cx18_buffer
*buf
;
43 list_for_each_entry(buf
, &mdl
->buf_list
, list
) {
44 if (buf
->bytesused
== 0)
/*
 * cx18_queue_init() - reset a queue to the empty state.
 *
 * Re-initializes the list head and zeroes the depth counter.
 * NOTE(review): the braces and any further field resets (original lines
 * 51 and 54-55, e.g. a bytesused reset) are not visible in this extract.
 */
50 void cx18_queue_init(struct cx18_queue
*q
)
52 INIT_LIST_HEAD(&q
->list
);
53 atomic_set(&q
->depth
, 0);
/*
 * _cx18_enqueue() - add an MDL to one of a stream's queues.
 * @s:        stream owning the queues
 * @mdl:      MDL to enqueue
 * @q:        requested destination queue
 * @to_front: non-zero adds at the head (LIFO), otherwise at the tail (FIFO)
 *
 * The MDL is cleared when it is not headed for q_full (the clearing body is
 * missing from this extract). q_busy is capped at the firmware limit
 * CX18_MAX_FW_MDLS_PER_STREAM; when full, the destination is re-chosen
 * (the reassignment line is also missing here). The queue's bytesused
 * accounting adds the MDL's unread span (bytesused - readpos) and depth is
 * incremented; a q->lock unlock is visible at the end.
 *
 * NOTE(review): several interior original lines (the if-body that clears
 * the mdl, the diversion target, the spin_lock acquisition, the to_front
 * test/else, and the return) are absent from this extract - verify against
 * the pristine file before relying on this text.
 */
57 struct cx18_queue
*_cx18_enqueue(struct cx18_stream
*s
, struct cx18_mdl
*mdl
,
58 struct cx18_queue
*q
, int to_front
)
60 /* clear the mdl if it is not to be enqueued to the full queue */
61 if (q
!= &s
->q_full
) {
69 /* q_busy is restricted to a max buffer count imposed by firmware */
70 if (q
== &s
->q_busy
&&
71 atomic_read(&q
->depth
) >= CX18_MAX_FW_MDLS_PER_STREAM
)
77 list_add(&mdl
->list
, &q
->list
); /* LIFO */
79 list_add_tail(&mdl
->list
, &q
->list
); /* FIFO */
80 q
->bytesused
+= mdl
->bytesused
- mdl
->readpos
;
81 atomic_inc(&q
->depth
);
83 spin_unlock(&q
->lock
);
/*
 * cx18_dequeue() - pop the first MDL off a queue; mdl stays NULL (and is
 * presumably returned as NULL) when the queue is empty.
 *
 * Removes the head entry with list_del_init(), subtracts its unread span
 * (mdl->bytesused - mdl->readpos) from q->bytesused and decrements
 * q->depth; q->lock is released at the end.
 *
 * NOTE(review): the opening brace, the spin_lock acquisition, any per-MDL
 * reset (e.g. of a skipped counter) and the return statement are missing
 * from this extract - verify against the pristine file.
 */
87 struct cx18_mdl
*cx18_dequeue(struct cx18_stream
*s
, struct cx18_queue
*q
)
89 struct cx18_mdl
*mdl
= NULL
;
92 if (!list_empty(&q
->list
)) {
93 mdl
= list_first_entry(&q
->list
, struct cx18_mdl
, list
);
94 list_del_init(&mdl
->list
);
95 q
->bytesused
-= mdl
->bytesused
- mdl
->readpos
;
97 atomic_dec(&q
->depth
);
99 spin_unlock(&q
->lock
);
/*
 * _cx18_mdl_update_bufs_for_cpu() - distribute an MDL's total bytesused
 * over its constituent buffers and sync each buffer for CPU access.
 *
 * Full buffers are credited s->buf_size bytes each; the remainder lands in
 * the buffer that is only partially used. Each visited buffer is passed to
 * cx18_buf_sync_for_cpu().
 *
 * NOTE(review): braces, the else keyword between the two bytesused
 * assignments, and any zeroing of the running remainder are missing from
 * this extract - verify against the pristine file.
 */
103 static void _cx18_mdl_update_bufs_for_cpu(struct cx18_stream
*s
,
104 struct cx18_mdl
*mdl
)
106 struct cx18_buffer
*buf
;
107 u32 buf_size
= s
->buf_size
;
108 u32 bytesused
= mdl
->bytesused
;
110 list_for_each_entry(buf
, &mdl
->buf_list
, list
) {
112 if (bytesused
>= buf_size
) {
113 buf
->bytesused
= buf_size
;
114 bytesused
-= buf_size
;
116 buf
->bytesused
= bytesused
;
119 cx18_buf_sync_for_cpu(s
, buf
);
/*
 * cx18_mdl_update_bufs_for_cpu() - fast path for single-buffer MDLs.
 *
 * When the MDL holds exactly one buffer (list_is_singular), that buffer's
 * bytesused is set directly from the MDL and the buffer is synced for the
 * CPU; otherwise the general helper _cx18_mdl_update_bufs_for_cpu() is
 * used.
 *
 * NOTE(review): the braces, the trailing 'list' argument of the
 * list_first_entry() call, and the else keyword are missing from this
 * extract - verify against the pristine file.
 */
123 static inline void cx18_mdl_update_bufs_for_cpu(struct cx18_stream
*s
,
124 struct cx18_mdl
*mdl
)
126 struct cx18_buffer
*buf
;
128 if (list_is_singular(&mdl
->buf_list
)) {
129 buf
= list_first_entry(&mdl
->buf_list
, struct cx18_buffer
,
131 buf
->bytesused
= mdl
->bytesused
;
133 cx18_buf_sync_for_cpu(s
, buf
);
135 _cx18_mdl_update_bufs_for_cpu(s
, mdl
);
/*
 * cx18_queue_get_mdl() - find and remove the MDL the firmware reported done.
 * @s:  stream whose q_busy queue is searched
 * @id: MDL id reported by the firmware
 * (a further parameter line is missing from this extract; the body uses a
 * 'bytesused' value - presumably the byte count reported by firmware)
 *
 * Scans q_busy under its lock. An MDL that has been skipped at least
 * (queue depth - 1) times is assumed to have dropped out of rotation: a
 * warning is logged, it is swept onto a local 'sweep_up' list (whose
 * declaration is missing from this extract) and the q_busy depth is
 * decremented. The matching MDL is unlinked from q_busy, its bytesused
 * recorded, its buffers synced for the CPU, and CX18_F_M_NEED_SWAP set for
 * non-TS streams. Finally, every swept-up MDL is re-enqueued on q_free to
 * return it to normal rotation.
 *
 * NOTE(review): many interior original lines (the id comparison and
 * skipped++ increment, the ret = mdl / break, the 'if (ret != NULL)' guard
 * and the return statement, plus comment delimiters and braces) are
 * missing from this extract - verify against the pristine file.
 */
139 struct cx18_mdl
*cx18_queue_get_mdl(struct cx18_stream
*s
, u32 id
,
142 struct cx18
*cx
= s
->cx
;
143 struct cx18_mdl
*mdl
;
144 struct cx18_mdl
*tmp
;
145 struct cx18_mdl
*ret
= NULL
;
149 * We don't have to acquire multiple q locks here, because we are
150 * serialized by the single threaded work handler.
151 * MDLs from the firmware will thus remain in order as
152 * they are moved from q_busy to q_full or to the dvb ring buffer.
154 spin_lock(&s
->q_busy
.lock
);
155 list_for_each_entry_safe(mdl
, tmp
, &s
->q_busy
.list
, list
) {
157 * We should find what the firmware told us is done,
158 * right at the front of the queue. If we don't, we likely have
159 * missed an mdl done message from the firmware.
160 * Once we skip an mdl repeatedly, relative to the size of
161 * q_busy, we have high confidence we've missed it.
165 if (mdl
->skipped
>= atomic_read(&s
->q_busy
.depth
)-1) {
166 /* mdl must have fallen out of rotation */
167 CX18_WARN("Skipped %s, MDL %d, %d "
168 "times - it must have dropped out of "
169 "rotation\n", s
->name
, mdl
->id
,
171 /* Sweep it up to put it back into rotation */
172 list_move_tail(&mdl
->list
, &sweep_up
);
173 atomic_dec(&s
->q_busy
.depth
);
178 * We pull the desired mdl off of the queue here. Something
179 * will have to put it back on a queue later.
181 list_del_init(&mdl
->list
);
182 atomic_dec(&s
->q_busy
.depth
);
186 spin_unlock(&s
->q_busy
.lock
);
189 * We found the mdl for which we were looking. Get it ready for
190 * the caller to put on q_full or in the dvb ring buffer.
193 ret
->bytesused
= bytesused
;
195 /* 0'ed readpos, m_flags & curr_buf when mdl went on q_busy */
196 cx18_mdl_update_bufs_for_cpu(s
, ret
);
197 if (s
->type
!= CX18_ENC_STREAM_TYPE_TS
)
198 set_bit(CX18_F_M_NEED_SWAP
, &ret
->m_flags
);
201 /* Put any mdls the firmware is ignoring back into normal rotation */
202 list_for_each_entry_safe(mdl
, tmp
, &sweep_up
, list
) {
203 list_del_init(&mdl
->list
);
204 cx18_enqueue(s
, mdl
, &s
->q_free
);
/*
 * cx18_queue_flush() - move every MDL from q_src to q_dst, clearing each
 * MDL's transfer state on the way.
 *
 * Destinations other than q_free/q_idle are rejected by the guard below
 * (its early-return statement is missing from this extract). Both queue
 * locks are held while entries are moved tail-wise; only the
 * curr_buf = NULL reset is visible here (the other per-MDL field clears
 * are missing from the extract). q_src is finally re-initialized with
 * cx18_queue_init() before both locks are dropped.
 *
 * NOTE(review): braces and several interior lines are absent - verify
 * against the pristine file.
 */
209 /* Move all mdls of a queue, while flushing the mdl */
210 static void cx18_queue_flush(struct cx18_stream
*s
,
211 struct cx18_queue
*q_src
, struct cx18_queue
*q_dst
)
213 struct cx18_mdl
*mdl
;
215 /* It only makes sense to flush to q_free or q_idle */
216 if (q_src
== q_dst
|| q_dst
== &s
->q_full
|| q_dst
== &s
->q_busy
)
219 spin_lock(&q_src
->lock
);
220 spin_lock(&q_dst
->lock
);
221 while (!list_empty(&q_src
->list
)) {
222 mdl
= list_first_entry(&q_src
->list
, struct cx18_mdl
, list
);
223 list_move_tail(&mdl
->list
, &q_dst
->list
);
228 mdl
->curr_buf
= NULL
;
229 atomic_inc(&q_dst
->depth
);
231 cx18_queue_init(q_src
);
232 spin_unlock(&q_src
->lock
);
233 spin_unlock(&q_dst
->lock
);
/*
 * cx18_flush_queues() - return all MDLs on q_busy and q_full to q_free.
 *
 * Thin wrapper around cx18_queue_flush() for the two in-flight queues.
 * NOTE(review): the function braces are missing from this extract.
 */
236 void cx18_flush_queues(struct cx18_stream
*s
)
238 cx18_queue_flush(s
, &s
->q_busy
, &s
->q_free
);
239 cx18_queue_flush(s
, &s
->q_full
, &s
->q_free
);
/*
 * cx18_unload_queues() - tear all queues down to the idle state.
 *
 * Flushes q_busy, q_full and q_free onto q_idle, then (under q_idle->lock)
 * strips every buffer off every MDL back into s->buf_pool and resets each
 * MDL id to s->mdl_base_idx. The original note below applies: s->buf_pool
 * itself is not lock-protected, so the stream must be fully quiescent.
 *
 * NOTE(review): comment delimiters, braces and any additional per-MDL
 * cleanup between the buffer loop and the id reset are missing from this
 * extract - verify against the pristine file.
 */
243 * Note, s->buf_pool is not protected by a lock,
244 * the stream better not have *anything* going on when calling this
246 void cx18_unload_queues(struct cx18_stream
*s
)
248 struct cx18_queue
*q_idle
= &s
->q_idle
;
249 struct cx18_mdl
*mdl
;
250 struct cx18_buffer
*buf
;
252 /* Move all MDLS to q_idle */
253 cx18_queue_flush(s
, &s
->q_busy
, q_idle
);
254 cx18_queue_flush(s
, &s
->q_full
, q_idle
);
255 cx18_queue_flush(s
, &s
->q_free
, q_idle
);
257 /* Reset MDL id's and move all buffers back to the stream's buf_pool */
258 spin_lock(&q_idle
->lock
);
259 list_for_each_entry(mdl
, &q_idle
->list
, list
) {
260 while (!list_empty(&mdl
->buf_list
)) {
261 buf
= list_first_entry(&mdl
->buf_list
,
262 struct cx18_buffer
, list
);
263 list_move_tail(&buf
->list
, &s
->buf_pool
);
267 mdl
->id
= s
->mdl_base_idx
; /* reset id to a "safe" value */
268 /* all other mdl fields were cleared by cx18_queue_flush() */
270 spin_unlock(&q_idle
->lock
);
/*
 * cx18_load_queues() - populate idle MDLs with buffers and hand them to
 * q_free.
 *
 * Dequeues MDLs from q_idle one at a time, moves up to s->bufs_per_mdl
 * buffers from s->buf_pool onto each, and publishes every buffer's DMA
 * address and length into the firmware's cpu_mdl array via cx18_writel().
 * Fully-populated MDLs go on q_free; when s->mdl_size is not an integral
 * multiple of s->buf_size, the last buffer's advertised length is reduced
 * (partial_buf_size) so the encoder sends exactly mdl_size bytes per
 * transfer. An MDL that cannot be filled is pushed back onto q_idle.
 * The original note below applies: s->buf_pool is unlocked, so the stream
 * must be fully quiescent when this runs.
 *
 * NOTE(review): many interior lines are missing from this extract
 * (declarations of mdl_id and i, the MDL id assignment, the break on an
 * empty buf_pool, the 'list' continuation of list_first_entry(), the else
 * keyword, the mdl_id advance, and closing braces) - verify against the
 * pristine file.
 */
274 * Note, s->buf_pool is not protected by a lock,
275 * the stream better not have *anything* going on when calling this
277 void cx18_load_queues(struct cx18_stream
*s
)
279 struct cx18
*cx
= s
->cx
;
280 struct cx18_mdl
*mdl
;
281 struct cx18_buffer
*buf
;
284 u32 partial_buf_size
;
287 * Attach buffers to MDLs, give the MDLs ids, and add MDLs to q_free
288 * Excess MDLs are left on q_idle
289 * Excess buffers are left in buf_pool and/or on an MDL in q_idle
291 mdl_id
= s
->mdl_base_idx
;
292 for (mdl
= cx18_dequeue(s
, &s
->q_idle
), i
= s
->bufs_per_mdl
;
293 mdl
!= NULL
&& i
== s
->bufs_per_mdl
;
294 mdl
= cx18_dequeue(s
, &s
->q_idle
)) {
298 for (i
= 0; i
< s
->bufs_per_mdl
; i
++) {
299 if (list_empty(&s
->buf_pool
))
302 buf
= list_first_entry(&s
->buf_pool
, struct cx18_buffer
,
304 list_move_tail(&buf
->list
, &mdl
->buf_list
);
306 /* update the firmware's MDL array with this buffer */
307 cx18_writel(cx
, buf
->dma_handle
,
308 &cx
->scb
->cpu_mdl
[mdl_id
+ i
].paddr
);
309 cx18_writel(cx
, s
->buf_size
,
310 &cx
->scb
->cpu_mdl
[mdl_id
+ i
].length
);
313 if (i
== s
->bufs_per_mdl
) {
315 * The encoder doesn't honor s->mdl_size. So in the
316 * case of a non-integral number of buffers to meet
317 * mdl_size, we lie about the size of the last buffer
318 * in the MDL to get the encoder to really only send
319 * us mdl_size bytes per MDL transfer.
321 partial_buf_size
= s
->mdl_size
% s
->buf_size
;
322 if (partial_buf_size
) {
323 cx18_writel(cx
, partial_buf_size
,
324 &cx
->scb
->cpu_mdl
[mdl_id
+ i
- 1].length
);
326 cx18_enqueue(s
, mdl
, &s
->q_free
);
328 /* Not enough buffers for this MDL; we won't use it */
329 cx18_push(s
, mdl
, &s
->q_idle
);
/*
 * _cx18_mdl_sync_for_device() - DMA-sync every buffer on an MDL for device
 * access.
 *
 * Walks mdl->buf_list and calls pci_dma_sync_single_for_device() for each
 * buffer's dma_handle.
 * NOTE(review): the braces and the final argument line of the sync call
 * are missing from this extract; buf_size and s->dma are presumably the
 * remaining arguments (buf_size is declared but unused in the visible
 * lines) - confirm against the pristine file.
 */
335 void _cx18_mdl_sync_for_device(struct cx18_stream
*s
, struct cx18_mdl
*mdl
)
338 u32 buf_size
= s
->buf_size
;
339 struct pci_dev
*pci_dev
= s
->cx
->pci_dev
;
340 struct cx18_buffer
*buf
;
342 list_for_each_entry(buf
, &mdl
->buf_list
, list
)
343 pci_dma_sync_single_for_device(pci_dev
, buf
->dma_handle
,
/*
 * cx18_stream_alloc() - allocate a stream's MDLs and data buffers.
 *
 * First checks that the firmware SCB area can hold s->buffers more MDL
 * entries past cx->free_mdl_idx (logging the maximum if not), records the
 * stream's mdl_base_idx, then allocates one MDL plus one buffer per
 * iteration: the MDL's lists are initialized and it is enqueued on q_idle;
 * the buffer's backing memory is kmalloc'ed, PCI-mapped for DMA, synced
 * for the CPU and appended to s->buf_pool. If all s->buffers iterations
 * succeeded, cx->free_mdl_idx is advanced; otherwise an allocation error
 * is logged.
 *
 * NOTE(review): many interior lines are missing from this extract -
 * presumably an early return when s->buffers == 0, error returns for the
 * SCB-overflow and allocation-failure paths, per-iteration cleanup
 * (freeing the mdl/buf on partial failure), the loop-variable declaration,
 * and the success/failure returns. Verify against the pristine file.
 */
347 int cx18_stream_alloc(struct cx18_stream
*s
)
349 struct cx18
*cx
= s
->cx
;
355 CX18_DEBUG_INFO("Allocate %s stream: %d x %d buffers "
356 "(%d.%02d kB total)\n",
357 s
->name
, s
->buffers
, s
->buf_size
,
358 s
->buffers
* s
->buf_size
/ 1024,
359 (s
->buffers
* s
->buf_size
* 100 / 1024) % 100);
361 if (((char __iomem
*)&cx
->scb
->cpu_mdl
[cx
->free_mdl_idx
+ s
->buffers
] -
362 (char __iomem
*)cx
->scb
) > SCB_RESERVED_SIZE
) {
363 unsigned bufsz
= (((char __iomem
*)cx
->scb
) + SCB_RESERVED_SIZE
-
364 ((char __iomem
*)cx
->scb
->cpu_mdl
));
366 CX18_ERR("Too many buffers, cannot fit in SCB area\n");
367 CX18_ERR("Max buffers = %zd\n",
368 bufsz
/ sizeof(struct cx18_mdl_ent
));
372 s
->mdl_base_idx
= cx
->free_mdl_idx
;
374 /* allocate stream buffers and MDLs */
375 for (i
= 0; i
< s
->buffers
; i
++) {
376 struct cx18_mdl
*mdl
;
377 struct cx18_buffer
*buf
;
379 /* 1 MDL per buffer to handle the worst & also default case */
380 mdl
= kzalloc(sizeof(struct cx18_mdl
), GFP_KERNEL
|__GFP_NOWARN
);
384 buf
= kzalloc(sizeof(struct cx18_buffer
),
385 GFP_KERNEL
|__GFP_NOWARN
);
391 buf
->buf
= kmalloc(s
->buf_size
, GFP_KERNEL
|__GFP_NOWARN
);
392 if (buf
->buf
== NULL
) {
398 INIT_LIST_HEAD(&mdl
->list
);
399 INIT_LIST_HEAD(&mdl
->buf_list
);
400 mdl
->id
= s
->mdl_base_idx
; /* a somewhat safe value */
401 cx18_enqueue(s
, mdl
, &s
->q_idle
);
403 INIT_LIST_HEAD(&buf
->list
);
404 buf
->dma_handle
= pci_map_single(s
->cx
->pci_dev
,
405 buf
->buf
, s
->buf_size
, s
->dma
);
406 cx18_buf_sync_for_cpu(s
, buf
);
407 list_add_tail(&buf
->list
, &s
->buf_pool
);
409 if (i
== s
->buffers
) {
410 cx
->free_mdl_idx
+= s
->buffers
;
413 CX18_ERR("Couldn't allocate buffers for %s stream\n", s
->name
);
418 void cx18_stream_free(struct cx18_stream
*s
)
420 struct cx18_mdl
*mdl
;
421 struct cx18_buffer
*buf
;
422 struct cx18
*cx
= s
->cx
;
424 CX18_DEBUG_INFO("Deallocating buffers for %s stream\n", s
->name
);
426 /* move all buffers to buf_pool and all MDLs to q_idle */
427 cx18_unload_queues(s
);
430 while ((mdl
= cx18_dequeue(s
, &s
->q_idle
)))
434 while (!list_empty(&s
->buf_pool
)) {
435 buf
= list_first_entry(&s
->buf_pool
, struct cx18_buffer
, list
);
436 list_del_init(&buf
->list
);
438 pci_unmap_single(s
->cx
->pci_dev
, buf
->dma_handle
,
439 s
->buf_size
, s
->dma
);