// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * User DMA
 *
 * Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
 * Copyright (C) 2004 Chris Kennedy <c@groovy.org>
 * Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
 */

#include "ivtv-driver.h"
#include "ivtv-udma.h"
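
/*
 * Compute the page-aligned layout of a userspace buffer: the aligned
 * start address, the byte offset into the first page, the number of
 * valid bytes in the last page (tail), and the first/last page frame
 * numbers.  For a buffer that fits in a single page, tail is reduced
 * by the leading offset so that tail equals the buffer size.
 */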
void ivtv_udma_get_page_info(struct ivtv_dma_page_info *dma_page,
			     unsigned long first, unsigned long size)
{
	dma_page->uaddr = first & PAGE_MASK;
	dma_page->offset = first & ~PAGE_MASK;
	dma_page->tail = 1 + ((first + size - 1) & ~PAGE_MASK);
	dma_page->first = (first & PAGE_MASK) >> PAGE_SHIFT;
	dma_page->last = ((first + size - 1) & PAGE_MASK) >> PAGE_SHIFT;
	dma_page->page_count = dma_page->last - dma_page->first + 1;
	if (dma_page->page_count == 1)
		dma_page->tail -= dma_page->offset;
}
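
/*
 * Populate dma->SGlist starting at map_offset from the pinned pages in
 * dma->map.  Highmem pages are copied into lowmem bounce pages, which
 * are allocated on demand with GFP_KERNEL and cached in dma->bouncemap;
 * the bounce page is then placed in the scatterlist instead of the
 * original page.  Returns the next free map index, or a negative value
 * if a bounce page could not be allocated.
 */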
int ivtv_udma_fill_sg_list(struct ivtv_user_dma *dma,
			   struct ivtv_dma_page_info *dma_page, int map_offset)
{
	int i, offset;
	unsigned long flags;

	if (map_offset < 0)
		return map_offset;

	offset = dma_page->offset;

	/* Fill SG Array with new values */
	for (i = 0; i < dma_page->page_count; i++) {
		unsigned int len = (i == dma_page->page_count - 1) ?
			dma_page->tail : PAGE_SIZE - offset;

		if (PageHighMem(dma->map[map_offset])) {
			void *src;

			if (dma->bouncemap[map_offset] == NULL)
				dma->bouncemap[map_offset] = alloc_page(GFP_KERNEL);
			if (dma->bouncemap[map_offset] == NULL)
				return -1;
			local_irq_save(flags);
			src = kmap_atomic(dma->map[map_offset]) + offset;
			memcpy(page_address(dma->bouncemap[map_offset]) + offset,
			       src, len);
			kunmap_atomic(src - offset);
			local_irq_restore(flags);
			sg_set_page(&dma->SGlist[map_offset],
				    dma->bouncemap[map_offset], len, offset);
		} else {
			sg_set_page(&dma->SGlist[map_offset],
				    dma->map[map_offset], len, offset);
		}
		offset = 0;
		map_offset++;
	}
	return map_offset;
}
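
/*
 * Translate the mapped scatterlist into the card's SG descriptor array
 * of little-endian size/src/dst triplets.  The destination address
 * advances with each chunk; once exactly `split' bytes have been
 * consumed, the destination switches to buffer_offset_2, allowing one
 * transfer to span two on-card buffers.  A split of -1 (as passed by
 * ivtv_udma_setup()) never reaches zero, which disables the switch.
 */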
void ivtv_udma_fill_sg_array(struct ivtv_user_dma *dma, u32 buffer_offset,
			     u32 buffer_offset_2, u32 split)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(dma->SGlist, sg, dma->SG_length, i) {
		dma->SGarray[i].size = cpu_to_le32(sg_dma_len(sg));
		dma->SGarray[i].src = cpu_to_le32(sg_dma_address(sg));
		dma->SGarray[i].dst = cpu_to_le32(buffer_offset);
		buffer_offset += sg_dma_len(sg);

		split -= sg_dma_len(sg);
		if (split == 0)
			buffer_offset = buffer_offset_2;
	}
}

/* User DMA Buffers */
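/*
 * Map the SG descriptor array itself for DMA once; SG_handle is the
 * bus address the card is pointed at in ivtv_udma_start().
 */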
void ivtv_udma_alloc(struct ivtv *itv)
{
	if (itv->udma.SG_handle == 0) {
		/* Map DMA Page Array Buffer */
		itv->udma.SG_handle = dma_map_single(&itv->pdev->dev,
						     itv->udma.SGarray,
						     sizeof(itv->udma.SGarray),
						     DMA_TO_DEVICE);
		ivtv_udma_sync_for_cpu(itv);
	}
}
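
/*
 * Prepare a user DMA transfer of size_in_bytes from userbuf to the card
 * address ivtv_dest_addr: pin the user pages, build and map the
 * scatterlist, fill the card's SG array, and tag the last descriptor so
 * the card raises an interrupt when the transfer completes.  Returns
 * the number of pinned pages on success, or a negative errno; -EBUSY
 * means a previous transfer is still outstanding.
 */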
int ivtv_udma_setup(struct ivtv *itv, unsigned long ivtv_dest_addr,
		    void __user *userbuf, int size_in_bytes)
{
	struct ivtv_dma_page_info user_dma;
	struct ivtv_user_dma *dma = &itv->udma;
	int err;

	IVTV_DEBUG_DMA("ivtv_udma_setup, dst: 0x%08x\n", (unsigned int)ivtv_dest_addr);

	/* Still in USE */
	if (dma->SG_length || dma->page_count) {
		IVTV_DEBUG_WARN("ivtv_udma_setup: SG_length %d page_count %d still full?\n",
				dma->SG_length, dma->page_count);
		return -EBUSY;
	}

	ivtv_udma_get_page_info(&user_dma, (unsigned long)userbuf, size_in_bytes);

	if (user_dma.page_count <= 0) {
		IVTV_DEBUG_WARN("ivtv_udma_setup: Error %d page_count from %d bytes %d offset\n",
				user_dma.page_count, size_in_bytes, user_dma.offset);
		return -EINVAL;
	}

	/* Pin user pages for DMA Xfer */
	err = pin_user_pages_unlocked(user_dma.uaddr, user_dma.page_count,
				      dma->map, 0);

	if (user_dma.page_count != err) {
		IVTV_DEBUG_WARN("failed to map user pages, returned %d instead of %d\n",
				err, user_dma.page_count);
		if (err >= 0) {
			unpin_user_pages(dma->map, err);
			return -EINVAL;
		}
		return err;
	}

	dma->page_count = user_dma.page_count;

	/* Fill SG List with new values */
	if (ivtv_udma_fill_sg_list(dma, &user_dma, 0) < 0) {
		IVTV_DEBUG_WARN("%s: could not allocate bounce buffers for highmem userspace buffers\n",
				__func__);
		unpin_user_pages(dma->map, dma->page_count);
		dma->page_count = 0;
		return -ENOMEM;
	}

	/* Map SG List */
	dma->SG_length = dma_map_sg(&itv->pdev->dev, dma->SGlist,
				    dma->page_count, DMA_TO_DEVICE);
	if (!dma->SG_length) {
		IVTV_DEBUG_WARN("%s: DMA map error, SG_length is 0\n", __func__);
		unpin_user_pages(dma->map, dma->page_count);
		dma->page_count = 0;
		return -EINVAL;
	}

	/* Fill SG Array with new values */
	ivtv_udma_fill_sg_array(dma, ivtv_dest_addr, 0, -1);

	/* Tag SG Array with Interrupt Bit */
	dma->SGarray[dma->SG_length - 1].size |= cpu_to_le32(0x80000000);

	ivtv_udma_sync_for_device(itv);
	return dma->page_count;
}
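
/*
 * Tear down the transfer set up by ivtv_udma_setup(): unmap the
 * scatterlist, sync the SG array back for the CPU, and unpin the
 * user pages.
 */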
void ivtv_udma_unmap(struct ivtv *itv)
{
	struct ivtv_user_dma *dma = &itv->udma;

	IVTV_DEBUG_INFO("ivtv_unmap_user_dma\n");

	/* Nothing to free */
	if (dma->page_count == 0)
		return;

	/* Unmap Scatterlist */
	if (dma->SG_length) {
		dma_unmap_sg(&itv->pdev->dev, dma->SGlist, dma->page_count,
			     DMA_TO_DEVICE);
		dma->SG_length = 0;
	}

	/* sync DMA */
	ivtv_udma_sync_for_cpu(itv);

	unpin_user_pages(dma->map, dma->page_count);
	dma->page_count = 0;
}
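
/*
 * Final cleanup on driver teardown: release the DMA mapping of the SG
 * descriptor array, any still-mapped scatterlist, and all bounce pages
 * cached by ivtv_udma_fill_sg_list().
 */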
void ivtv_udma_free(struct ivtv *itv)
{
	int i;

	/* Unmap SG Array */
	if (itv->udma.SG_handle) {
		dma_unmap_single(&itv->pdev->dev, itv->udma.SG_handle,
				 sizeof(itv->udma.SGarray), DMA_TO_DEVICE);
	}

	/* Unmap Scatterlist */
	if (itv->udma.SG_length) {
		dma_unmap_sg(&itv->pdev->dev, itv->udma.SGlist,
			     itv->udma.page_count, DMA_TO_DEVICE);
	}

	for (i = 0; i < IVTV_DMA_SG_OSD_ENT; i++) {
		if (itv->udma.bouncemap[i])
			__free_page(itv->udma.bouncemap[i]);
	}
}
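
/*
 * Kick off the transfer: point the card at the SG descriptor array,
 * set the DMA start bit, and update the driver's DMA state flags.
 */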
void ivtv_udma_start(struct ivtv *itv)
{
	IVTV_DEBUG_DMA("start UDMA\n");
	write_reg(itv->udma.SG_handle, IVTV_REG_DECDMAADDR);
	write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x01, IVTV_REG_DMAXFER);
	set_bit(IVTV_F_I_DMA, &itv->i_flags);
	set_bit(IVTV_F_I_UDMA, &itv->i_flags);
	clear_bit(IVTV_F_I_UDMA_PENDING, &itv->i_flags);
}
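
/*
 * Start the transfer immediately if no DMA is in flight; otherwise
 * flag it as pending to be started once the current DMA completes.
 * Protected by dma_reg_lock.
 */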
void ivtv_udma_prepare(struct ivtv *itv)
{
	unsigned long flags;

	spin_lock_irqsave(&itv->dma_reg_lock, flags);
	if (!test_bit(IVTV_F_I_DMA, &itv->i_flags))
		ivtv_udma_start(itv);
	else
		set_bit(IVTV_F_I_UDMA_PENDING, &itv->i_flags);
	spin_unlock_irqrestore(&itv->dma_reg_lock, flags);
}