Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
Copyright (C) 2004 Chris Kennedy <c@groovy.org>
Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#include "ivtv-driver.h"
#include "ivtv-udma.h"
26 void ivtv_udma_get_page_info(struct ivtv_dma_page_info
*dma_page
, unsigned long first
, unsigned long size
)
28 dma_page
->uaddr
= first
& PAGE_MASK
;
29 dma_page
->offset
= first
& ~PAGE_MASK
;
30 dma_page
->tail
= 1 + ((first
+size
-1) & ~PAGE_MASK
);
31 dma_page
->first
= (first
& PAGE_MASK
) >> PAGE_SHIFT
;
32 dma_page
->last
= ((first
+size
-1) & PAGE_MASK
) >> PAGE_SHIFT
;
33 dma_page
->page_count
= dma_page
->last
- dma_page
->first
+ 1;
34 if (dma_page
->page_count
== 1) dma_page
->tail
-= dma_page
->offset
;
37 int ivtv_udma_fill_sg_list (struct ivtv_user_dma
*dma
, struct ivtv_dma_page_info
*dma_page
, int map_offset
)
45 offset
= dma_page
->offset
;
47 /* Fill SG Array with new values */
48 for (i
= 0; i
< dma_page
->page_count
; i
++) {
49 unsigned int len
= (i
== dma_page
->page_count
- 1) ?
50 dma_page
->tail
: PAGE_SIZE
- offset
;
52 dma
->SGlist
[map_offset
].length
= len
;
53 dma
->SGlist
[map_offset
].offset
= offset
;
54 if (PageHighMem(dma
->map
[map_offset
])) {
57 if (dma
->bouncemap
[map_offset
] == NULL
)
58 dma
->bouncemap
[map_offset
] = alloc_page(GFP_KERNEL
);
59 if (dma
->bouncemap
[map_offset
] == NULL
)
61 local_irq_save(flags
);
62 src
= kmap_atomic(dma
->map
[map_offset
], KM_BOUNCE_READ
) + offset
;
63 memcpy(page_address(dma
->bouncemap
[map_offset
]) + offset
, src
, len
);
64 kunmap_atomic(src
, KM_BOUNCE_READ
);
65 local_irq_restore(flags
);
66 dma
->SGlist
[map_offset
].page
= dma
->bouncemap
[map_offset
];
69 dma
->SGlist
[map_offset
].page
= dma
->map
[map_offset
];
77 void ivtv_udma_fill_sg_array (struct ivtv_user_dma
*dma
, u32 buffer_offset
, u32 buffer_offset_2
, u32 split
) {
79 struct scatterlist
*sg
;
81 for (i
= 0, sg
= dma
->SGlist
; i
< dma
->SG_length
; i
++, sg
++) {
82 dma
->SGarray
[i
].size
= cpu_to_le32(sg_dma_len(sg
));
83 dma
->SGarray
[i
].src
= cpu_to_le32(sg_dma_address(sg
));
84 dma
->SGarray
[i
].dst
= cpu_to_le32(buffer_offset
);
85 buffer_offset
+= sg_dma_len(sg
);
87 split
-= sg_dma_len(sg
);
89 buffer_offset
= buffer_offset_2
;
93 /* User DMA Buffers */
94 void ivtv_udma_alloc(struct ivtv
*itv
)
96 if (itv
->udma
.SG_handle
== 0) {
97 /* Map DMA Page Array Buffer */
98 itv
->udma
.SG_handle
= pci_map_single(itv
->dev
, itv
->udma
.SGarray
,
99 sizeof(itv
->udma
.SGarray
), PCI_DMA_TODEVICE
);
100 ivtv_udma_sync_for_cpu(itv
);
104 int ivtv_udma_setup(struct ivtv
*itv
, unsigned long ivtv_dest_addr
,
105 void __user
*userbuf
, int size_in_bytes
)
107 struct ivtv_dma_page_info user_dma
;
108 struct ivtv_user_dma
*dma
= &itv
->udma
;
111 IVTV_DEBUG_DMA("ivtv_udma_setup, dst: 0x%08x\n", (unsigned int)ivtv_dest_addr
);
114 if (dma
->SG_length
|| dma
->page_count
) {
115 IVTV_DEBUG_WARN("ivtv_udma_setup: SG_length %d page_count %d still full?\n",
116 dma
->SG_length
, dma
->page_count
);
120 ivtv_udma_get_page_info(&user_dma
, (unsigned long)userbuf
, size_in_bytes
);
122 if (user_dma
.page_count
<= 0) {
123 IVTV_DEBUG_WARN("ivtv_udma_setup: Error %d page_count from %d bytes %d offset\n",
124 user_dma
.page_count
, size_in_bytes
, user_dma
.offset
);
128 /* Get user pages for DMA Xfer */
129 down_read(¤t
->mm
->mmap_sem
);
130 err
= get_user_pages(current
, current
->mm
,
131 user_dma
.uaddr
, user_dma
.page_count
, 0, 1, dma
->map
, NULL
);
132 up_read(¤t
->mm
->mmap_sem
);
134 if (user_dma
.page_count
!= err
) {
135 IVTV_DEBUG_WARN("failed to map user pages, returned %d instead of %d\n",
136 err
, user_dma
.page_count
);
140 dma
->page_count
= user_dma
.page_count
;
142 /* Fill SG List with new values */
143 if (ivtv_udma_fill_sg_list(dma
, &user_dma
, 0) < 0) {
144 for (i
= 0; i
< dma
->page_count
; i
++) {
145 put_page(dma
->map
[i
]);
152 dma
->SG_length
= pci_map_sg(itv
->dev
, dma
->SGlist
, dma
->page_count
, PCI_DMA_TODEVICE
);
154 /* Fill SG Array with new values */
155 ivtv_udma_fill_sg_array (dma
, ivtv_dest_addr
, 0, -1);
157 /* Tag SG Array with Interrupt Bit */
158 dma
->SGarray
[dma
->SG_length
- 1].size
|= cpu_to_le32(0x80000000);
160 ivtv_udma_sync_for_device(itv
);
161 return dma
->page_count
;
164 void ivtv_udma_unmap(struct ivtv
*itv
)
166 struct ivtv_user_dma
*dma
= &itv
->udma
;
169 IVTV_DEBUG_INFO("ivtv_unmap_user_dma\n");
171 /* Nothing to free */
172 if (dma
->page_count
== 0)
175 /* Unmap Scatterlist */
176 if (dma
->SG_length
) {
177 pci_unmap_sg(itv
->dev
, dma
->SGlist
, dma
->page_count
, PCI_DMA_TODEVICE
);
181 ivtv_udma_sync_for_cpu(itv
);
183 /* Release User Pages */
184 for (i
= 0; i
< dma
->page_count
; i
++) {
185 put_page(dma
->map
[i
]);
190 void ivtv_udma_free(struct ivtv
*itv
)
195 if (itv
->udma
.SG_handle
) {
196 pci_unmap_single(itv
->dev
, itv
->udma
.SG_handle
,
197 sizeof(itv
->udma
.SGarray
), PCI_DMA_TODEVICE
);
200 /* Unmap Scatterlist */
201 if (itv
->udma
.SG_length
) {
202 pci_unmap_sg(itv
->dev
, itv
->udma
.SGlist
, itv
->udma
.page_count
, PCI_DMA_TODEVICE
);
205 for (i
= 0; i
< IVTV_DMA_SG_OSD_ENT
; i
++) {
206 if (itv
->udma
.bouncemap
[i
])
207 __free_page(itv
->udma
.bouncemap
[i
]);
211 void ivtv_udma_start(struct ivtv
*itv
)
213 IVTV_DEBUG_DMA("start UDMA\n");
214 write_reg(itv
->udma
.SG_handle
, IVTV_REG_DECDMAADDR
);
215 write_reg_sync(read_reg(IVTV_REG_DMAXFER
) | 0x01, IVTV_REG_DMAXFER
);
216 set_bit(IVTV_F_I_DMA
, &itv
->i_flags
);
217 set_bit(IVTV_F_I_UDMA
, &itv
->i_flags
);
220 void ivtv_udma_prepare(struct ivtv
*itv
)
224 spin_lock_irqsave(&itv
->dma_reg_lock
, flags
);
225 if (!test_bit(IVTV_F_I_DMA
, &itv
->i_flags
))
226 ivtv_udma_start(itv
);
228 set_bit(IVTV_F_I_UDMA_PENDING
, &itv
->i_flags
);
229 spin_unlock_irqrestore(&itv
->dma_reg_lock
, flags
);