vfs: check userland buffers before reading them.
[haiku.git] / src / add-ons / kernel / drivers / graphics / radeon / DMA.c
blob429e072fd2a5d7ca7095706430b3c098e37be6c6
/*
	Copyright (c) 2002-04, Thomas Kurschel

	Part of Radeon accelerant

	DMA engine handling.

	Currently, VID DMA is always used and data is always copied from
	graphics memory to other memory.
*/
13 #include "radeon_driver.h"
14 #include "mmio.h"
15 #include "rbbm_regs.h"
16 #include "dma_regs.h"
17 #include <string.h>
// upper bound for a single DMA transfer;
// this is arbitrary and hopefully sufficiently high
// (parenthesized so the macro expands safely inside larger expressions)
#define RADEON_MAX_DMA_SIZE (16 * 1024 * 1024)
23 // initialize DMA engine
24 status_t Radeon_InitDMA( device_info *di )
26 status_t res;
28 // allocate descriptor table in graphics mem
29 // (docu says that is _must_ be in graphics mem)
30 di->dma_desc_max_num = RADEON_MAX_DMA_SIZE / 4096;
32 res = mem_alloc( di->memmgr[mt_local], di->dma_desc_max_num * sizeof( DMA_descriptor ), 0,
33 &di->dma_desc_handle, &di->dma_desc_offset );
35 if( res != B_OK )
36 return res;
38 // allow DMA IRQ
39 OUTREGP( di->regs, RADEON_GEN_INT_CNTL, RADEON_VIDDMA_MASK, ~RADEON_VIDDMA_MASK );
40 // acknowledge possibly pending IRQ
41 OUTREG( di->regs, RADEON_GEN_INT_STATUS, RADEON_VIDDMA_AK );
43 return B_OK;
47 // prepare DMA engine to copy data from graphics mem to other mem
48 static status_t Radeon_PrepareDMA(
49 device_info *di, uint32 src, char *target, size_t size, bool lock_mem, bool contiguous )
51 physical_entry map[16];
52 status_t res;
53 DMA_descriptor *cur_desc;
54 int num_desc;
56 if( lock_mem && !contiguous ) {
57 res = lock_memory( target, size, B_DMA_IO | B_READ_DEVICE );
59 if( res != B_OK ) {
60 SHOW_ERROR( 2, "Cannot lock memory (%s)", strerror( res ));
61 return res;
65 // adjust virtual address for graphics card
66 src += di->si->memory[mt_local].virtual_addr_start;
68 cur_desc = (DMA_descriptor *)(di->si->local_mem + di->dma_desc_offset);
69 num_desc = 0;
71 // memory may be fragmented, so we create S/G list
72 while( size > 0 ) {
73 int i;
75 if( contiguous ) {
76 // if memory is contiguous, ask for start address only to reduce work
77 get_memory_map( target, 1, map, 16 );
78 // replace received size with total size
79 map[0].size = size;
80 } else {
81 get_memory_map( target, size, map, 16 );
84 for( i = 0; i < 16; ++i ) {
85 phys_addr_t address = map[i].address;
86 size_t contig_size = map[i].size;
88 if( contig_size == 0 )
89 break;
91 #if B_HAIKU_PHYSICAL_BITS > 32
92 if (address + contig_size > (phys_addr_t)1 << 32) {
93 SHOW_ERROR(2, "Physical address > 4 GB: %#" B_PRIxPHYSADDR
94 "size: %#" B_PRIxSIZE, address, size);
95 res = B_BAD_VALUE;
96 goto err;
98 #endif
100 target += contig_size;
102 while( contig_size > 0 ) {
103 size_t cur_size;
105 cur_size = min( contig_size, RADEON_DMA_DESC_MAX_SIZE );
107 if( ++num_desc > (int)di->dma_desc_max_num ) {
108 SHOW_ERROR( 2, "Overflow of DMA descriptors, %ld bytes left", size );
109 res = B_BAD_VALUE;
110 goto err;
113 cur_desc->src_address = src;
114 cur_desc->dest_address = address;
115 cur_desc->command = cur_size;
116 cur_desc->res = 0;
118 ++cur_desc;
119 address += cur_size;
120 contig_size -= cur_size;
121 src += cur_size;
122 size -= cur_size;
127 // mark last descriptor as being last one
128 (cur_desc - 1)->command |= RADEON_DMA_COMMAND_EOL;
130 return B_OK;
132 err:
133 if( lock_mem && !contiguous )
134 unlock_memory( target, size, B_DMA_IO| B_READ_DEVICE );
136 return res;
140 // finish DMA
141 // caller must ensure that DMA channel has stopped
142 static void Radeon_FinishDMA(
143 device_info *di, uint32 src, char *target, size_t size, bool lock_mem, bool contiguous )
145 if( lock_mem && !contiguous )
146 unlock_memory( target, size, B_DMA_IO| B_READ_DEVICE );
150 // copy from graphics memory to other memory via DMA
151 // src - offset in graphics mem
152 // target - target address
153 // size - number of bytes to copy
154 // lock_mem - true, if memory is not locked
155 // contiguous - true, if memory is physically contiguous (implies lock_mem=false)
156 status_t Radeon_DMACopy(
157 device_info *di, uint32 src, char *target, size_t size, bool lock_mem, bool contiguous )
159 status_t res;
161 /*SHOW_FLOW( 0, "src=%ld, target=%p, size=%ld, lock_mem=%d, contiguous=%d",
162 src, target, size, lock_mem, contiguous );*/
164 res = Radeon_PrepareDMA( di, src, target, size, lock_mem, contiguous );
165 if( res != B_OK )
166 return res;
168 //SHOW_FLOW0( 0, "2" );
170 OUTREG( di->regs, RADEON_DMA_VID_TABLE_ADDR, di->si->memory[mt_local].virtual_addr_start +
171 di->dma_desc_offset );
173 res = acquire_sem_etc( di->dma_sem, 1, B_RELATIVE_TIMEOUT, 1000000 );
175 // be sure that transmission is really finished
176 while( (INREG( di->regs, RADEON_DMA_VID_STATUS ) & RADEON_DMA_STATUS_ACTIVE) != 0 ) {
177 SHOW_FLOW0( 0, "DMA transmission still active" );
178 snooze( 1000 );
181 Radeon_FinishDMA( di, src, target, size, lock_mem, contiguous );
183 //SHOW_FLOW0( 0, "3" );
185 return res;