/*
 * DMA region bookkeeping routines
 *
 * Copyright (C) 2002 Maas Digital LLC
 *
 * This code is licensed under the GPL.  See the file COPYING in the root
 * directory of the kernel sources for details.
 */

#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include "dma.h"
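/*
 * Two kinds of regions are managed here: a dma_prog_region is a small,
 * physically contiguous buffer from the consistent (coherent) DMA API,
 * while a dma_region is a large vmalloc'd buffer mapped for streaming
 * DMA through a scatter/gather list.
 */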
/* dma_prog_region */

void dma_prog_region_init(struct dma_prog_region *prog)
{
	prog->kvirt = NULL;
	prog->dev = NULL;
	prog->n_pages = 0;
	prog->bus_addr = 0;
}
int dma_prog_region_alloc(struct dma_prog_region *prog, unsigned long n_bytes,
			  struct pci_dev *dev)
{
	/* round up to page size */
	n_bytes = PAGE_ALIGN(n_bytes);

	prog->n_pages = n_bytes >> PAGE_SHIFT;

	prog->kvirt = pci_alloc_consistent(dev, n_bytes, &prog->bus_addr);
	if (!prog->kvirt) {
		printk(KERN_ERR
		       "dma_prog_region_alloc: pci_alloc_consistent() failed\n");
		dma_prog_region_free(prog);
		return -ENOMEM;
	}

	prog->dev = dev;

	return 0;
}
void dma_prog_region_free(struct dma_prog_region *prog)
{
	/* the guard matters: the failure path of dma_prog_region_alloc()
	   calls this with nothing allocated yet */
	if (prog->kvirt) {
		pci_free_consistent(prog->dev, prog->n_pages << PAGE_SHIFT,
				    prog->kvirt, prog->bus_addr);
	}

	prog->kvirt = NULL;
	prog->dev = NULL;
	prog->n_pages = 0;
	prog->bus_addr = 0;
}
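/*
 * Typical dma_prog_region lifecycle (hypothetical caller, for
 * illustration only; 'pdev' and the size are made up):
 *
 *	struct dma_prog_region prog;
 *
 *	dma_prog_region_init(&prog);
 *	if (dma_prog_region_alloc(&prog, 4096, pdev))
 *		return -ENOMEM;
 *	... build DMA descriptors in prog.kvirt, hand prog.bus_addr
 *	    to the hardware ...
 *	dma_prog_region_free(&prog);
 */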
/* dma_region */

void dma_region_init(struct dma_region *dma)
{
	dma->kvirt = NULL;
	dma->dev = NULL;
	dma->n_pages = 0;
	dma->n_dma_pages = 0;
	dma->sglist = NULL;
}
int dma_region_alloc(struct dma_region *dma, unsigned long n_bytes,
		     struct pci_dev *dev, int direction)
{
	unsigned int i;

	/* round up to page size */
	n_bytes = PAGE_ALIGN(n_bytes);

	dma->n_pages = n_bytes >> PAGE_SHIFT;

	dma->kvirt = vmalloc_32(n_bytes);
	if (!dma->kvirt) {
		printk(KERN_ERR "dma_region_alloc: vmalloc_32() failed\n");
		goto err;
	}

	/* Clear the ram out, no junk to the user */
	memset(dma->kvirt, 0, n_bytes);

	/* allocate scatter/gather list */
	dma->sglist = vmalloc(dma->n_pages * sizeof(*dma->sglist));
	if (!dma->sglist) {
		printk(KERN_ERR "dma_region_alloc: vmalloc(sglist) failed\n");
		goto err;
	}

	/* just to be safe - this will become unnecessary once
	   sglist->address goes away */
	memset(dma->sglist, 0, dma->n_pages * sizeof(*dma->sglist));

	/* fill scatter/gather list with pages */
	for (i = 0; i < dma->n_pages; i++) {
		unsigned long va =
		    (unsigned long)dma->kvirt + (i << PAGE_SHIFT);

		dma->sglist[i].page = vmalloc_to_page((void *)va);
		dma->sglist[i].length = PAGE_SIZE;
	}

	/* map sglist to the IOMMU */
	dma->n_dma_pages =
	    pci_map_sg(dev, dma->sglist, dma->n_pages, direction);

	if (dma->n_dma_pages == 0) {
		printk(KERN_ERR "dma_region_alloc: pci_map_sg() failed\n");
		goto err;
	}

	dma->dev = dev;
	dma->direction = direction;

	return 0;

      err:
	dma_region_free(dma);
	return -ENOMEM;
}
void dma_region_free(struct dma_region *dma)
{
	if (dma->n_dma_pages) {
		pci_unmap_sg(dma->dev, dma->sglist, dma->n_pages,
			     dma->direction);
		dma->n_dma_pages = 0;
		dma->dev = NULL;
	}

	vfree(dma->sglist);
	dma->sglist = NULL;

	vfree(dma->kvirt);
	dma->kvirt = NULL;
	dma->n_pages = 0;
}
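/*
 * Typical dma_region lifecycle (hypothetical caller, for illustration
 * only; 'pdev' and the size are made up):
 *
 *	struct dma_region dr;
 *
 *	dma_region_init(&dr);
 *	if (dma_region_alloc(&dr, 256 * 1024, pdev, PCI_DMA_BIDIRECTIONAL))
 *		return -ENOMEM;
 *	... device DMA to/from the region, userspace mmap, etc. ...
 *	dma_region_free(&dr);
 */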
/* find the scatterlist index and remaining offset corresponding to a
   given offset from the beginning of the buffer */
static inline int dma_region_find(struct dma_region *dma,
				  unsigned long offset, unsigned long *rem)
{
	int i;
	unsigned long off = offset;

	for (i = 0; i < dma->n_dma_pages; i++) {
		if (off < sg_dma_len(&dma->sglist[i])) {
			*rem = off;
			break;
		}

		off -= sg_dma_len(&dma->sglist[i]);
	}

	BUG_ON(i >= dma->n_dma_pages);

	return i;
}
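/*
 * Worked example (made-up numbers): suppose the IOMMU coalesced the
 * mapping into three entries with dma lengths 4096, 8192 and 4096.
 * For offset 9000, entry 0 is skipped (off becomes 9000 - 4096 = 4904),
 * which falls inside entry 1 (4904 < 8192), so dma_region_find()
 * returns 1 with *rem = 4904.
 */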
dma_addr_t dma_region_offset_to_bus(struct dma_region * dma,
				    unsigned long offset)
{
	unsigned long rem = 0;

	struct scatterlist *sg =
	    &dma->sglist[dma_region_find(dma, offset, &rem)];
	return sg_dma_address(sg) + rem;
}
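/*
 * Example (hypothetical descriptor field, for illustration only): a
 * driver filling in a little-endian DMA descriptor might translate a
 * buffer offset to a bus address like so:
 *
 *	d->data_address = cpu_to_le32(dma_region_offset_to_bus(&dr, off));
 */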
void dma_region_sync_for_cpu(struct dma_region *dma, unsigned long offset,
			     unsigned long len)
{
	int first, last;
	unsigned long rem;

	if (!len)
		len = 1;

	first = dma_region_find(dma, offset, &rem);
	last = dma_region_find(dma, offset + len - 1, &rem);

	pci_dma_sync_sg_for_cpu(dma->dev, &dma->sglist[first],
				last - first + 1, dma->direction);
}
void dma_region_sync_for_device(struct dma_region *dma, unsigned long offset,
				unsigned long len)
{
	int first, last;
	unsigned long rem;

	if (!len)
		len = 1;

	first = dma_region_find(dma, offset, &rem);
	last = dma_region_find(dma, offset + len - 1, &rem);

	pci_dma_sync_sg_for_device(dma->dev, &dma->sglist[first],
				   last - first + 1, dma->direction);
}
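/*
 * Usage note: with a streaming mapping the device owns the buffer by
 * default.  Call dma_region_sync_for_cpu() before the CPU reads data
 * the device wrote, and dma_region_sync_for_device() after the CPU
 * writes data the device will read (see Documentation/DMA-mapping.txt).
 */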
#ifdef CONFIG_MMU

/* nopage() handler for mmap access */

static struct page *dma_region_pagefault(struct vm_area_struct *area,
					 unsigned long address, int *type)
{
	unsigned long offset;
	unsigned long kernel_virt_addr;
	struct page *ret = NOPAGE_SIGBUS;

	struct dma_region *dma = (struct dma_region *)area->vm_private_data;

	if (!dma->kvirt)
		goto out;

	if ((address < (unsigned long)area->vm_start) ||
	    (address >
	     (unsigned long)area->vm_start + (dma->n_pages << PAGE_SHIFT)))
		goto out;

	if (type)
		*type = VM_FAULT_MINOR;
	offset = address - area->vm_start;
	kernel_virt_addr = (unsigned long)dma->kvirt + offset;
	ret = vmalloc_to_page((void *)kernel_virt_addr);
      out:
	return ret;
}
static struct vm_operations_struct dma_region_vm_ops = {
	.nopage = dma_region_pagefault,
};
int dma_region_mmap(struct dma_region *dma, struct file *file,
		    struct vm_area_struct *vma)
{
	unsigned long size;

	if (!dma->kvirt)
		return -EINVAL;

	/* must be page-aligned */
	if (vma->vm_pgoff != 0)
		return -EINVAL;

	/* check the length */
	size = vma->vm_end - vma->vm_start;
	if (size > (dma->n_pages << PAGE_SHIFT))
		return -EINVAL;

	vma->vm_ops = &dma_region_vm_ops;
	vma->vm_private_data = dma;
	vma->vm_file = file;
	vma->vm_flags |= VM_RESERVED;

	return 0;
}
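/*
 * Example (hypothetical): a character device exposing the region would
 * call this from its file_operations .mmap handler:
 *
 *	static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct dma_region *dma = file->private_data;
 *		return dma_region_mmap(dma, file, vma);
 *	}
 */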
#else				/* CONFIG_MMU */

int dma_region_mmap(struct dma_region *dma, struct file *file,
		    struct vm_area_struct *vma)
{
	return -EINVAL;
}

#endif				/* CONFIG_MMU */