/* $NetBSD: xen_bus_dma.c,v 1.14 2009/01/24 19:03:12 bouyer Exp $ */
/* NetBSD bus_dma.c,v 1.21 2005/04/16 07:53:35 yamt Exp */

/*-
 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum and by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: xen_bus_dma.c,v 1.14 2009/01/24 19:03:12 bouyer Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>

#include <machine/bus.h>
#include <machine/bus_private.h>

#include <uvm/uvm_extern.h>
extern paddr_t avail_end;
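/* avail_end: upper bound of the (pseudo-)physical memory managed by UVM,
 * set by the MD startup code; used to bound uvm_pglistalloc() below. */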
/* Pure 2^n version of get_order */
static inline int get_order(unsigned long size)
{
	int order = -1;

	size = (size - 1) >> (PAGE_SHIFT - 1);
	do {
		size >>= 1;
		order++;
	} while (size);
	return order;
}
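
/*
 * Worked example (assuming PAGE_SHIFT == 12): get_order(PAGE_SIZE) == 0,
 * get_order(2 * PAGE_SIZE) == 1, get_order(3 * PAGE_SIZE) == 2 (rounded up
 * to four pages); i.e. the base-2 log of the page count, rounded up.
 */

/*
 * Trade the loose pages backing mlistp back to the hypervisor for a
 * single machine-contiguous extent of 2^order pages, then rewrite the
 * physical-to-machine translations to point at the new extent.
 */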
static int
_xen_alloc_contig(bus_size_t size, bus_size_t alignment, bus_size_t boundary,
    struct pglist *mlistp, int flags, bus_addr_t low, bus_addr_t high)
{
	int order, i;
	unsigned long npagesreq, npages, mfn;
	bus_addr_t pa;
	struct vm_page *pg, *pgnext;
	int s, error;
	struct xen_memory_reservation res;

	/*
	 * When requesting a contiguous memory region, the hypervisor will
	 * return a memory range aligned on size.  This will automagically
	 * handle "boundary", but the only way to enforce "alignment"
	 * is to request a memory region of size max(alignment, size).
	 */
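	/*
	 * Illustrative numbers: size == 2 pages and alignment == 4 pages
	 * gives order == 2, so four pages are requested from Xen and the
	 * two extra ones are returned to UVM below.
	 */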
	order = max(get_order(size), get_order(alignment));
	npages = (1 << order);
	npagesreq = (size >> PAGE_SHIFT);
	KASSERT(npages >= npagesreq);

	/* get npages from UVM, and give them back to the hypervisor */
	error = uvm_pglistalloc(npages << PAGE_SHIFT, 0, avail_end, 0, 0,
	    mlistp, npages, (flags & BUS_DMA_NOWAIT) == 0);
	if (error)
		return (error);

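	/*
	 * For each page: look up its machine frame number, invalidate its
	 * entry in the physical-to-machine (P2M) table, and hand the frame
	 * back to the hypervisor as a single order-0 extent.
	 */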
	for (pg = mlistp->tqh_first; pg != NULL;
	    pg = pg->pageq.queue.tqe_next) {
		pa = VM_PAGE_TO_PHYS(pg);
		mfn = xpmap_ptom(pa) >> PAGE_SHIFT;
		xpmap_phys_to_machine_mapping[
		    (pa - XPMAP_OFFSET) >> PAGE_SHIFT] = INVALID_P2M_ENTRY;
		xenguest_handle(res.extent_start) = &mfn;
		res.nr_extents = 1;
		res.extent_order = 0;
		res.domid = DOMID_SELF;
		if (HYPERVISOR_memory_op(XENMEM_decrease_reservation, &res)
		    != 1) {
#ifdef DEBUG
			printf("xen_alloc_contig: XENMEM_decrease_reservation "
			    "failed!\n");
#endif
			/* restore the P2M entry before bailing out */
			xpmap_phys_to_machine_mapping[
			    (pa - XPMAP_OFFSET) >> PAGE_SHIFT] = mfn;
			error = ENOMEM;
			goto failed;
		}
	}

	/* Get the new contiguous memory extent */
	xenguest_handle(res.extent_start) = &mfn;
	res.nr_extents = 1;
	res.extent_order = order;
	res.address_bits = get_order(high) + PAGE_SHIFT;
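	/*
	 * address_bits bounds the machine address of the new extent:
	 * e.g. with PAGE_SHIFT == 12 and high == 4GB, get_order(high) is 20
	 * and address_bits is 32, keeping the extent below 4GB.
	 */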
	res.domid = DOMID_SELF;
	error = HYPERVISOR_memory_op(XENMEM_increase_reservation, &res);
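	/*
	 * The XENMEM_* reservation hypercalls return the number of extents
	 * actually transferred; exactly one was requested, so any other
	 * return value is a failure.
	 */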
121 printf("xen_alloc_contig: XENMEM_increase_reservation "
122 "failed: %d (order %d address_bits %d)\n",
123 error
, order
, res
.address_bits
);
	/* Map the new extent in place of the old pages */
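	/*
	 * Point each guest-physical page at one frame of the new extent:
	 * update our P2M table and queue a machine-to-physical (M2P)
	 * update so the hypervisor's view stays in sync.
	 */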
	for (pg = mlistp->tqh_first, i = 0; pg != NULL; pg = pgnext, i++) {
		pgnext = pg->pageq.queue.tqe_next;
		pa = VM_PAGE_TO_PHYS(pg);
		xpmap_phys_to_machine_mapping[
		    (pa - XPMAP_OFFSET) >> PAGE_SHIFT] = mfn + i;
		xpq_queue_machphys_update((mfn + i) << PAGE_SHIFT, pa);
		/* while here, give extra pages back to UVM */
		if (i >= npagesreq) {
			TAILQ_REMOVE(mlistp, pg, pageq.queue);
			uvm_pagefree(pg);
		}
	}
	/* Flush updates through and flush the TLB */
	xpq_queue_tlb_flush();
	splx(s);
	return 0;

failed:
	/*
	 * Attempt to recover from a failed decrease or increase reservation:
	 * if decrease_reservation failed, we have not given all pages back
	 * to Xen yet; return them to UVM and get the missing pages back
	 * from Xen.
	 * if increase_reservation failed, we expect pg to be NULL and we just
	 * get back the missing pages from Xen one by one.
	 */
	/* give back remaining pages to UVM */
	for (; pg != NULL; pg = pgnext) {
		pgnext = pg->pageq.queue.tqe_next;
		TAILQ_REMOVE(mlistp, pg, pageq.queue);
		uvm_pagefree(pg);
	}
	/* replace the pages that we already gave to Xen */
	s = splvm();
	for (pg = mlistp->tqh_first; pg != NULL; pg = pgnext) {
		pgnext = pg->pageq.queue.tqe_next;
		xenguest_handle(res.extent_start) = &mfn;
		res.nr_extents = 1;
		res.extent_order = 0;
		res.address_bits = 32;
		res.domid = DOMID_SELF;
		if (HYPERVISOR_memory_op(XENMEM_increase_reservation, &res)
		    != 1) {
			printf("xen_alloc_contig: recovery "
			    "XENMEM_increase_reservation failed!\n");
			break;
		}
		pa = VM_PAGE_TO_PHYS(pg);
		xpmap_phys_to_machine_mapping[
		    (pa - XPMAP_OFFSET) >> PAGE_SHIFT] = mfn;
		xpq_queue_machphys_update(mfn << PAGE_SHIFT, pa);
		TAILQ_REMOVE(mlistp, pg, pageq.queue);
		uvm_pagefree(pg);
	}
	/* Flush updates through and flush the TLB */
	xpq_queue_tlb_flush();
	splx(s);
	return error;
}

/*
 * Allocate physical memory from the given physical address range.
 * Called by DMA-safe memory allocation methods.
 * We need our own version to deal with physical vs machine addresses.
 */
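/*
 * On Xen, pages that are contiguous in the guest's (pseudo-)physical
 * address space are generally not contiguous in machine memory, and
 * devices DMA to machine addresses; hence the machine-address checks
 * and the _xen_alloc_contig() fallback below.
 */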
int
_xen_bus_dmamem_alloc_range(bus_dma_tag_t t, bus_size_t size,
    bus_size_t alignment, bus_size_t boundary, bus_dma_segment_t *segs,
    int nsegs, int *rsegs, int flags, bus_addr_t low, bus_addr_t high)
{
	bus_addr_t curaddr, lastaddr;
	struct vm_page *m;
	struct pglist mlist;
	int curseg, error;
	int doingrealloc = 0;

	/* Always round the size. */
	size = round_page(size);
	KASSERT((alignment & (alignment - 1)) == 0);
	KASSERT((boundary & (boundary - 1)) == 0);
	if (alignment < PAGE_SIZE)
		alignment = PAGE_SIZE;
	if (boundary != 0 && boundary < size)
		return (EINVAL);

	/*
	 * Allocate pages from the VM system.
	 */
	error = uvm_pglistalloc(size, 0, avail_end, alignment, boundary,
	    &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
	if (error)
		return (error);

again:
	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	m = mlist.tqh_first;
	curseg = 0;
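	/*
	 * _BUS_VM_PAGE_TO_BUS() yields a machine address on Xen, so the
	 * range, contiguity and alignment checks below are made in machine
	 * space, which is what the device will actually see.
	 */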
	curaddr = lastaddr = segs[curseg].ds_addr = _BUS_VM_PAGE_TO_BUS(m);
	if (curaddr < low || curaddr >= high)
		goto badaddr;
	segs[curseg].ds_len = PAGE_SIZE;
	m = m->pageq.queue.tqe_next;
	if ((segs[curseg].ds_addr & (alignment - 1)) != 0)
		goto dorealloc;

	for (; m != NULL; m = m->pageq.queue.tqe_next) {
		curaddr = _BUS_VM_PAGE_TO_BUS(m);
		if (curaddr < low || curaddr >= high)
			goto badaddr;
		if (curaddr == (lastaddr + PAGE_SIZE)) {
			segs[curseg].ds_len += PAGE_SIZE;
			if ((lastaddr & boundary) != (curaddr & boundary))
				goto dorealloc;
		} else {
			curseg++;
			if (curseg >= nsegs ||
			    (curaddr & (alignment - 1)) != 0)
				goto dorealloc;
			segs[curseg].ds_addr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
		}
		lastaddr = curaddr;
	}

	*rsegs = curseg + 1;
	return (0);

badaddr:
	if (doingrealloc == 0)
		goto dorealloc;
	if (curaddr < low) {
		/* no way to enforce this */
		printf("_xen_bus_dmamem_alloc_range: no way to "
		    "enforce address range (0x%" PRIx64 " - 0x%" PRIx64 ")\n",
		    (uint64_t)low, (uint64_t)high);
		uvm_pglistfree(&mlist);
		return EINVAL;
	}
	printf("xen_bus_dmamem_alloc_range: "
	    "curaddr=0x%lx > high=0x%lx\n",
	    (u_long)curaddr, (u_long)high);
	panic("xen_bus_dmamem_alloc_range 1");

dorealloc:
	if (doingrealloc == 1)
		panic("_xen_bus_dmamem_alloc_range: "
		    "xen_alloc_contig returned "
		    "too many segments");
	doingrealloc = 1;
	/*
	 * Too many segments, or the memory doesn't fit the constraints.
	 * Free this memory and get a contiguous segment from the
	 * hypervisor instead.
	 */
	uvm_pglistfree(&mlist);
	for (curseg = 0; curseg < nsegs; curseg++) {
		segs[curseg].ds_addr = 0;
		segs[curseg].ds_len = 0;
	}
	error = _xen_alloc_contig(size, alignment,
	    boundary, &mlist, flags, low, high);
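	if (error)
		return error;
	/* re-run the checks above on the new machine-contiguous extent */
	goto again;
}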