/*
 * Copyright(c) 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
#include <linux/types.h>
#include <linux/scatterlist.h>
#include <rdma/ib_verbs.h>

#include "dma.h"

#define BAD_DMA_ADDRESS ((u64)0)
/*
 * The following functions implement driver specific replacements
 * for the ib_dma_*() functions.
 *
 * These functions return kernel virtual addresses instead of
 * device bus addresses since the driver uses the CPU to copy
 * data instead of using hardware DMA.
 */
static int rvt_mapping_error(struct ib_device *dev, u64 dma_addr)
{
	return dma_addr == BAD_DMA_ADDRESS;
}
static u64 rvt_dma_map_single(struct ib_device *dev, void *cpu_addr,
			      size_t size, enum dma_data_direction direction)
{
	if (WARN_ON(!valid_dma_direction(direction)))
		return BAD_DMA_ADDRESS;

	return (u64)cpu_addr;
}
static void rvt_dma_unmap_single(struct ib_device *dev, u64 addr, size_t size,
				 enum dma_data_direction direction)
{
	/* This is a stub, nothing to be done here */
}
static u64 rvt_dma_map_page(struct ib_device *dev, struct page *page,
			    unsigned long offset, size_t size,
			    enum dma_data_direction direction)
{
	u64 addr;

	if (WARN_ON(!valid_dma_direction(direction)))
		return BAD_DMA_ADDRESS;

	addr = (u64)page_address(page);
	if (addr)
		addr += offset;

	return addr;
}
static void rvt_dma_unmap_page(struct ib_device *dev, u64 addr, size_t size,
			       enum dma_data_direction direction)
{
	/* This is a stub, nothing to be done here */
}
static int rvt_map_sg(struct ib_device *dev, struct scatterlist *sgl,
		      int nents, enum dma_data_direction direction)
{
	struct scatterlist *sg;
	u64 addr;
	int i;
	int ret = nents;

	if (WARN_ON(!valid_dma_direction(direction)))
		return 0;

	for_each_sg(sgl, sg, nents, i) {
		addr = (u64)page_address(sg_page(sg));
		if (!addr) {
			ret = 0;
			break;
		}
		sg->dma_address = addr + sg->offset;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		sg->dma_length = sg->length;
#endif
	}
	return ret;
}
static void rvt_unmap_sg(struct ib_device *dev,
			 struct scatterlist *sg, int nents,
			 enum dma_data_direction direction)
{
	/* This is a stub, nothing to be done here */
}
static int rvt_map_sg_attrs(struct ib_device *dev, struct scatterlist *sgl,
			    int nents, enum dma_data_direction direction,
			    unsigned long attrs)
{
	return rvt_map_sg(dev, sgl, nents, direction);
}
static void rvt_unmap_sg_attrs(struct ib_device *dev,
			       struct scatterlist *sg, int nents,
			       enum dma_data_direction direction,
			       unsigned long attrs)
{
	rvt_unmap_sg(dev, sg, nents, direction);
}
static void rvt_sync_single_for_cpu(struct ib_device *dev, u64 addr,
				    size_t size, enum dma_data_direction dir)
{
}
static void rvt_sync_single_for_device(struct ib_device *dev, u64 addr,
				       size_t size,
				       enum dma_data_direction dir)
{
}
static void *rvt_dma_alloc_coherent(struct ib_device *dev, size_t size,
				    u64 *dma_handle, gfp_t flag)
{
	struct page *p;
	void *addr = NULL;

	p = alloc_pages(flag, get_order(size));
	if (p)
		addr = page_address(p);
	if (dma_handle)
		*dma_handle = (u64)addr;
	return addr;
}
static void rvt_dma_free_coherent(struct ib_device *dev, size_t size,
				  void *cpu_addr, u64 dma_handle)
{
	free_pages((unsigned long)cpu_addr, get_order(size));
}
struct ib_dma_mapping_ops rvt_default_dma_mapping_ops = {
	.mapping_error = rvt_mapping_error,
	.map_single = rvt_dma_map_single,
	.unmap_single = rvt_dma_unmap_single,
	.map_page = rvt_dma_map_page,
	.unmap_page = rvt_dma_unmap_page,
	.map_sg = rvt_map_sg,
	.unmap_sg = rvt_unmap_sg,
	.map_sg_attrs = rvt_map_sg_attrs,
	.unmap_sg_attrs = rvt_unmap_sg_attrs,
	.sync_single_for_cpu = rvt_sync_single_for_cpu,
	.sync_single_for_device = rvt_sync_single_for_device,
	.alloc_coherent = rvt_dma_alloc_coherent,
	.free_coherent = rvt_dma_free_coherent
};