zen-stable.git: drivers/infiniband/hw/qib/qib_dma.c

/*
 * Copyright (c) 2006, 2009, 2010 QLogic, Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/types.h>
#include <linux/scatterlist.h>

#include "qib_verbs.h"

#define BAD_DMA_ADDRESS ((u64) 0)

/*
 * The following functions implement driver-specific replacements
 * for the ib_dma_*() functions.
 *
 * These functions return kernel virtual addresses instead of
 * device bus addresses since the driver uses the CPU to copy
 * data instead of using hardware DMA.
 */

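/*
 * Registration sketch (an assumption; the hookup lives outside this
 * file, presumably in qib_verbs.c when the ib_device is registered):
 *
 *	ibdev->dma_ops = &qib_dma_mapping_ops;
 *
 * After that assignment, the ib_dma_*() wrappers in <rdma/ib_verbs.h>
 * dispatch to the callbacks below instead of the real DMA-mapping API.
 */
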
static int qib_mapping_error(struct ib_device *dev, u64 dma_addr)
{
	return dma_addr == BAD_DMA_ADDRESS;
}

static u64 qib_dma_map_single(struct ib_device *dev, void *cpu_addr,
			      size_t size, enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
	return (u64) cpu_addr;
}

static void qib_dma_unmap_single(struct ib_device *dev, u64 addr, size_t size,
				 enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
}

static u64 qib_dma_map_page(struct ib_device *dev, struct page *page,
			    unsigned long offset, size_t size,
			    enum dma_data_direction direction)
{
	u64 addr;

	BUG_ON(!valid_dma_direction(direction));

	if (offset + size > PAGE_SIZE) {
		addr = BAD_DMA_ADDRESS;
		goto done;
	}

	addr = (u64) page_address(page);
	if (addr)
		addr += offset;
	/* TODO: handle highmem pages */

done:
	return addr;
}

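/*
 * Note on qib_dma_map_page(): requests extending past the end of the
 * given page are rejected, since only that one page is known to back
 * the buffer. For highmem pages, page_address() returns NULL, so addr
 * stays 0 (== BAD_DMA_ADDRESS) and callers see a mapping error via
 * the mapping_error callback.
 */
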
static void qib_dma_unmap_page(struct ib_device *dev, u64 addr, size_t size,
			       enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
}

static int qib_map_sg(struct ib_device *dev, struct scatterlist *sgl,
		      int nents, enum dma_data_direction direction)
{
	struct scatterlist *sg;
	u64 addr;
	int i;
	int ret = nents;

	BUG_ON(!valid_dma_direction(direction));

	for_each_sg(sgl, sg, nents, i) {
		addr = (u64) page_address(sg_page(sg));
		/* TODO: handle highmem pages */
		if (!addr) {
			ret = 0;
			break;
		}
	}

	return ret;
}

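/*
 * Note on qib_map_sg(): following the dma_map_sg() convention, it
 * returns the number of entries mapped (always nents here, since no
 * real mapping occurs) or 0 on failure, i.e. when any entry is a
 * highmem page with no kernel virtual address.
 */
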
static void qib_unmap_sg(struct ib_device *dev,
			 struct scatterlist *sg, int nents,
			 enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
}

static u64 qib_sg_dma_address(struct ib_device *dev, struct scatterlist *sg)
{
	u64 addr = (u64) page_address(sg_page(sg));

	if (addr)
		addr += sg->offset;
	return addr;
}

static unsigned int qib_sg_dma_len(struct ib_device *dev,
				   struct scatterlist *sg)
{
	return sg->length;
}

static void qib_sync_single_for_cpu(struct ib_device *dev, u64 addr,
				    size_t size, enum dma_data_direction dir)
{
}

static void qib_sync_single_for_device(struct ib_device *dev, u64 addr,
				       size_t size,
				       enum dma_data_direction dir)
{
}

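/*
 * Note: both sync callbacks are intentionally empty. The "DMA
 * addresses" handed out above are ordinary kernel virtual addresses
 * and all copying is done by the CPU, so there is no device view of
 * memory to synchronize with.
 */
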
static void *qib_dma_alloc_coherent(struct ib_device *dev, size_t size,
				    u64 *dma_handle, gfp_t flag)
{
	struct page *p;
	void *addr = NULL;

	p = alloc_pages(flag, get_order(size));
	if (p)
		addr = page_address(p);
	if (dma_handle)
		*dma_handle = (u64) addr;
	return addr;
}

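/*
 * Note: the allocation is rounded up to a power-of-two number of
 * pages by get_order(), and *dma_handle is simply the kernel virtual
 * address again. qib_dma_free_coherent() must be passed the same size
 * so that free_pages() computes the matching order.
 */
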
static void qib_dma_free_coherent(struct ib_device *dev, size_t size,
				  void *cpu_addr, u64 dma_handle)
{
	free_pages((unsigned long) cpu_addr, get_order(size));
}

struct ib_dma_mapping_ops qib_dma_mapping_ops = {
	.mapping_error = qib_mapping_error,
	.map_single = qib_dma_map_single,
	.unmap_single = qib_dma_unmap_single,
	.map_page = qib_dma_map_page,
	.unmap_page = qib_dma_unmap_page,
	.map_sg = qib_map_sg,
	.unmap_sg = qib_unmap_sg,
	.dma_address = qib_sg_dma_address,
	.dma_len = qib_sg_dma_len,
	.sync_single_for_cpu = qib_sync_single_for_cpu,
	.sync_single_for_device = qib_sync_single_for_device,
	.alloc_coherent = qib_dma_alloc_coherent,
	.free_coherent = qib_dma_free_coherent
};
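
/*
 * Dispatch sketch (an assumption about the era's core verbs code, not
 * part of this file): the ib_dma_*() helpers in <rdma/ib_verbs.h>
 * check for a per-device ops table before falling back to the real
 * DMA API, roughly:
 *
 *	static inline u64 ib_dma_map_single(struct ib_device *dev,
 *					    void *cpu_addr, size_t size,
 *					    enum dma_data_direction direction)
 *	{
 *		if (dev->dma_ops)
 *			return dev->dma_ops->map_single(dev, cpu_addr,
 *							size, direction);
 *		return dma_map_single(dev->dma_device, cpu_addr,
 *				      size, direction);
 *	}
 */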