/*
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>

#include <linux/mlx4/cmd.h>

#include "mlx4.h"
#include "icm.h"
#include "fw.h"

/*
 * We allocate in as big chunks as we can, up to a maximum of 256 KB
 * per chunk.
 */
enum {
	MLX4_ICM_ALLOC_SIZE	= 1 << 18,
	MLX4_TABLE_CHUNK_SIZE	= 1 << 18
};

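/*
 * Illustration only, not part of the driver: a minimal sketch of the
 * chunk-size arithmetic above.  Assuming a typical 4 KB PAGE_SIZE,
 * get_order(MLX4_ICM_ALLOC_SIZE) is 6, so each maximal allocation below
 * is 1 << 6 = 64 contiguous pages (256 KB).  The helper name is
 * hypothetical.
 */
#if 0
static int mlx4_icm_max_order_example(void)
{
	/* 256 KB in 4 KB pages -> order 6; differs if PAGE_SHIFT != 12 */
	return get_order(MLX4_ICM_ALLOC_SIZE);
}
#endif
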
static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
{
	int i;

	if (chunk->nsg > 0)
		pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages,
			     PCI_DMA_BIDIRECTIONAL);

	for (i = 0; i < chunk->npages; ++i)
		__free_pages(sg_page(&chunk->mem[i]),
			     get_order(chunk->mem[i].length));
}

static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
{
	int i;

	for (i = 0; i < chunk->npages; ++i)
		dma_free_coherent(&dev->pdev->dev, chunk->mem[i].length,
				  lowmem_page_address(sg_page(&chunk->mem[i])),
				  sg_dma_address(&chunk->mem[i]));
}

void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent)
{
	struct mlx4_icm_chunk *chunk, *tmp;

	if (!icm)
		return;

	list_for_each_entry_safe(chunk, tmp, &icm->chunk_list, list) {
		if (coherent)
			mlx4_free_icm_coherent(dev, chunk);
		else
			mlx4_free_icm_pages(dev, chunk);

		kfree(chunk);
	}

	kfree(icm);
}

static int mlx4_alloc_icm_pages(struct scatterlist *mem, int order, gfp_t gfp_mask)
{
	struct page *page;

	page = alloc_pages(gfp_mask, order);
	if (!page)
		return -ENOMEM;

	sg_set_page(mem, page, PAGE_SIZE << order, 0);
	return 0;
}

static int mlx4_alloc_icm_coherent(struct device *dev, struct scatterlist *mem,
				   int order, gfp_t gfp_mask)
{
	void *buf = dma_alloc_coherent(dev, PAGE_SIZE << order,
				       &sg_dma_address(mem), gfp_mask);
	if (!buf)
		return -ENOMEM;

	sg_set_buf(mem, buf, PAGE_SIZE << order);
	BUG_ON(mem->offset);
	sg_dma_len(mem) = PAGE_SIZE << order;
	return 0;
}

struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
				gfp_t gfp_mask, int coherent)
{
	struct mlx4_icm *icm;
	struct mlx4_icm_chunk *chunk = NULL;
	int cur_order;
	int ret;

	/* We use sg_set_buf for coherent allocs, which assumes low memory */
	BUG_ON(coherent && (gfp_mask & __GFP_HIGHMEM));

	icm = kmalloc(sizeof *icm, gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
	if (!icm)
		return NULL;

	icm->refcount = 0;
	INIT_LIST_HEAD(&icm->chunk_list);

	cur_order = get_order(MLX4_ICM_ALLOC_SIZE);

	while (npages > 0) {
		if (!chunk) {
			chunk = kmalloc(sizeof *chunk,
					gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
			if (!chunk)
				goto fail;

			sg_init_table(chunk->mem, MLX4_ICM_CHUNK_LEN);
			chunk->npages = 0;
			chunk->nsg    = 0;
			list_add_tail(&chunk->list, &icm->chunk_list);
		}

		while (1 << cur_order > npages)
			--cur_order;

		if (coherent)
			ret = mlx4_alloc_icm_coherent(&dev->pdev->dev,
						      &chunk->mem[chunk->npages],
						      cur_order, gfp_mask);
		else
			ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages],
						   cur_order, gfp_mask);

		if (ret) {
			if (--cur_order < 0)
				goto fail;
			else
				continue;
		}

		++chunk->npages;

		if (coherent)
			++chunk->nsg;
		else if (chunk->npages == MLX4_ICM_CHUNK_LEN) {
			chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
						chunk->npages,
						PCI_DMA_BIDIRECTIONAL);

			if (chunk->nsg <= 0)
				goto fail;
		}

		if (chunk->npages == MLX4_ICM_CHUNK_LEN)
			chunk = NULL;

		npages -= 1 << cur_order;
	}

	if (!coherent && chunk) {
		chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
					chunk->npages,
					PCI_DMA_BIDIRECTIONAL);

		if (chunk->nsg <= 0)
			goto fail;
	}

	return icm;

fail:
	mlx4_free_icm(dev, icm, coherent);
	return NULL;
}

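/*
 * Illustration only, not part of the driver: a minimal sketch of how a
 * caller might pair mlx4_alloc_icm() with mlx4_free_icm().  The 1 MB
 * size and the non-coherent, lowmem flags are arbitrary example values.
 */
#if 0
static int mlx4_icm_alloc_example(struct mlx4_dev *dev)
{
	struct mlx4_icm *icm;

	icm = mlx4_alloc_icm(dev, (1 << 20) >> PAGE_SHIFT,
			     GFP_KERNEL | __GFP_NOWARN, 0);
	if (!icm)
		return -ENOMEM;

	/* ... map it to the device (e.g. via mlx4_MAP_ICM()) and use it ... */

	mlx4_free_icm(dev, icm, 0);
	return 0;
}
#endif
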
static int mlx4_MAP_ICM(struct mlx4_dev *dev, struct mlx4_icm *icm, u64 virt)
{
	return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM, icm, virt);
}

int mlx4_UNMAP_ICM(struct mlx4_dev *dev, u64 virt, u32 page_count)
{
	return mlx4_cmd(dev, virt, page_count, 0, MLX4_CMD_UNMAP_ICM,
			MLX4_CMD_TIME_CLASS_B);
}

int mlx4_MAP_ICM_page(struct mlx4_dev *dev, u64 dma_addr, u64 virt)
{
	struct mlx4_cmd_mailbox *mailbox;
	__be64 *inbox;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	inbox = mailbox->buf;

	inbox[0] = cpu_to_be64(virt);
	inbox[1] = cpu_to_be64(dma_addr);

	err = mlx4_cmd(dev, mailbox->dma, 1, 0, MLX4_CMD_MAP_ICM,
		       MLX4_CMD_TIME_CLASS_B);

	mlx4_free_cmd_mailbox(dev, mailbox);

	if (!err)
		mlx4_dbg(dev, "Mapped page at %llx to %llx for ICM.\n",
			 (unsigned long long) dma_addr, (unsigned long long) virt);

	return err;
}

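/*
 * Illustration only, not part of the driver: the MAP_ICM mailbox built
 * above carries one (ICM virtual address, DMA address) pair as two
 * big-endian 64-bit words.  A hypothetical caller mapping a single ICM
 * page might look like this; 'icm_virt' and the helper name are assumed.
 */
#if 0
static int mlx4_map_one_icm_page_example(struct mlx4_dev *dev,
					 dma_addr_t page_dma, u64 icm_virt)
{
	/* icm_virt is assumed to be MLX4_ICM_PAGE_SIZE aligned */
	return mlx4_MAP_ICM_page(dev, (u64) page_dma, icm_virt);
}
#endif
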
int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm)
{
	return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM_AUX, icm, -1);
}

int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev)
{
	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_ICM_AUX, MLX4_CMD_TIME_CLASS_B);
}

int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj)
{
	int i = (obj & (table->num_obj - 1)) / (MLX4_TABLE_CHUNK_SIZE / table->obj_size);
	int ret = 0;

	mutex_lock(&table->mutex);

	if (table->icm[i]) {
		++table->icm[i]->refcount;
		goto out;
	}

	table->icm[i] = mlx4_alloc_icm(dev, MLX4_TABLE_CHUNK_SIZE >> PAGE_SHIFT,
				       (table->lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
				       __GFP_NOWARN, table->coherent);
	if (!table->icm[i]) {
		ret = -ENOMEM;
		goto out;
	}

	if (mlx4_MAP_ICM(dev, table->icm[i], table->virt +
			 (u64) i * MLX4_TABLE_CHUNK_SIZE)) {
		mlx4_free_icm(dev, table->icm[i], table->coherent);
		table->icm[i] = NULL;
		ret = -ENOMEM;
		goto out;
	}

	++table->icm[i]->refcount;

out:
	mutex_unlock(&table->mutex);
	return ret;
}

void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj)
{
	int i;

	i = (obj & (table->num_obj - 1)) / (MLX4_TABLE_CHUNK_SIZE / table->obj_size);

	mutex_lock(&table->mutex);

	if (--table->icm[i]->refcount == 0) {
		mlx4_UNMAP_ICM(dev, table->virt + i * MLX4_TABLE_CHUNK_SIZE,
			       MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
		mlx4_free_icm(dev, table->icm[i], table->coherent);
		table->icm[i] = NULL;
	}

	mutex_unlock(&table->mutex);
}

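/*
 * Illustration only, not part of the driver: mlx4_table_get() and
 * mlx4_table_put() bracket the lifetime of the ICM chunk backing one
 * object, so a typical caller takes a reference before touching object
 * 'obj' and drops it afterwards.  The helper name is hypothetical.
 */
#if 0
static int mlx4_table_refcount_example(struct mlx4_dev *dev,
				       struct mlx4_icm_table *table, int obj)
{
	int err;

	err = mlx4_table_get(dev, table, obj);	/* maps the chunk if needed */
	if (err)
		return err;

	/* ... use the object backed by the now-mapped ICM chunk ... */

	mlx4_table_put(dev, table, obj);	/* may unmap and free the chunk */
	return 0;
}
#endif
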
void *mlx4_table_find(struct mlx4_icm_table *table, int obj, dma_addr_t *dma_handle)
{
	int idx, offset, dma_offset, i;
	struct mlx4_icm_chunk *chunk;
	struct mlx4_icm *icm;
	struct page *page = NULL;

	if (!table->lowmem)
		return NULL;

	mutex_lock(&table->mutex);

	idx = (obj & (table->num_obj - 1)) * table->obj_size;
	icm = table->icm[idx / MLX4_TABLE_CHUNK_SIZE];
	dma_offset = offset = idx % MLX4_TABLE_CHUNK_SIZE;

	if (!icm)
		goto out;

	list_for_each_entry(chunk, &icm->chunk_list, list) {
		for (i = 0; i < chunk->npages; ++i) {
			if (dma_handle && dma_offset >= 0) {
				if (sg_dma_len(&chunk->mem[i]) > dma_offset)
					*dma_handle = sg_dma_address(&chunk->mem[i]) +
						dma_offset;
				dma_offset -= sg_dma_len(&chunk->mem[i]);
			}
			/*
			 * DMA mapping can merge pages but not split them,
			 * so if we found the page, dma_handle has already
			 * been assigned to.
			 */
			if (chunk->mem[i].length > offset) {
				page = sg_page(&chunk->mem[i]);
				goto out;
			}
			offset -= chunk->mem[i].length;
		}
	}

out:
	mutex_unlock(&table->mutex);
	return page ? lowmem_page_address(page) + offset : NULL;
}

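/*
 * Illustration only, not part of the driver: mlx4_table_find() returns a
 * kernel virtual address for an object in a lowmem table and, if asked,
 * its bus address.  A hypothetical lookup of object 'obj':
 */
#if 0
static void *mlx4_table_find_example(struct mlx4_icm_table *table, int obj,
				     dma_addr_t *dma)
{
	/* returns NULL for highmem tables or if the chunk is not allocated */
	return mlx4_table_find(table, obj, dma);
}
#endif
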
int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
			 int start, int end)
{
	int inc = MLX4_TABLE_CHUNK_SIZE / table->obj_size;
	int i, err = 0;

	for (i = start; i <= end; i += inc) {
		err = mlx4_table_get(dev, table, i);
		if (err)
			goto fail;
	}

	return 0;

fail:
	while (i > start) {
		i -= inc;
		mlx4_table_put(dev, table, i);
	}

	return err;
}

void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
			  int start, int end)
{
	int i;

	for (i = start; i <= end; i += MLX4_TABLE_CHUNK_SIZE / table->obj_size)
		mlx4_table_put(dev, table, i);
}

int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
			u64 virt, int obj_size, int nobj, int reserved,
			int use_lowmem, int use_coherent)
{
	int obj_per_chunk;
	int num_icm;
	unsigned chunk_size;
	int i;

	obj_per_chunk = MLX4_TABLE_CHUNK_SIZE / obj_size;
	num_icm = (nobj + obj_per_chunk - 1) / obj_per_chunk;

	table->icm      = kcalloc(num_icm, sizeof *table->icm, GFP_KERNEL);
	if (!table->icm)
		return -ENOMEM;
	table->virt     = virt;
	table->num_icm  = num_icm;
	table->num_obj  = nobj;
	table->obj_size = obj_size;
	table->lowmem   = use_lowmem;
	table->coherent = use_coherent;
	mutex_init(&table->mutex);

	for (i = 0; i * MLX4_TABLE_CHUNK_SIZE < reserved * obj_size; ++i) {
		chunk_size = MLX4_TABLE_CHUNK_SIZE;
		if ((i + 1) * MLX4_TABLE_CHUNK_SIZE > nobj * obj_size)
			chunk_size = PAGE_ALIGN(nobj * obj_size - i * MLX4_TABLE_CHUNK_SIZE);

		table->icm[i] = mlx4_alloc_icm(dev, chunk_size >> PAGE_SHIFT,
					       (use_lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
					       __GFP_NOWARN, use_coherent);
		if (!table->icm[i])
			goto err;
		if (mlx4_MAP_ICM(dev, table->icm[i], virt + i * MLX4_TABLE_CHUNK_SIZE)) {
			mlx4_free_icm(dev, table->icm[i], use_coherent);
			table->icm[i] = NULL;
			goto err;
		}

		/*
		 * Add a reference to this ICM chunk so that it never
		 * gets freed (since it contains reserved firmware objects).
		 */
		++table->icm[i]->refcount;
	}

	return 0;

err:
	for (i = 0; i < num_icm; ++i)
		if (table->icm[i]) {
			mlx4_UNMAP_ICM(dev, virt + i * MLX4_TABLE_CHUNK_SIZE,
				       MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
			mlx4_free_icm(dev, table->icm[i], use_coherent);
		}

	return -ENOMEM;
}

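/*
 * Illustration only, not part of the driver: a sketch of the table
 * lifecycle.  The object size, count and reserved count below are
 * made-up numbers; real callers take them from the firmware's reported
 * device capabilities.  The helper name and 'icm_virt' are assumptions.
 */
#if 0
static int mlx4_icm_table_lifecycle_example(struct mlx4_dev *dev,
					    struct mlx4_icm_table *table,
					    u64 icm_virt)
{
	int err;

	/* 64-byte objects, 1 << 20 of them, first 16 reserved for firmware */
	err = mlx4_init_icm_table(dev, table, icm_virt, 64, 1 << 20, 16, 1, 0);
	if (err)
		return err;

	/* ... mlx4_table_get()/mlx4_table_put() on individual objects ... */

	mlx4_cleanup_icm_table(dev, table);
	return 0;
}
#endif
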
void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table)
{
	int i;

	for (i = 0; i < table->num_icm; ++i)
		if (table->icm[i]) {
			mlx4_UNMAP_ICM(dev, table->virt + i * MLX4_TABLE_CHUNK_SIZE,
				       MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
			mlx4_free_icm(dev, table->icm[i], table->coherent);
		}

	kfree(table->icm);
}