/* drivers/net/mlx4/icm.h — ICM (InfiniHost Context Memory) management for mlx4 */
/*
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
34 #ifndef MLX4_ICM_H
35 #define MLX4_ICM_H
37 #include <linux/list.h>
38 #include <linux/pci.h>
39 #include <linux/mutex.h>
/*
 * Number of scatterlist entries per ICM chunk, chosen so that a
 * struct mlx4_icm_chunk (list head + two ints + the sg array) fits
 * within 256 bytes.
 */
#define MLX4_ICM_CHUNK_LEN						\
	((256 - sizeof (struct list_head) - 2 * sizeof (int)) /		\
	 (sizeof (struct scatterlist)))
/* ICM is always mapped to the device in 4 KB pages. */
enum {
	MLX4_ICM_PAGE_SHIFT	= 12,
	MLX4_ICM_PAGE_SIZE	= 1 << MLX4_ICM_PAGE_SHIFT,
};
50 struct mlx4_icm_chunk {
51 struct list_head list;
52 int npages;
53 int nsg;
54 struct scatterlist mem[MLX4_ICM_CHUNK_LEN];
57 struct mlx4_icm {
58 struct list_head chunk_list;
59 int refcount;
/*
 * Cursor for walking every scatterlist entry of an mlx4_icm;
 * driven by mlx4_icm_first()/mlx4_icm_next()/mlx4_icm_last().
 */
struct mlx4_icm_iter {
	struct mlx4_icm	       *icm;		/* allocation being walked */
	struct mlx4_icm_chunk  *chunk;		/* current chunk; NULL at end */
	int			page_idx;	/* index into chunk->mem[] */
};
68 struct mlx4_dev;
70 struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
71 gfp_t gfp_mask, int coherent);
72 void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent);
74 int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj);
75 void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj);
76 int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
77 int start, int end);
78 void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
79 int start, int end);
80 int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
81 u64 virt, int obj_size, int nobj, int reserved,
82 int use_lowmem, int use_coherent);
83 void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table);
84 int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj);
85 void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj);
86 void *mlx4_table_find(struct mlx4_icm_table *table, int obj, dma_addr_t *dma_handle);
87 int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
88 int start, int end);
89 void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
90 int start, int end);
92 static inline void mlx4_icm_first(struct mlx4_icm *icm,
93 struct mlx4_icm_iter *iter)
95 iter->icm = icm;
96 iter->chunk = list_empty(&icm->chunk_list) ?
97 NULL : list_entry(icm->chunk_list.next,
98 struct mlx4_icm_chunk, list);
99 iter->page_idx = 0;
102 static inline int mlx4_icm_last(struct mlx4_icm_iter *iter)
104 return !iter->chunk;
107 static inline void mlx4_icm_next(struct mlx4_icm_iter *iter)
109 if (++iter->page_idx >= iter->chunk->nsg) {
110 if (iter->chunk->list.next == &iter->icm->chunk_list) {
111 iter->chunk = NULL;
112 return;
115 iter->chunk = list_entry(iter->chunk->list.next,
116 struct mlx4_icm_chunk, list);
117 iter->page_idx = 0;
121 static inline dma_addr_t mlx4_icm_addr(struct mlx4_icm_iter *iter)
123 return sg_dma_address(&iter->chunk->mem[iter->page_idx]);
126 static inline unsigned long mlx4_icm_size(struct mlx4_icm_iter *iter)
128 return sg_dma_len(&iter->chunk->mem[iter->page_idx]);
131 int mlx4_UNMAP_ICM(struct mlx4_dev *dev, u64 virt, u32 page_count);
132 int mlx4_MAP_ICM_page(struct mlx4_dev *dev, u64 dma_addr, u64 virt);
133 int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm);
134 int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev);
136 #endif /* MLX4_ICM_H */