2 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
3 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
34 #include <linux/init.h>
35 #include <linux/errno.h>
36 #include <linux/export.h>
37 #include <linux/io-mapping.h>
enum {
	/*
	 * Number of UAR pages held back from blue-flame allocation so
	 * that plain UAR allocations (doorbell pages) cannot be starved.
	 */
	MLX4_NUM_RESERVED_UARS = 8
};
48 int mlx4_pd_alloc(struct mlx4_dev
*dev
, u32
*pdn
)
50 struct mlx4_priv
*priv
= mlx4_priv(dev
);
52 *pdn
= mlx4_bitmap_alloc(&priv
->pd_bitmap
);
58 EXPORT_SYMBOL_GPL(mlx4_pd_alloc
);
60 void mlx4_pd_free(struct mlx4_dev
*dev
, u32 pdn
)
62 mlx4_bitmap_free(&mlx4_priv(dev
)->pd_bitmap
, pdn
);
64 EXPORT_SYMBOL_GPL(mlx4_pd_free
);
66 int mlx4_xrcd_alloc(struct mlx4_dev
*dev
, u32
*xrcdn
)
68 struct mlx4_priv
*priv
= mlx4_priv(dev
);
70 *xrcdn
= mlx4_bitmap_alloc(&priv
->xrcd_bitmap
);
76 EXPORT_SYMBOL_GPL(mlx4_xrcd_alloc
);
78 void mlx4_xrcd_free(struct mlx4_dev
*dev
, u32 xrcdn
)
80 mlx4_bitmap_free(&mlx4_priv(dev
)->xrcd_bitmap
, xrcdn
);
82 EXPORT_SYMBOL_GPL(mlx4_xrcd_free
);
84 int mlx4_init_pd_table(struct mlx4_dev
*dev
)
86 struct mlx4_priv
*priv
= mlx4_priv(dev
);
88 return mlx4_bitmap_init(&priv
->pd_bitmap
, dev
->caps
.num_pds
,
89 (1 << NOT_MASKED_PD_BITS
) - 1,
90 dev
->caps
.reserved_pds
, 0);
93 void mlx4_cleanup_pd_table(struct mlx4_dev
*dev
)
95 mlx4_bitmap_cleanup(&mlx4_priv(dev
)->pd_bitmap
);
98 int mlx4_init_xrcd_table(struct mlx4_dev
*dev
)
100 struct mlx4_priv
*priv
= mlx4_priv(dev
);
102 return mlx4_bitmap_init(&priv
->xrcd_bitmap
, (1 << 16),
103 (1 << 16) - 1, dev
->caps
.reserved_xrcds
+ 1, 0);
106 void mlx4_cleanup_xrcd_table(struct mlx4_dev
*dev
)
108 mlx4_bitmap_cleanup(&mlx4_priv(dev
)->xrcd_bitmap
);
111 int mlx4_uar_alloc(struct mlx4_dev
*dev
, struct mlx4_uar
*uar
)
115 uar
->index
= mlx4_bitmap_alloc(&mlx4_priv(dev
)->uar_table
.bitmap
);
116 if (uar
->index
== -1)
119 if (mlx4_is_slave(dev
))
120 offset
= uar
->index
% ((int) pci_resource_len(dev
->pdev
, 2) /
121 dev
->caps
.uar_page_size
);
124 uar
->pfn
= (pci_resource_start(dev
->pdev
, 2) >> PAGE_SHIFT
) + offset
;
128 EXPORT_SYMBOL_GPL(mlx4_uar_alloc
);
130 void mlx4_uar_free(struct mlx4_dev
*dev
, struct mlx4_uar
*uar
)
132 mlx4_bitmap_free(&mlx4_priv(dev
)->uar_table
.bitmap
, uar
->index
);
134 EXPORT_SYMBOL_GPL(mlx4_uar_free
);
136 int mlx4_bf_alloc(struct mlx4_dev
*dev
, struct mlx4_bf
*bf
)
138 struct mlx4_priv
*priv
= mlx4_priv(dev
);
139 struct mlx4_uar
*uar
;
143 if (!priv
->bf_mapping
)
146 mutex_lock(&priv
->bf_mutex
);
147 if (!list_empty(&priv
->bf_list
))
148 uar
= list_entry(priv
->bf_list
.next
, struct mlx4_uar
, bf_list
);
150 if (mlx4_bitmap_avail(&priv
->uar_table
.bitmap
) < MLX4_NUM_RESERVED_UARS
) {
154 uar
= kmalloc(sizeof *uar
, GFP_KERNEL
);
159 err
= mlx4_uar_alloc(dev
, uar
);
163 uar
->map
= ioremap(uar
->pfn
<< PAGE_SHIFT
, PAGE_SIZE
);
169 uar
->bf_map
= io_mapping_map_wc(priv
->bf_mapping
, uar
->index
<< PAGE_SHIFT
);
174 uar
->free_bf_bmap
= 0;
175 list_add(&uar
->bf_list
, &priv
->bf_list
);
179 idx
= ffz(uar
->free_bf_bmap
);
180 uar
->free_bf_bmap
|= 1 << idx
;
183 bf
->buf_size
= dev
->caps
.bf_reg_size
/ 2;
184 bf
->reg
= uar
->bf_map
+ idx
* dev
->caps
.bf_reg_size
;
185 if (uar
->free_bf_bmap
== (1 << dev
->caps
.bf_regs_per_page
) - 1)
186 list_del_init(&uar
->bf_list
);
195 mlx4_uar_free(dev
, uar
);
201 mutex_unlock(&priv
->bf_mutex
);
204 EXPORT_SYMBOL_GPL(mlx4_bf_alloc
);
206 void mlx4_bf_free(struct mlx4_dev
*dev
, struct mlx4_bf
*bf
)
208 struct mlx4_priv
*priv
= mlx4_priv(dev
);
211 if (!bf
->uar
|| !bf
->uar
->bf_map
)
214 mutex_lock(&priv
->bf_mutex
);
215 idx
= (bf
->reg
- bf
->uar
->bf_map
) / dev
->caps
.bf_reg_size
;
216 bf
->uar
->free_bf_bmap
&= ~(1 << idx
);
217 if (!bf
->uar
->free_bf_bmap
) {
218 if (!list_empty(&bf
->uar
->bf_list
))
219 list_del(&bf
->uar
->bf_list
);
221 io_mapping_unmap(bf
->uar
->bf_map
);
222 iounmap(bf
->uar
->map
);
223 mlx4_uar_free(dev
, bf
->uar
);
225 } else if (list_empty(&bf
->uar
->bf_list
))
226 list_add(&bf
->uar
->bf_list
, &priv
->bf_list
);
228 mutex_unlock(&priv
->bf_mutex
);
230 EXPORT_SYMBOL_GPL(mlx4_bf_free
);
232 int mlx4_init_uar_table(struct mlx4_dev
*dev
)
234 if (dev
->caps
.num_uars
<= 128) {
235 mlx4_err(dev
, "Only %d UAR pages (need more than 128)\n",
237 mlx4_err(dev
, "Increase firmware log2_uar_bar_megabytes?\n");
241 return mlx4_bitmap_init(&mlx4_priv(dev
)->uar_table
.bitmap
,
242 dev
->caps
.num_uars
, dev
->caps
.num_uars
- 1,
243 dev
->caps
.reserved_uars
, 0);
246 void mlx4_cleanup_uar_table(struct mlx4_dev
*dev
)
248 mlx4_bitmap_cleanup(&mlx4_priv(dev
)->uar_table
.bitmap
);