/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
33 #include <linux/kernel.h>
34 #include <linux/module.h>
35 #include <linux/io-mapping.h>
36 #include <linux/mlx5/driver.h>
37 #include <linux/mlx5/cmd.h>
38 #include "mlx5_core.h"
/* Sizing of the driver-owned UAR pool.  NUM_DRIVER_UARS pages are
 * allocated up front by mlx5_alloc_uuars(); the last NUM_LOW_LAT_UUARS
 * micro-UARs of the pool are reserved for low-latency (dedicated) use.
 * NOTE(review): the enum opener and NUM_DRIVER_UARS value were lost in
 * extraction; 4 matches the upstream driver — confirm against original.
 */
enum {
        NUM_DRIVER_UARS         = 4,
        NUM_LOW_LAT_UUARS       = 4,
};
46 struct mlx5_alloc_uar_mbox_in
{
47 struct mlx5_inbox_hdr hdr
;
51 struct mlx5_alloc_uar_mbox_out
{
52 struct mlx5_outbox_hdr hdr
;
57 struct mlx5_free_uar_mbox_in
{
58 struct mlx5_inbox_hdr hdr
;
63 struct mlx5_free_uar_mbox_out
{
64 struct mlx5_outbox_hdr hdr
;
68 int mlx5_cmd_alloc_uar(struct mlx5_core_dev
*dev
, u32
*uarn
)
70 struct mlx5_alloc_uar_mbox_in in
;
71 struct mlx5_alloc_uar_mbox_out out
;
74 memset(&in
, 0, sizeof(in
));
75 memset(&out
, 0, sizeof(out
));
76 in
.hdr
.opcode
= cpu_to_be16(MLX5_CMD_OP_ALLOC_UAR
);
77 err
= mlx5_cmd_exec(dev
, &in
, sizeof(in
), &out
, sizeof(out
));
82 err
= mlx5_cmd_status_to_err(&out
.hdr
);
86 *uarn
= be32_to_cpu(out
.uarn
) & 0xffffff;
91 EXPORT_SYMBOL(mlx5_cmd_alloc_uar
);
93 int mlx5_cmd_free_uar(struct mlx5_core_dev
*dev
, u32 uarn
)
95 struct mlx5_free_uar_mbox_in in
;
96 struct mlx5_free_uar_mbox_out out
;
99 memset(&in
, 0, sizeof(in
));
100 memset(&out
, 0, sizeof(out
));
101 in
.hdr
.opcode
= cpu_to_be16(MLX5_CMD_OP_DEALLOC_UAR
);
102 in
.uarn
= cpu_to_be32(uarn
);
103 err
= mlx5_cmd_exec(dev
, &in
, sizeof(in
), &out
, sizeof(out
));
108 err
= mlx5_cmd_status_to_err(&out
.hdr
);
113 EXPORT_SYMBOL(mlx5_cmd_free_uar
);
115 static int need_uuar_lock(int uuarn
)
117 int tot_uuars
= NUM_DRIVER_UARS
* MLX5_BF_REGS_PER_PAGE
;
119 if (uuarn
== 0 || tot_uuars
- NUM_LOW_LAT_UUARS
)
125 int mlx5_alloc_uuars(struct mlx5_core_dev
*dev
, struct mlx5_uuar_info
*uuari
)
127 int tot_uuars
= NUM_DRIVER_UARS
* MLX5_BF_REGS_PER_PAGE
;
133 uuari
->num_uars
= NUM_DRIVER_UARS
;
134 uuari
->num_low_latency_uuars
= NUM_LOW_LAT_UUARS
;
136 mutex_init(&uuari
->lock
);
137 uuari
->uars
= kcalloc(uuari
->num_uars
, sizeof(*uuari
->uars
), GFP_KERNEL
);
141 uuari
->bfs
= kcalloc(tot_uuars
, sizeof(*uuari
->bfs
), GFP_KERNEL
);
147 uuari
->bitmap
= kcalloc(BITS_TO_LONGS(tot_uuars
), sizeof(*uuari
->bitmap
),
149 if (!uuari
->bitmap
) {
154 uuari
->count
= kcalloc(tot_uuars
, sizeof(*uuari
->count
), GFP_KERNEL
);
160 for (i
= 0; i
< uuari
->num_uars
; i
++) {
161 err
= mlx5_cmd_alloc_uar(dev
, &uuari
->uars
[i
].index
);
165 addr
= dev
->iseg_base
+ ((phys_addr_t
)(uuari
->uars
[i
].index
) << PAGE_SHIFT
);
166 uuari
->uars
[i
].map
= ioremap(addr
, PAGE_SIZE
);
167 if (!uuari
->uars
[i
].map
) {
168 mlx5_cmd_free_uar(dev
, uuari
->uars
[i
].index
);
172 mlx5_core_dbg(dev
, "allocated uar index 0x%x, mmaped at %p\n",
173 uuari
->uars
[i
].index
, uuari
->uars
[i
].map
);
176 for (i
= 0; i
< tot_uuars
; i
++) {
179 bf
->buf_size
= (1 << MLX5_CAP_GEN(dev
, log_bf_reg_size
)) / 2;
180 bf
->uar
= &uuari
->uars
[i
/ MLX5_BF_REGS_PER_PAGE
];
181 bf
->regreg
= uuari
->uars
[i
/ MLX5_BF_REGS_PER_PAGE
].map
;
182 bf
->reg
= NULL
; /* Add WC support */
183 bf
->offset
= (i
% MLX5_BF_REGS_PER_PAGE
) *
184 (1 << MLX5_CAP_GEN(dev
, log_bf_reg_size
)) +
186 bf
->need_lock
= need_uuar_lock(i
);
187 spin_lock_init(&bf
->lock
);
188 spin_lock_init(&bf
->lock32
);
195 for (i
--; i
>= 0; i
--) {
196 iounmap(uuari
->uars
[i
].map
);
197 mlx5_cmd_free_uar(dev
, uuari
->uars
[i
].index
);
202 kfree(uuari
->bitmap
);
212 int mlx5_free_uuars(struct mlx5_core_dev
*dev
, struct mlx5_uuar_info
*uuari
)
214 int i
= uuari
->num_uars
;
216 for (i
--; i
>= 0; i
--) {
217 iounmap(uuari
->uars
[i
].map
);
218 mlx5_cmd_free_uar(dev
, uuari
->uars
[i
].index
);
222 kfree(uuari
->bitmap
);
229 int mlx5_alloc_map_uar(struct mlx5_core_dev
*mdev
, struct mlx5_uar
*uar
)
232 phys_addr_t uar_bar_start
;
235 err
= mlx5_cmd_alloc_uar(mdev
, &uar
->index
);
237 mlx5_core_warn(mdev
, "mlx5_cmd_alloc_uar() failed, %d\n", err
);
241 uar_bar_start
= pci_resource_start(mdev
->pdev
, 0);
242 pfn
= (uar_bar_start
>> PAGE_SHIFT
) + uar
->index
;
243 uar
->map
= ioremap(pfn
<< PAGE_SHIFT
, PAGE_SIZE
);
245 mlx5_core_warn(mdev
, "ioremap() failed, %d\n", err
);
250 if (mdev
->priv
.bf_mapping
)
251 uar
->bf_map
= io_mapping_map_wc(mdev
->priv
.bf_mapping
,
252 uar
->index
<< PAGE_SHIFT
);
257 mlx5_cmd_free_uar(mdev
, uar
->index
);
261 EXPORT_SYMBOL(mlx5_alloc_map_uar
);
263 void mlx5_unmap_free_uar(struct mlx5_core_dev
*mdev
, struct mlx5_uar
*uar
)
265 io_mapping_unmap(uar
->bf_map
);
267 mlx5_cmd_free_uar(mdev
, uar
->index
);
269 EXPORT_SYMBOL(mlx5_unmap_free_uar
);