/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/slab.h>

#include "mlx4_ib.h"

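/*
 * Translate IB access flags (IB_ACCESS_*) into the MPT permission bits
 * (MLX4_PERM_*) expected by the mlx4_core layer.
 */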
static u32 convert_access(int acc)
{
	return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX4_PERM_ATOMIC       : 0) |
	       (acc & IB_ACCESS_REMOTE_WRITE  ? MLX4_PERM_REMOTE_WRITE : 0) |
	       (acc & IB_ACCESS_REMOTE_READ   ? MLX4_PERM_REMOTE_READ  : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE   ? MLX4_PERM_LOCAL_WRITE  : 0) |
	       (acc & IB_ACCESS_MW_BIND       ? MLX4_PERM_BIND_MW      : 0) |
	       MLX4_PERM_LOCAL_READ;
}

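/* Translate the IB memory window type into the mlx4 MW type. */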
static enum mlx4_mw_type to_mlx4_type(enum ib_mw_type type)
{
	switch (type) {
	case IB_MW_TYPE_1:	return MLX4_MW_TYPE_1;
	case IB_MW_TYPE_2:	return MLX4_MW_TYPE_2;
	default:		return -1;
	}
}

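/*
 * Allocate a DMA MR spanning the whole address space (start 0,
 * length ~0ull) on the given protection domain.
 */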
struct ib_mr *mlx4_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct mlx4_ib_mr *mr;
	int err;

	mr = kmalloc(sizeof *mr, GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	err = mlx4_mr_alloc(to_mdev(pd->device)->dev, to_mpd(pd)->pdn, 0,
			    ~0ull, convert_access(acc), 0, 0, &mr->mmr);
	if (err)
		goto err_free;

	err = mlx4_mr_enable(to_mdev(pd->device)->dev, &mr->mmr);
	if (err)
		goto err_mr;

	mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;
	mr->umem = NULL;

	return &mr->ibmr;

err_mr:
	(void) mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr);

err_free:
	kfree(mr);

	return ERR_PTR(err);
}

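/*
 * Walk the umem's DMA-mapped scatterlist and write the page addresses
 * into the MTT, batching at most one page worth of u64 entries per
 * call to mlx4_write_mtt().
 */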
int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt,
			   struct ib_umem *umem)
{
	u64 *pages;
	struct ib_umem_chunk *chunk;
	int i, j, k;
	int n;
	int len;
	int err = 0;

	pages = (u64 *) __get_free_page(GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	i = n = 0;

	list_for_each_entry(chunk, &umem->chunk_list, list)
		for (j = 0; j < chunk->nmap; ++j) {
			len = sg_dma_len(&chunk->page_list[j]) >> mtt->page_shift;
			for (k = 0; k < len; ++k) {
				pages[i++] = sg_dma_address(&chunk->page_list[j]) +
					umem->page_size * k;
				/*
				 * Be friendly to mlx4_write_mtt() and
				 * pass it chunks of appropriate size.
				 */
				if (i == PAGE_SIZE / sizeof (u64)) {
					err = mlx4_write_mtt(dev->dev, mtt, n,
							     i, pages);
					if (err)
						goto out;
					n += i;
					i = 0;
				}
			}
		}

	if (i)
		err = mlx4_write_mtt(dev->dev, mtt, n, i, pages);

out:
	free_page((unsigned long) pages);
	return err;
}

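/*
 * Register a userspace memory region: pin it with ib_umem_get(),
 * allocate an MR for it, and program the MR's MTT with the umem pages.
 */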
struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(pd->device);
	struct mlx4_ib_mr *mr;
	int shift;
	int err;
	int n;

	mr = kmalloc(sizeof *mr, GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->umem = ib_umem_get(pd->uobject->context, start, length,
			       access_flags, 0);
	if (IS_ERR(mr->umem)) {
		err = PTR_ERR(mr->umem);
		goto err_free;
	}

	n = ib_umem_page_count(mr->umem);
	shift = ilog2(mr->umem->page_size);

	err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, virt_addr, length,
			    convert_access(access_flags), n, shift, &mr->mmr);
	if (err)
		goto err_umem;

	err = mlx4_ib_umem_write_mtt(dev, &mr->mmr.mtt, mr->umem);
	if (err)
		goto err_mr;

	err = mlx4_mr_enable(dev->dev, &mr->mmr);
	if (err)
		goto err_mr;

	mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;

	return &mr->ibmr;

err_mr:
	(void) mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr);

err_umem:
	ib_umem_release(mr->umem);

err_free:
	kfree(mr);

	return ERR_PTR(err);
}

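/* Free the hardware MR and release the pinned umem, if any. */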
int mlx4_ib_dereg_mr(struct ib_mr *ibmr)
{
	struct mlx4_ib_mr *mr = to_mmr(ibmr);
	int ret;

	ret = mlx4_mr_free(to_mdev(ibmr->device)->dev, &mr->mmr);
	if (ret)
		return ret;
	if (mr->umem)
		ib_umem_release(mr->umem);
	kfree(mr);

	return 0;
}

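/* Allocate and enable a memory window of the requested type. */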
struct ib_mw *mlx4_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)
{
	struct mlx4_ib_dev *dev = to_mdev(pd->device);
	struct mlx4_ib_mw *mw;
	int err;

	mw = kmalloc(sizeof(*mw), GFP_KERNEL);
	if (!mw)
		return ERR_PTR(-ENOMEM);

	err = mlx4_mw_alloc(dev->dev, to_mpd(pd)->pdn,
			    to_mlx4_type(type), &mw->mmw);
	if (err)
		goto err_free;

	err = mlx4_mw_enable(dev->dev, &mw->mmw);
	if (err)
		goto err_mw;

	mw->ibmw.rkey = mw->mmw.key;

	return &mw->ibmw;

err_mw:
	mlx4_mw_free(dev->dev, &mw->mmw);

err_free:
	kfree(mw);

	return ERR_PTR(err);
}

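/*
 * Bind a memory window by posting an IB_WR_BIND_MW work request on the
 * QP; the MW's rkey is updated only if the post succeeds.
 */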
int mlx4_ib_bind_mw(struct ib_qp *qp, struct ib_mw *mw,
		    struct ib_mw_bind *mw_bind)
{
	struct ib_send_wr  wr;
	struct ib_send_wr *bad_wr;
	int ret;

	memset(&wr, 0, sizeof(wr));
	wr.opcode               = IB_WR_BIND_MW;
	wr.wr_id                = mw_bind->wr_id;
	wr.send_flags           = mw_bind->send_flags;
	wr.wr.bind_mw.mw        = mw;
	wr.wr.bind_mw.bind_info = mw_bind->bind_info;
	wr.wr.bind_mw.rkey      = ib_inc_rkey(mw->rkey);

	ret = mlx4_ib_post_send(qp, &wr, &bad_wr);
	if (!ret)
		mw->rkey = wr.wr.bind_mw.rkey;

	return ret;
}

int mlx4_ib_dealloc_mw(struct ib_mw *ibmw)
{
	struct mlx4_ib_mw *mw = to_mmw(ibmw);

	mlx4_mw_free(to_mdev(ibmw->device)->dev, &mw->mmw);
	kfree(mw);

	return 0;
}

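/*
 * Allocate an MR usable by fast-register work requests, covering up to
 * max_page_list_len pages.
 */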
struct ib_mr *mlx4_ib_alloc_fast_reg_mr(struct ib_pd *pd,
					int max_page_list_len)
{
	struct mlx4_ib_dev *dev = to_mdev(pd->device);
	struct mlx4_ib_mr *mr;
	int err;

	mr = kmalloc(sizeof *mr, GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, 0, 0, 0,
			    max_page_list_len, 0, &mr->mmr);
	if (err)
		goto err_free;

	err = mlx4_mr_enable(dev->dev, &mr->mmr);
	if (err)
		goto err_mr;

	mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;
	mr->umem = NULL;

	return &mr->ibmr;

err_mr:
	(void) mlx4_mr_free(dev->dev, &mr->mmr);

err_free:
	kfree(mr);

	return ERR_PTR(err);
}

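/*
 * Allocate a fast-register page list plus a DMA-coherent shadow copy
 * for the HCA; the shadow mapping must be 64-byte aligned.
 */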
struct ib_fast_reg_page_list *mlx4_ib_alloc_fast_reg_page_list(struct ib_device *ibdev,
							       int page_list_len)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct mlx4_ib_fast_reg_page_list *mfrpl;
	int size = page_list_len * sizeof (u64);

	if (page_list_len > MLX4_MAX_FAST_REG_PAGES)
		return ERR_PTR(-EINVAL);

	mfrpl = kmalloc(sizeof *mfrpl, GFP_KERNEL);
	if (!mfrpl)
		return ERR_PTR(-ENOMEM);

	mfrpl->ibfrpl.page_list = kmalloc(size, GFP_KERNEL);
	if (!mfrpl->ibfrpl.page_list)
		goto err_free;

	mfrpl->mapped_page_list = dma_alloc_coherent(&dev->dev->pdev->dev,
						     size, &mfrpl->map,
						     GFP_KERNEL);
	if (!mfrpl->mapped_page_list)
		goto err_free;

	WARN_ON(mfrpl->map & 0x3f);

	return &mfrpl->ibfrpl;

err_free:
	kfree(mfrpl->ibfrpl.page_list);
	kfree(mfrpl);
	return ERR_PTR(-ENOMEM);
}

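/* Free the page list and its DMA-coherent shadow copy. */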
void mlx4_ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list)
{
	struct mlx4_ib_dev *dev = to_mdev(page_list->device);
	struct mlx4_ib_fast_reg_page_list *mfrpl = to_mfrpl(page_list);
	int size = page_list->max_page_list_len * sizeof (u64);

	dma_free_coherent(&dev->dev->pdev->dev, size, mfrpl->mapped_page_list,
			  mfrpl->map);
	kfree(mfrpl->ibfrpl.page_list);
	kfree(mfrpl);
}

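/* Allocate an FMR with the page and remap limits given in fmr_attr. */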
struct ib_fmr *mlx4_ib_fmr_alloc(struct ib_pd *pd, int acc,
				 struct ib_fmr_attr *fmr_attr)
{
	struct mlx4_ib_dev *dev = to_mdev(pd->device);
	struct mlx4_ib_fmr *fmr;
	int err;

	fmr = kmalloc(sizeof *fmr, GFP_KERNEL);
	if (!fmr)
		return ERR_PTR(-ENOMEM);

	err = mlx4_fmr_alloc(dev->dev, to_mpd(pd)->pdn, convert_access(acc),
			     fmr_attr->max_pages, fmr_attr->max_maps,
			     fmr_attr->page_shift, &fmr->mfmr);
	if (err)
		goto err_free;

	err = mlx4_fmr_enable(to_mdev(pd->device)->dev, &fmr->mfmr);
	if (err)
		goto err_mr;

	fmr->ibfmr.rkey = fmr->ibfmr.lkey = fmr->mfmr.mr.key;

	return &fmr->ibfmr;

err_mr:
	(void) mlx4_mr_free(to_mdev(pd->device)->dev, &fmr->mfmr.mr);

err_free:
	kfree(fmr);

	return ERR_PTR(err);
}

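/* Map an array of page addresses into the FMR at the given I/O virtual address. */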
int mlx4_ib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
			 int npages, u64 iova)
{
	struct mlx4_ib_fmr *ifmr = to_mfmr(ibfmr);
	struct mlx4_ib_dev *dev = to_mdev(ifmr->ibfmr.device);

	return mlx4_map_phys_fmr(dev->dev, &ifmr->mfmr, page_list, npages, iova,
				 &ifmr->ibfmr.lkey, &ifmr->ibfmr.rkey);
}

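/*
 * Unmap a list of FMRs, which must all belong to the same device, and
 * flush the change with a single SYNC_TPT firmware command.
 */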
int mlx4_ib_unmap_fmr(struct list_head *fmr_list)
{
	struct ib_fmr *ibfmr;
	int err;
	struct mlx4_dev *mdev = NULL;

	list_for_each_entry(ibfmr, fmr_list, list) {
		if (mdev && to_mdev(ibfmr->device)->dev != mdev)
			return -EINVAL;
		mdev = to_mdev(ibfmr->device)->dev;
	}

	if (!mdev)
		return 0;

	list_for_each_entry(ibfmr, fmr_list, list) {
		struct mlx4_ib_fmr *ifmr = to_mfmr(ibfmr);

		mlx4_fmr_unmap(mdev, &ifmr->mfmr, &ifmr->ibfmr.lkey, &ifmr->ibfmr.rkey);
	}

	/*
	 * Make sure all MPT status updates are visible before issuing
	 * SYNC_TPT firmware command.
	 */
	wmb();

	err = mlx4_SYNC_TPT(mdev);
	if (err)
		pr_warn("SYNC_TPT error %d when "
			"unmapping FMRs\n", err);

	return 0;
}

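/* Free the underlying mlx4 FMR and, on success, its ib_fmr wrapper. */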
int mlx4_ib_fmr_dealloc(struct ib_fmr *ibfmr)
{
	struct mlx4_ib_fmr *ifmr = to_mfmr(ibfmr);
	struct mlx4_ib_dev *dev = to_mdev(ibfmr->device);
	int err;

	err = mlx4_fmr_free(dev->dev, &ifmr->mfmr);

	if (!err)
		kfree(ifmr);

	return err;
}