/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
35 static u32
convert_access(int acc
)
37 return (acc
& IB_ACCESS_REMOTE_ATOMIC
? MLX4_PERM_ATOMIC
: 0) |
38 (acc
& IB_ACCESS_REMOTE_WRITE
? MLX4_PERM_REMOTE_WRITE
: 0) |
39 (acc
& IB_ACCESS_REMOTE_READ
? MLX4_PERM_REMOTE_READ
: 0) |
40 (acc
& IB_ACCESS_LOCAL_WRITE
? MLX4_PERM_LOCAL_WRITE
: 0) |
44 struct ib_mr
*mlx4_ib_get_dma_mr(struct ib_pd
*pd
, int acc
)
46 struct mlx4_ib_mr
*mr
;
49 mr
= kmalloc(sizeof *mr
, GFP_KERNEL
);
51 return ERR_PTR(-ENOMEM
);
53 err
= mlx4_mr_alloc(to_mdev(pd
->device
)->dev
, to_mpd(pd
)->pdn
, 0,
54 ~0ull, convert_access(acc
), 0, 0, &mr
->mmr
);
58 err
= mlx4_mr_enable(to_mdev(pd
->device
)->dev
, &mr
->mmr
);
62 mr
->ibmr
.rkey
= mr
->ibmr
.lkey
= mr
->mmr
.key
;
68 mlx4_mr_free(to_mdev(pd
->device
)->dev
, &mr
->mmr
);
76 int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev
*dev
, struct mlx4_mtt
*mtt
,
80 struct ib_umem_chunk
*chunk
;
86 pages
= (u64
*) __get_free_page(GFP_KERNEL
);
92 list_for_each_entry(chunk
, &umem
->chunk_list
, list
)
93 for (j
= 0; j
< chunk
->nmap
; ++j
) {
94 len
= sg_dma_len(&chunk
->page_list
[j
]) >> mtt
->page_shift
;
95 for (k
= 0; k
< len
; ++k
) {
96 pages
[i
++] = sg_dma_address(&chunk
->page_list
[j
]) +
99 * Be friendly to WRITE_MTT firmware
100 * command, and pass it chunks of
103 if (i
== PAGE_SIZE
/ sizeof (u64
) - 2) {
104 err
= mlx4_write_mtt(dev
->dev
, mtt
, n
,
115 err
= mlx4_write_mtt(dev
->dev
, mtt
, n
, i
, pages
);
118 free_page((unsigned long) pages
);
122 struct ib_mr
*mlx4_ib_reg_user_mr(struct ib_pd
*pd
, u64 start
, u64 length
,
123 u64 virt_addr
, int access_flags
,
124 struct ib_udata
*udata
)
126 struct mlx4_ib_dev
*dev
= to_mdev(pd
->device
);
127 struct mlx4_ib_mr
*mr
;
132 mr
= kmalloc(sizeof *mr
, GFP_KERNEL
);
134 return ERR_PTR(-ENOMEM
);
136 mr
->umem
= ib_umem_get(pd
->uobject
->context
, start
, length
, access_flags
);
137 if (IS_ERR(mr
->umem
)) {
138 err
= PTR_ERR(mr
->umem
);
142 n
= ib_umem_page_count(mr
->umem
);
143 shift
= ilog2(mr
->umem
->page_size
);
145 err
= mlx4_mr_alloc(dev
->dev
, to_mpd(pd
)->pdn
, virt_addr
, length
,
146 convert_access(access_flags
), n
, shift
, &mr
->mmr
);
150 err
= mlx4_ib_umem_write_mtt(dev
, &mr
->mmr
.mtt
, mr
->umem
);
154 err
= mlx4_mr_enable(dev
->dev
, &mr
->mmr
);
158 mr
->ibmr
.rkey
= mr
->ibmr
.lkey
= mr
->mmr
.key
;
163 mlx4_mr_free(to_mdev(pd
->device
)->dev
, &mr
->mmr
);
166 ib_umem_release(mr
->umem
);
174 int mlx4_ib_dereg_mr(struct ib_mr
*ibmr
)
176 struct mlx4_ib_mr
*mr
= to_mmr(ibmr
);
178 mlx4_mr_free(to_mdev(ibmr
->device
)->dev
, &mr
->mmr
);
180 ib_umem_release(mr
->umem
);