/*
 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/slab.h>

#include <rdma/ib_umem.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_smi.h>

#include "ipath_verbs.h"

/* Fast memory region */
struct ipath_fmr {
        struct ib_fmr ibfmr;
        u8 page_shift;
        struct ipath_mregion mr;        /* must be last */
};

static inline struct ipath_fmr *to_ifmr(struct ib_fmr *ibfmr)
{
        return container_of(ibfmr, struct ipath_fmr, ibfmr);
}
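
/*
 * Illustrative note (not part of the driver logic): to_ifmr() relies on
 * container_of() to recover the wrapper structure from a pointer to its
 * embedded member, so no lookup table is needed. A minimal sketch of
 * the pattern, with a hypothetical handle from the core verbs layer:
 *
 *      struct ipath_fmr *fmr = to_ifmr(ibfmr);   // O(1) pointer math
 */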

/**
 * ipath_get_dma_mr - get a DMA memory region
 * @pd: protection domain for this memory region
 * @acc: access flags
 *
 * Returns the memory region on success, otherwise returns an errno.
 * Note that all DMA addresses should be created via the
 * struct ib_dma_mapping_ops functions (see ipath_dma.c).
 */
struct ib_mr *ipath_get_dma_mr(struct ib_pd *pd, int acc)
{
        struct ipath_mr *mr;
        struct ib_mr *ret;

        mr = kzalloc(sizeof *mr, GFP_KERNEL);
        if (!mr) {
                ret = ERR_PTR(-ENOMEM);
                goto bail;
        }

        mr->mr.access_flags = acc;
        ret = &mr->ibmr;

bail:
        return ret;
}
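
/*
 * Usage sketch (illustrative): kernel consumers reach this entry point
 * through the core verb ib_get_dma_mr(), which dispatches to the
 * driver's get_dma_mr hook, e.g.:
 *
 *      struct ib_mr *dma_mr = ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE);
 *      if (IS_ERR(dma_mr))
 *              return PTR_ERR(dma_mr);
 */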

static struct ipath_mr *alloc_mr(int count,
                                 struct ipath_lkey_table *lk_table)
{
        struct ipath_mr *mr;
        int m, i = 0;

        /* Allocate struct plus pointers to first level page tables. */
        m = (count + IPATH_SEGSZ - 1) / IPATH_SEGSZ;
        mr = kmalloc(sizeof *mr + m * sizeof mr->mr.map[0], GFP_KERNEL);
        if (!mr)
                goto done;

        /* Allocate first level page tables. */
        for (; i < m; i++) {
                mr->mr.map[i] = kmalloc(sizeof *mr->mr.map[0], GFP_KERNEL);
                if (!mr->mr.map[i])
                        goto bail;
        }
        mr->mr.mapsz = m;

        /*
         * ib_reg_phys_mr() will initialize mr->ibmr except for
         * lkey and rkey.
         */
        if (!ipath_alloc_lkey(lk_table, &mr->mr))
                goto bail;
        mr->ibmr.rkey = mr->ibmr.lkey = mr->mr.lkey;

        goto done;

bail:
        while (i) {
                i--;
                kfree(mr->mr.map[i]);
        }
        kfree(mr);
        mr = NULL;

done:
        return mr;
}
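
/*
 * Sizing note (illustrative): the two-level map holds IPATH_SEGSZ
 * segments per first-level page, so m = ceil(count / IPATH_SEGSZ).
 * For example, if IPATH_SEGSZ were 32, count = 100 would yield m = 4
 * map pages: three full pages plus one holding the remaining four
 * segments.
 */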

/**
 * ipath_reg_phys_mr - register a physical memory region
 * @pd: protection domain for this memory region
 * @buffer_list: pointer to the list of physical buffers to register
 * @num_phys_buf: the number of physical buffers to register
 * @acc: access flags
 * @iova_start: the starting address passed over IB which maps to this MR
 *
 * Returns the memory region on success, otherwise returns an errno.
 */
struct ib_mr *ipath_reg_phys_mr(struct ib_pd *pd,
                                struct ib_phys_buf *buffer_list,
                                int num_phys_buf, int acc, u64 *iova_start)
{
        struct ipath_mr *mr;
        int n, m, i;
        struct ib_mr *ret;

        mr = alloc_mr(num_phys_buf, &to_idev(pd->device)->lk_table);
        if (mr == NULL) {
                ret = ERR_PTR(-ENOMEM);
                goto bail;
        }

        mr->mr.user_base = *iova_start;
        mr->mr.iova = *iova_start;
        mr->mr.length = 0;
        mr->mr.offset = 0;
        mr->mr.access_flags = acc;
        mr->mr.max_segs = num_phys_buf;
        mr->umem = NULL;

        m = 0;
        n = 0;
        for (i = 0; i < num_phys_buf; i++) {
                mr->mr.map[m]->segs[n].vaddr = (void *) buffer_list[i].addr;
                mr->mr.map[m]->segs[n].length = buffer_list[i].size;
                mr->mr.length += buffer_list[i].size;
                n++;
                if (n == IPATH_SEGSZ) {
                        m++;
                        n = 0;
                }
        }

        ret = &mr->ibmr;

bail:
        return ret;
}
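
/*
 * Usage sketch (illustrative, assuming the legacy ib_reg_phys_mr()
 * core verb of this kernel generation): a caller supplies physically
 * contiguous buffers plus the IB virtual address that should map to
 * the first byte, e.g.:
 *
 *      struct ib_phys_buf buf = { .addr = dma_addr, .size = PAGE_SIZE };
 *      u64 iova = dma_addr;
 *      struct ib_mr *phys_mr = ib_reg_phys_mr(pd, &buf, 1,
 *                                             IB_ACCESS_LOCAL_WRITE,
 *                                             &iova);
 */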

/**
 * ipath_reg_user_mr - register a userspace memory region
 * @pd: protection domain for this memory region
 * @start: starting userspace address
 * @length: length of region to register
 * @virt_addr: virtual address to use (from HCA's point of view)
 * @mr_access_flags: access flags for this memory region
 * @udata: unused by the InfiniPath driver
 *
 * Returns the memory region on success, otherwise returns an errno.
 */
struct ib_mr *ipath_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                                u64 virt_addr, int mr_access_flags,
                                struct ib_udata *udata)
{
        struct ipath_mr *mr;
        struct ib_umem *umem;
        struct ib_umem_chunk *chunk;
        int n, m, i;
        struct ib_mr *ret;

        if (length == 0) {
                ret = ERR_PTR(-EINVAL);
                goto bail;
        }

        umem = ib_umem_get(pd->uobject->context, start, length,
                           mr_access_flags, 0);
        if (IS_ERR(umem))
                return (void *) umem;

        n = 0;
        list_for_each_entry(chunk, &umem->chunk_list, list)
                n += chunk->nents;

        mr = alloc_mr(n, &to_idev(pd->device)->lk_table);
        if (!mr) {
                ret = ERR_PTR(-ENOMEM);
                ib_umem_release(umem);
                goto bail;
        }

        mr->mr.user_base = start;
        mr->mr.iova = virt_addr;
        mr->mr.length = length;
        mr->mr.offset = umem->offset;
        mr->mr.access_flags = mr_access_flags;
        mr->mr.max_segs = n;
        mr->umem = umem;

        m = 0;
        n = 0;
        list_for_each_entry(chunk, &umem->chunk_list, list) {
                for (i = 0; i < chunk->nents; i++) {
                        void *vaddr;

                        vaddr = page_address(sg_page(&chunk->page_list[i]));
                        if (!vaddr) {
                                ret = ERR_PTR(-EINVAL);
                                goto bail;
                        }
                        mr->mr.map[m]->segs[n].vaddr = vaddr;
                        mr->mr.map[m]->segs[n].length = umem->page_size;
                        n++;
                        if (n == IPATH_SEGSZ) {
                                m++;
                                n = 0;
                        }
                }
        }
        ret = &mr->ibmr;

bail:
        return ret;
}
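
/*
 * Usage sketch (illustrative): this hook is reached via uverbs when a
 * user process registers memory through libibverbs, e.g.:
 *
 *      struct ibv_mr *user_mr = ibv_reg_mr(pd, buf, len,
 *                                          IBV_ACCESS_LOCAL_WRITE);
 *
 * ib_umem_get() above pins the pages backing [start, start + length)
 * so their kernel mappings stay valid for the life of the MR.
 */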

/**
 * ipath_dereg_mr - unregister and free a memory region
 * @ibmr: the memory region to free
 *
 * Returns 0 on success.
 *
 * Note that this is called to free MRs created by ipath_get_dma_mr()
 * or ipath_reg_user_mr().
 */
int ipath_dereg_mr(struct ib_mr *ibmr)
{
        struct ipath_mr *mr = to_imr(ibmr);
        int i;

        ipath_free_lkey(&to_idev(ibmr->device)->lk_table, ibmr->lkey);
        i = mr->mr.mapsz;
        while (i) {
                i--;
                kfree(mr->mr.map[i]);
        }

        if (mr->umem)
                ib_umem_release(mr->umem);

        kfree(mr);
        return 0;
}
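
/*
 * Usage sketch (illustrative): teardown goes through the matching core
 * verb, and must happen only after all work requests referencing the
 * MR have completed:
 *
 *      ret = ib_dereg_mr(dma_mr);
 */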

/**
 * ipath_alloc_fmr - allocate a fast memory region
 * @pd: the protection domain for this memory region
 * @mr_access_flags: access flags for this memory region
 * @fmr_attr: fast memory region attributes
 *
 * Returns the memory region on success, otherwise returns an errno.
 */
struct ib_fmr *ipath_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
                               struct ib_fmr_attr *fmr_attr)
{
        struct ipath_fmr *fmr;
        int m, i = 0;
        struct ib_fmr *ret;

        /* Allocate struct plus pointers to first level page tables. */
        m = (fmr_attr->max_pages + IPATH_SEGSZ - 1) / IPATH_SEGSZ;
        fmr = kmalloc(sizeof *fmr + m * sizeof fmr->mr.map[0], GFP_KERNEL);
        if (!fmr)
                goto bail;

        /* Allocate first level page tables. */
        for (; i < m; i++) {
                fmr->mr.map[i] = kmalloc(sizeof *fmr->mr.map[0],
                                         GFP_KERNEL);
                if (!fmr->mr.map[i])
                        goto bail;
        }
        fmr->mr.mapsz = m;

        /*
         * ib_alloc_fmr() will initialize fmr->ibfmr except for lkey &
         * rkey.
         */
        if (!ipath_alloc_lkey(&to_idev(pd->device)->lk_table, &fmr->mr))
                goto bail;
        fmr->ibfmr.rkey = fmr->ibfmr.lkey = fmr->mr.lkey;
        /*
         * Resources are allocated but no valid mapping (RKEY can't be
         * used).
         */
        fmr->mr.user_base = 0;
        fmr->mr.iova = 0;
        fmr->mr.length = 0;
        fmr->mr.offset = 0;
        fmr->mr.access_flags = mr_access_flags;
        fmr->mr.max_segs = fmr_attr->max_pages;
        fmr->page_shift = fmr_attr->page_shift;

        ret = &fmr->ibfmr;
        goto done;

bail:
        while (i)
                kfree(fmr->mr.map[--i]);
        kfree(fmr);
        ret = ERR_PTR(-ENOMEM);

done:
        return ret;
}
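
/*
 * Usage sketch (illustrative; field values are hypothetical): a kernel
 * ULP sizes the FMR for the largest mapping it intends to make:
 *
 *      struct ib_fmr_attr attr = {
 *              .max_pages  = 64,
 *              .max_maps   = 32,
 *              .page_shift = PAGE_SHIFT,
 *      };
 *      struct ib_fmr *fmr = ib_alloc_fmr(pd, IB_ACCESS_LOCAL_WRITE,
 *                                        &attr);
 */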

/**
 * ipath_map_phys_fmr - set up a fast memory region
 * @ibfmr: the fast memory region to set up
 * @page_list: the list of pages to associate with the fast memory region
 * @list_len: the number of pages to associate with the fast memory region
 * @iova: the virtual address of the start of the fast memory region
 *
 * This may be called from interrupt context.
 */
int ipath_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
                       int list_len, u64 iova)
{
        struct ipath_fmr *fmr = to_ifmr(ibfmr);
        struct ipath_lkey_table *rkt;
        unsigned long flags;
        int m, n, i;
        u32 ps;
        int ret;

        if (list_len > fmr->mr.max_segs) {
                ret = -EINVAL;
                goto bail;
        }
        rkt = &to_idev(ibfmr->device)->lk_table;
        spin_lock_irqsave(&rkt->lock, flags);
        fmr->mr.user_base = iova;
        fmr->mr.iova = iova;
        ps = 1 << fmr->page_shift;
        fmr->mr.length = list_len * ps;
        m = 0;
        n = 0;
        for (i = 0; i < list_len; i++) {
                fmr->mr.map[m]->segs[n].vaddr = (void *) page_list[i];
                fmr->mr.map[m]->segs[n].length = ps;
                if (++n == IPATH_SEGSZ) {
                        m++;
                        n = 0;
                }
        }
        spin_unlock_irqrestore(&rkt->lock, flags);
        ret = 0;

bail:
        return ret;
}
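
/*
 * Usage sketch (illustrative): the core verb ib_map_phys_fmr()
 * dispatches here with page-aligned addresses, one entry per page:
 *
 *      u64 pages[2] = { dma_addr0, dma_addr1 };
 *      ret = ib_map_phys_fmr(fmr, pages, 2, iova);
 *
 * Only a spinlock is taken above, which is why the mapping is safe
 * from interrupt context.
 */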

/**
 * ipath_unmap_fmr - unmap fast memory regions
 * @fmr_list: the list of fast memory regions to unmap
 *
 * Returns 0 on success.
 */
int ipath_unmap_fmr(struct list_head *fmr_list)
{
        struct ipath_fmr *fmr;
        struct ipath_lkey_table *rkt;
        unsigned long flags;

        list_for_each_entry(fmr, fmr_list, ibfmr.list) {
                rkt = &to_idev(fmr->ibfmr.device)->lk_table;
                spin_lock_irqsave(&rkt->lock, flags);
                fmr->mr.user_base = 0;
                fmr->mr.iova = 0;
                fmr->mr.length = 0;
                spin_unlock_irqrestore(&rkt->lock, flags);
        }
        return 0;
}
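
/*
 * Usage sketch (illustrative): unmapping is batched; callers chain
 * FMRs on a local list and invalidate them all with one call:
 *
 *      LIST_HEAD(fmr_list);
 *      list_add_tail(&fmr->list, &fmr_list);
 *      ret = ib_unmap_fmr(&fmr_list);
 */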

/**
 * ipath_dealloc_fmr - deallocate a fast memory region
 * @ibfmr: the fast memory region to deallocate
 *
 * Returns 0 on success.
 */
int ipath_dealloc_fmr(struct ib_fmr *ibfmr)
{
        struct ipath_fmr *fmr = to_ifmr(ibfmr);
        int i;

        ipath_free_lkey(&to_idev(ibfmr->device)->lk_table, ibfmr->lkey);
        i = fmr->mr.mapsz;
        while (i)
                kfree(fmr->mr.map[--i]);
        kfree(fmr);
        return 0;
}