/*
 * Copyright (c) 2006, 2007, 2009 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "qib.h"

/**
 * qib_alloc_lkey - allocate an lkey
 * @rkt: lkey table in which to allocate the lkey
 * @mr: memory region that this lkey protects
 *
 * Returns 1 if successful, otherwise returns 0.
 */
int qib_alloc_lkey(struct qib_lkey_table *rkt, struct qib_mregion *mr)
{
	unsigned long flags;
	u32 r;
	u32 n;
	int ret;

	spin_lock_irqsave(&rkt->lock, flags);

	/* Find the next available LKEY */
	r = rkt->next;
	n = r;
	for (;;) {
		if (rkt->table[r] == NULL)
			break;
		r = (r + 1) & (rkt->max - 1);
		if (r == n) {
			/* Wrapped back to the start: the table is full. */
			spin_unlock_irqrestore(&rkt->lock, flags);
			ret = 0;
			goto bail;
		}
	}
	rkt->next = (r + 1) & (rkt->max - 1);
	/*
	 * Make sure lkey is never zero which is reserved to indicate an
	 * unrestricted LKEY.
	 */
	rkt->gen++;
	mr->lkey = (r << (32 - ib_qib_lkey_table_size)) |
		((((1 << (24 - ib_qib_lkey_table_size)) - 1) & rkt->gen)
		 << 8);
	if (mr->lkey == 0) {
		mr->lkey |= 1 << 8;
		rkt->gen++;
	}
	rkt->table[r] = mr;
	spin_unlock_irqrestore(&rkt->lock, flags);

	ret = 1;

bail:
	return ret;
}
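/*
 * Editorial sketch, not part of the driver: the two hypothetical
 * helpers below only illustrate the lkey layout produced by
 * qib_alloc_lkey() above.  The top ib_qib_lkey_table_size bits carry
 * the table index; the generation counter sits above the low eight
 * bits, which qib_alloc_lkey() leaves clear.
 */
static inline u32 example_lkey_to_index(u32 lkey)
{
	/* Recover the rkt->table[] slot from the high bits. */
	return lkey >> (32 - ib_qib_lkey_table_size);
}

static inline u32 example_lkey_to_gen(u32 lkey)
{
	/* Recover the generation field packed in by qib_alloc_lkey(). */
	return (lkey >> 8) & ((1 << (24 - ib_qib_lkey_table_size)) - 1);
}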
/**
 * qib_free_lkey - free an lkey
 * @dev: ib device whose lkey table holds the lkey
 * @mr: memory region whose lkey is to be freed
 */
int qib_free_lkey(struct qib_ibdev *dev, struct qib_mregion *mr)
{
	unsigned long flags;
	u32 lkey = mr->lkey;
	u32 r;
	int ret;

	spin_lock_irqsave(&dev->lk_table.lock, flags);
	if (lkey == 0) {
		/* An lkey of zero names the reserved DMA memory region. */
		if (dev->dma_mr && dev->dma_mr == mr) {
			ret = atomic_read(&dev->dma_mr->refcount);
			if (!ret)
				dev->dma_mr = NULL;
		} else
			ret = 0;
	} else {
		r = lkey >> (32 - ib_qib_lkey_table_size);
		ret = atomic_read(&dev->lk_table.table[r]->refcount);
		if (!ret)
			dev->lk_table.table[r] = NULL;
	}
	spin_unlock_irqrestore(&dev->lk_table.lock, flags);

	if (ret)
		ret = -EBUSY;
	return ret;
}
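/*
 * Editorial sketch, not part of the driver: a hypothetical caller
 * showing the -EBUSY contract of qib_free_lkey() above.
 */
static inline int example_dereg_mr(struct qib_ibdev *dev,
				   struct qib_mregion *mr)
{
	int ret = qib_free_lkey(dev, mr);

	if (ret)
		return ret;	/* -EBUSY: the lkey is still referenced */
	/* The table slot is cleared; mr's backing memory may be freed. */
	return 0;
}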
/**
 * qib_lkey_ok - check IB SGE for validity and initialize
 * @rkt: table containing lkey to check SGE against
 * @pd: protection domain the SGE must belong to
 * @isge: outgoing internal SGE
 * @sge: SGE to check
 * @acc: access flags
 *
 * Return 1 if valid and successful, otherwise returns 0.
 *
 * Check the IB SGE for validity and initialize our internal version
 * of it.
 */
int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
		struct qib_sge *isge, struct ib_sge *sge, int acc)
{
	struct qib_mregion *mr;
	unsigned n, m;
	size_t off;
	int ret = 0;
	unsigned long flags;

	/*
	 * We use LKEY == zero for kernel virtual addresses
	 * (see qib_get_dma_mr and qib_dma.c).
	 */
	spin_lock_irqsave(&rkt->lock, flags);
	if (sge->lkey == 0) {
		struct qib_ibdev *dev = to_idev(pd->ibpd.device);

		if (pd->user)
			goto bail;
		if (!dev->dma_mr)
			goto bail;
		atomic_inc(&dev->dma_mr->refcount);
		isge->mr = dev->dma_mr;
		isge->vaddr = (void *) sge->addr;
		isge->length = sge->length;
		isge->sge_length = sge->length;
		isge->m = 0;
		isge->n = 0;
		goto ok;
	}
	mr = rkt->table[(sge->lkey >> (32 - ib_qib_lkey_table_size))];
	if (unlikely(mr == NULL || mr->lkey != sge->lkey ||
		     mr->pd != &pd->ibpd))
		goto bail;

	off = sge->addr - mr->user_base;
	if (unlikely(sge->addr < mr->user_base ||
		     off + sge->length > mr->length ||
		     (mr->access_flags & acc) != acc))
		goto bail;

	off += mr->offset;
	m = 0;
	n = 0;
	/* Walk the two-level segment map to the segment containing off. */
	while (off >= mr->map[m]->segs[n].length) {
		off -= mr->map[m]->segs[n].length;
		n++;
		if (n >= QIB_SEGSZ) {
			m++;
			n = 0;
		}
	}
	atomic_inc(&mr->refcount);
	isge->mr = mr;
	isge->vaddr = mr->map[m]->segs[n].vaddr + off;
	isge->length = mr->map[m]->segs[n].length - off;
	isge->sge_length = sge->length;
	isge->m = m;
	isge->n = n;
ok:
	ret = 1;
bail:
	spin_unlock_irqrestore(&rkt->lock, flags);
	return ret;
}
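/*
 * Editorial sketch, not part of the driver: a hypothetical helper
 * restating the segment walk performed by qib_lkey_ok() above.  It
 * assumes off has already been adjusted by mr->offset and falls
 * within the region, so the walk is guaranteed to terminate.
 */
static inline void example_find_segment(struct qib_mregion *mr, size_t off,
					unsigned *mp, unsigned *np,
					size_t *offp)
{
	unsigned m = 0, n = 0;

	/* Skip whole segments, rolling to the next map chunk as needed. */
	while (off >= mr->map[m]->segs[n].length) {
		off -= mr->map[m]->segs[n].length;
		if (++n >= QIB_SEGSZ) {
			m++;
			n = 0;
		}
	}
	*mp = m;
	*np = n;
	*offp = off;
}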
/**
 * qib_rkey_ok - check the IB virtual address, length, and RKEY
 * @qp: queue pair on which the RKEY is being used
 * @sge: SGE state to initialize
 * @len: length of data
 * @vaddr: virtual address to place data
 * @rkey: rkey to check
 * @acc: access flags
 *
 * Return 1 if successful, otherwise 0.
 */
int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
		u32 len, u64 vaddr, u32 rkey, int acc)
{
	struct qib_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table;
	struct qib_mregion *mr;
	unsigned n, m;
	size_t off;
	int ret = 0;
	unsigned long flags;

	/*
	 * We use RKEY == zero for kernel virtual addresses
	 * (see qib_get_dma_mr and qib_dma.c).
	 */
	spin_lock_irqsave(&rkt->lock, flags);
	if (rkey == 0) {
		struct qib_pd *pd = to_ipd(qp->ibqp.pd);
		struct qib_ibdev *dev = to_idev(pd->ibpd.device);

		if (pd->user)
			goto bail;
		if (!dev->dma_mr)
			goto bail;
		atomic_inc(&dev->dma_mr->refcount);
		sge->mr = dev->dma_mr;
		sge->vaddr = (void *) vaddr;
		sge->length = len;
		sge->sge_length = len;
		sge->m = 0;
		sge->n = 0;
		goto ok;
	}

	mr = rkt->table[(rkey >> (32 - ib_qib_lkey_table_size))];
	if (unlikely(mr == NULL || mr->lkey != rkey || qp->ibqp.pd != mr->pd))
		goto bail;

	off = vaddr - mr->iova;
	if (unlikely(vaddr < mr->iova || off + len > mr->length ||
		     (mr->access_flags & acc) == 0))
		goto bail;

	off += mr->offset;
	m = 0;
	n = 0;
	/* Walk the two-level segment map to the segment containing off. */
	while (off >= mr->map[m]->segs[n].length) {
		off -= mr->map[m]->segs[n].length;
		n++;
		if (n >= QIB_SEGSZ) {
			m++;
			n = 0;
		}
	}
	atomic_inc(&mr->refcount);
	sge->mr = mr;
	sge->vaddr = mr->map[m]->segs[n].vaddr + off;
	sge->length = mr->map[m]->segs[n].length - off;
	sge->sge_length = len;
	sge->m = m;
	sge->n = n;
ok:
	ret = 1;
bail:
	spin_unlock_irqrestore(&rkt->lock, flags);
	return ret;
}
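/*
 * Editorial sketch, not part of the driver: the range test applied by
 * qib_rkey_ok() above, restated as a hypothetical stand-alone
 * predicate.  An access is in range when it starts at or after the
 * region's iova and ends within region_len bytes of it.
 */
static inline int example_rkey_in_range(u64 vaddr, u32 len,
					u64 iova, size_t region_len)
{
	u64 off = vaddr - iova;

	return !(vaddr < iova || off + len > region_len);
}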
/*
 * Initialize the memory region specified by the work request.
 */
int qib_fast_reg_mr(struct qib_qp *qp, struct ib_send_wr *wr)
{
	struct qib_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table;
	struct qib_pd *pd = to_ipd(qp->ibqp.pd);
	struct qib_mregion *mr;
	u32 rkey = wr->wr.fast_reg.rkey;
	unsigned i, n, m;
	int ret = -EINVAL;
	unsigned long flags;
	u64 *page_list;
	size_t ps;

	spin_lock_irqsave(&rkt->lock, flags);
	if (pd->user || rkey == 0)
		goto bail;

	mr = rkt->table[(rkey >> (32 - ib_qib_lkey_table_size))];
	if (unlikely(mr == NULL || qp->ibqp.pd != mr->pd))
		goto bail;

	if (wr->wr.fast_reg.page_list_len > mr->max_segs)
		goto bail;

	ps = 1UL << wr->wr.fast_reg.page_shift;
	if (wr->wr.fast_reg.length > ps * wr->wr.fast_reg.page_list_len)
		goto bail;

	mr->user_base = wr->wr.fast_reg.iova_start;
	mr->iova = wr->wr.fast_reg.iova_start;
	mr->lkey = rkey;
	mr->length = wr->wr.fast_reg.length;
	mr->access_flags = wr->wr.fast_reg.access_flags;
	page_list = wr->wr.fast_reg.page_list->page_list;
	m = 0;
	n = 0;
	/* Load the page list into the region's segment arrays. */
	for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
		mr->map[m]->segs[n].vaddr = (void *) page_list[i];
		mr->map[m]->segs[n].length = ps;
		if (++n == QIB_SEGSZ) {
			m++;
			n = 0;
		}
	}

	ret = 0;
bail:
	spin_unlock_irqrestore(&rkt->lock, flags);
	return ret;
}
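/*
 * Editorial sketch, not part of the driver: the length validation in
 * qib_fast_reg_mr() above, restated as a hypothetical helper.  With
 * 2^page_shift bytes per page, page_list_len pages can map at most
 * page_list_len << page_shift bytes, so any larger WR length must be
 * rejected.
 */
static inline int example_fast_reg_len_ok(u32 length, unsigned page_shift,
					  unsigned page_list_len)
{
	size_t ps = 1UL << page_shift;	/* bytes covered per page */

	return length <= ps * (size_t)page_list_len;
}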