/*
 *  IBM eServer eHCA Infiniband device driver for Linux on POWER
 *
 *  userspace support verbs
 *
 *  Authors: Christoph Raisch <raisch@de.ibm.com>
 *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
 *           Heiko J Schick <schickhj@de.ibm.com>
 *
 *  Copyright (c) 2005 IBM Corporation
 *
 *  All rights reserved.
 *
 *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
 *  BSD.
 *
 * OpenIB BSD License
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/slab.h>

#include "ehca_classes.h"
#include "ehca_iverbs.h"
#include "ehca_mrmw.h"
#include "ehca_tools.h"
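
/*
 * Allocate a per-process user context for a device. Only the embedded
 * ib_ucontext is handed back to the core; no additional per-context
 * state is used in this file.
 */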
struct ib_ucontext *ehca_alloc_ucontext(struct ib_device *device,
                                        struct ib_udata *udata)
{
        struct ehca_ucontext *my_context;

        my_context = kzalloc(sizeof *my_context, GFP_KERNEL);
        if (!my_context) {
                ehca_err(device, "Out of memory device=%p", device);
                return ERR_PTR(-ENOMEM);
        }

        return &my_context->ib_ucontext;
}
int ehca_dealloc_ucontext(struct ib_ucontext *context)
{
        kfree(container_of(context, struct ehca_ucontext, ib_ucontext));
        return 0;
}
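
/*
 * The vm_operations below keep a use count per mapped resource:
 * vm_private_data points at the owning object's mm_count_* field,
 * which ehca_mm_open() increments and ehca_mm_close() decrements.
 */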
static void ehca_mm_open(struct vm_area_struct *vma)
{
        u32 *count = (u32 *)vma->vm_private_data;
        if (!count) {
                ehca_gen_err("Invalid vma struct vm_start=%lx vm_end=%lx",
                             vma->vm_start, vma->vm_end);
                return;
        }
        (*count)++;
        if (!(*count))
                ehca_gen_err("Use count overflow vm_start=%lx vm_end=%lx",
                             vma->vm_start, vma->vm_end);
        ehca_gen_dbg("vm_start=%lx vm_end=%lx count=%x",
                     vma->vm_start, vma->vm_end, *count);
}
static void ehca_mm_close(struct vm_area_struct *vma)
{
        u32 *count = (u32 *)vma->vm_private_data;
        if (!count) {
                ehca_gen_err("Invalid vma struct vm_start=%lx vm_end=%lx",
                             vma->vm_start, vma->vm_end);
                return;
        }
        (*count)--;
        ehca_gen_dbg("vm_start=%lx vm_end=%lx count=%x",
                     vma->vm_start, vma->vm_end, *count);
}
static const struct vm_operations_struct vm_ops = {
        .open = ehca_mm_open,
        .close = ehca_mm_close,
};
static int ehca_mmap_fw(struct vm_area_struct *vma, struct h_galpas *galpas,
                        u32 *mm_count)
{
        int ret;
        u64 vsize, physical;

        vsize = vma->vm_end - vma->vm_start;
        if (vsize < EHCA_PAGESIZE) {
                ehca_gen_err("invalid vsize=%lx", vma->vm_end - vma->vm_start);
                return -EINVAL;
        }

        physical = galpas->user.fw_handle;
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        ehca_gen_dbg("vsize=%llx physical=%llx", vsize, physical);
        /* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by remap_pfn_range() */
        ret = remap_4k_pfn(vma, vma->vm_start, physical >> EHCA_PAGESHIFT,
                           vma->vm_page_prot);
        if (unlikely(ret)) {
                ehca_gen_err("remap_pfn_range() failed ret=%i", ret);
                return -ENOMEM;
        }

        vma->vm_private_data = mm_count;
        (*mm_count)++;
        vma->vm_ops = &vm_ops;

        return 0;
}
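
/*
 * Map an ipz queue into user space: walk the queue in PAGE_SIZE steps
 * and insert each kernel page with vm_insert_page(). VM_DONTEXPAND |
 * VM_DONTDUMP keep the core MM from growing or dumping the mapping.
 */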
static int ehca_mmap_queue(struct vm_area_struct *vma, struct ipz_queue *queue,
                           u32 *mm_count)
{
        int ret;
        u64 start, ofs;
        struct page *page;

        vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
        start = vma->vm_start;
        for (ofs = 0; ofs < queue->queue_length; ofs += PAGE_SIZE) {
                u64 virt_addr = (u64)ipz_qeit_calc(queue, ofs);
                page = virt_to_page(virt_addr);
                ret = vm_insert_page(vma, start, page);
                if (unlikely(ret)) {
                        ehca_gen_err("vm_insert_page() failed rc=%i", ret);
                        return ret;
                }
                start += PAGE_SIZE;
        }
        vma->vm_private_data = mm_count;
        (*mm_count)++;
        vma->vm_ops = &vm_ops;

        return 0;
}
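
/*
 * Dispatch a CQ mmap request by resource type: 0 maps the firmware
 * galpa page, 1 maps the CQ's ipz queue.
 */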
static int ehca_mmap_cq(struct vm_area_struct *vma, struct ehca_cq *cq,
                        u32 rsrc_type)
{
        int ret;

        switch (rsrc_type) {
        case 0: /* galpa fw handle */
                ehca_dbg(cq->ib_cq.device, "cq_num=%x fw", cq->cq_number);
                ret = ehca_mmap_fw(vma, &cq->galpas, &cq->mm_count_galpa);
                if (unlikely(ret)) {
                        ehca_err(cq->ib_cq.device,
                                 "ehca_mmap_fw() failed rc=%i cq_num=%x",
                                 ret, cq->cq_number);
                        return ret;
                }
                break;

        case 1: /* cq queue_addr */
                ehca_dbg(cq->ib_cq.device, "cq_num=%x queue", cq->cq_number);
                ret = ehca_mmap_queue(vma, &cq->ipz_queue, &cq->mm_count_queue);
                if (unlikely(ret)) {
                        ehca_err(cq->ib_cq.device,
                                 "ehca_mmap_queue() failed rc=%i cq_num=%x",
                                 ret, cq->cq_number);
                        return ret;
                }
                break;

        default:
                ehca_err(cq->ib_cq.device, "bad resource type=%x cq_num=%x",
                         rsrc_type, cq->cq_number);
                return -EINVAL;
        }

        return 0;
}
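
/*
 * Dispatch a QP mmap request by resource type: 0 maps the firmware
 * galpa page, 1 the receive queue, 2 the send queue.
 */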
static int ehca_mmap_qp(struct vm_area_struct *vma, struct ehca_qp *qp,
                        u32 rsrc_type)
{
        int ret;

        switch (rsrc_type) {
        case 0: /* galpa fw handle */
                ehca_dbg(qp->ib_qp.device, "qp_num=%x fw", qp->ib_qp.qp_num);
                ret = ehca_mmap_fw(vma, &qp->galpas, &qp->mm_count_galpa);
                if (unlikely(ret)) {
                        ehca_err(qp->ib_qp.device,
                                 "remap_pfn_range() failed ret=%i qp_num=%x",
                                 ret, qp->ib_qp.qp_num);
                        return -ENOMEM;
                }
                break;

        case 1: /* qp rqueue_addr */
                ehca_dbg(qp->ib_qp.device, "qp_num=%x rq", qp->ib_qp.qp_num);
                ret = ehca_mmap_queue(vma, &qp->ipz_rqueue,
                                      &qp->mm_count_rqueue);
                if (unlikely(ret)) {
                        ehca_err(qp->ib_qp.device,
                                 "ehca_mmap_queue(rq) failed rc=%i qp_num=%x",
                                 ret, qp->ib_qp.qp_num);
                        return ret;
                }
                break;

        case 2: /* qp squeue_addr */
                ehca_dbg(qp->ib_qp.device, "qp_num=%x sq", qp->ib_qp.qp_num);
                ret = ehca_mmap_queue(vma, &qp->ipz_squeue,
                                      &qp->mm_count_squeue);
                if (unlikely(ret)) {
                        ehca_err(qp->ib_qp.device,
                                 "ehca_mmap_queue(sq) failed rc=%i qp_num=%x",
                                 ret, qp->ib_qp.qp_num);
                        return ret;
                }
                break;

        default:
                ehca_err(qp->ib_qp.device, "bad resource type=%x qp_num=%x",
                         rsrc_type, qp->ib_qp.qp_num);
                return -EINVAL;
        }

        return 0;
}
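
/*
 * Main mmap entry point. The mmap offset (vm_pgoff) encodes everything
 * needed to find the resource: bits 0-24 hold the idr handle, bits
 * 25-26 the resource type (fw page, rqueue, squeue) and bit 27 the
 * queue type (CQ or QP). The owning uobject is checked against the
 * caller's ucontext before any mapping is established.
 */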
int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
        u64 fileoffset = vma->vm_pgoff;
        u32 idr_handle = fileoffset & 0x1FFFFFF;
        u32 q_type = (fileoffset >> 27) & 0x1;    /* CQ, QP,...        */
        u32 rsrc_type = (fileoffset >> 25) & 0x3; /* sq,rq,cmnd_window */
        u32 ret;
        struct ehca_cq *cq;
        struct ehca_qp *qp;
        struct ib_uobject *uobject;

        switch (q_type) {
        case 0: /* CQ */
                read_lock(&ehca_cq_idr_lock);
                cq = idr_find(&ehca_cq_idr, idr_handle);
                read_unlock(&ehca_cq_idr_lock);

                /* make sure this mmap really belongs to the authorized user */
                if (!cq)
                        return -EINVAL;

                if (!cq->ib_cq.uobject || cq->ib_cq.uobject->context != context)
                        return -EINVAL;

                ret = ehca_mmap_cq(vma, cq, rsrc_type);
                if (unlikely(ret)) {
                        ehca_err(cq->ib_cq.device,
                                 "ehca_mmap_cq() failed rc=%i cq_num=%x",
                                 ret, cq->cq_number);
                        return ret;
                }
                break;

        case 1: /* QP */
                read_lock(&ehca_qp_idr_lock);
                qp = idr_find(&ehca_qp_idr, idr_handle);
                read_unlock(&ehca_qp_idr_lock);

                /* make sure this mmap really belongs to the authorized user */
                if (!qp)
                        return -EINVAL;

                uobject = IS_SRQ(qp) ? qp->ib_srq.uobject : qp->ib_qp.uobject;
                if (!uobject || uobject->context != context)
                        return -EINVAL;

                ret = ehca_mmap_qp(vma, qp, rsrc_type);
                if (unlikely(ret)) {
                        ehca_err(qp->ib_qp.device,
                                 "ehca_mmap_qp() failed rc=%i qp_num=%x",
                                 ret, qp->ib_qp.qp_num);
                        return ret;
                }
                break;

        default:
                ehca_gen_err("bad queue type %x", q_type);
                return -EINVAL;
        }

        return 0;
}