/*
 *  IBM eServer eHCA Infiniband device driver for Linux on POWER
 *
 *  userspace support verbs
 *
 *  Authors: Christoph Raisch <raisch@de.ibm.com>
 *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
 *           Heiko J Schick <schickhj@de.ibm.com>
 *
 *  Copyright (c) 2005 IBM Corporation
 *
 *  All rights reserved.
 *
 *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
 *  BSD.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <asm/current.h>

#include "ehca_classes.h"
#include "ehca_iverbs.h"
#include "ehca_mrmw.h"
#include "ehca_tools.h"

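/*
 * Userspace (uverbs) entry points: allocation and teardown of the
 * per-process ucontext, and mmap() of CQ/QP resources (firmware galpa
 * pages and the in-kernel work queues) into the caller's address space.
 */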
struct ib_ucontext *ehca_alloc_ucontext(struct ib_device *device,
                                        struct ib_udata *udata)
{
        struct ehca_ucontext *my_context;

        my_context = kzalloc(sizeof *my_context, GFP_KERNEL);
        if (!my_context) {
                ehca_err(device, "Out of memory device=%p", device);
                return ERR_PTR(-ENOMEM);
        }

        return &my_context->ib_ucontext;
}

int ehca_dealloc_ucontext(struct ib_ucontext *context)
{
        kfree(container_of(context, struct ehca_ucontext, ib_ucontext));
        return 0;
}

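/*
 * vm_private_data of each mapped VMA points at a per-resource use count
 * (mm_count_*); the open/close callbacks below keep that count in sync as
 * VMAs are duplicated (fork) and torn down (munmap/exit), so the driver can
 * tell whether userspace still has the resource mapped.
 */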
static void ehca_mm_open(struct vm_area_struct *vma)
{
        u32 *count = (u32 *)vma->vm_private_data;
        if (!count) {
                ehca_gen_err("Invalid vma struct vm_start=%lx vm_end=%lx",
                             vma->vm_start, vma->vm_end);
                return;
        }
        (*count)++;
        if (!(*count))
                ehca_gen_err("Use count overflow vm_start=%lx vm_end=%lx",
                             vma->vm_start, vma->vm_end);
        ehca_gen_dbg("vm_start=%lx vm_end=%lx count=%x",
                     vma->vm_start, vma->vm_end, *count);
}

static void ehca_mm_close(struct vm_area_struct *vma)
{
        u32 *count = (u32 *)vma->vm_private_data;
        if (!count) {
                ehca_gen_err("Invalid vma struct vm_start=%lx vm_end=%lx",
                             vma->vm_start, vma->vm_end);
                return;
        }
        (*count)--;
        ehca_gen_dbg("vm_start=%lx vm_end=%lx count=%x",
                     vma->vm_start, vma->vm_end, *count);
}

static struct vm_operations_struct vm_ops = {
        .open = ehca_mm_open,
        .close = ehca_mm_close,
};

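/*
 * Map the firmware register page (galpa handle) of a CQ/QP into userspace:
 * exactly one EHCA page, mapped non-cached via remap_pfn_range().
 */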
static int ehca_mmap_fw(struct vm_area_struct *vma, struct h_galpas *galpas,
                        u32 *mm_count)
{
        int ret;
        u64 vsize, physical;

        vsize = vma->vm_end - vma->vm_start;
        if (vsize != EHCA_PAGESIZE) {
                ehca_gen_err("invalid vsize=%lx", vma->vm_end - vma->vm_start);
                return -EINVAL;
        }

        physical = galpas->user.fw_handle;
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        ehca_gen_dbg("vsize=%lx physical=%lx", vsize, physical);
        /* VM_IO | VM_RESERVED are set by remap_pfn_range() */
        ret = remap_pfn_range(vma, vma->vm_start, physical >> PAGE_SHIFT,
                              vsize, vma->vm_page_prot);
        if (unlikely(ret)) {
                ehca_gen_err("remap_pfn_range() failed ret=%x", ret);
                return -ENOMEM;
        }

        vma->vm_private_data = mm_count;
        (*mm_count)++;
        vma->vm_ops = &vm_ops;

        return 0;
}

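/*
 * Map an in-kernel ipz queue into userspace page by page with
 * vm_insert_page(); the queue memory itself was allocated by the driver.
 */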
static int ehca_mmap_queue(struct vm_area_struct *vma, struct ipz_queue *queue,
                           u32 *mm_count)
{
        int ret;
        u64 start, ofs;
        struct page *page;

        vma->vm_flags |= VM_RESERVED;
        start = vma->vm_start;
        for (ofs = 0; ofs < queue->queue_length; ofs += PAGE_SIZE) {
                u64 virt_addr = (u64)ipz_qeit_calc(queue, ofs);
                page = virt_to_page(virt_addr);
                ret = vm_insert_page(vma, start, page);
                if (unlikely(ret)) {
                        ehca_gen_err("vm_insert_page() failed rc=%x", ret);
                        return ret;
                }
                start += PAGE_SIZE;
        }
        vma->vm_private_data = mm_count;
        (*mm_count)++;
        vma->vm_ops = &vm_ops;

        return 0;
}

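/*
 * rsrc_type selects what is mapped for a CQ: 1 = firmware galpa handle,
 * 2 = the completion queue memory itself.
 */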
static int ehca_mmap_cq(struct vm_area_struct *vma, struct ehca_cq *cq,
                        u32 rsrc_type)
{
        int ret;

        switch (rsrc_type) {
        case 1: /* galpa fw handle */
                ehca_dbg(cq->ib_cq.device, "cq_num=%x fw", cq->cq_number);
                ret = ehca_mmap_fw(vma, &cq->galpas, &cq->mm_count_galpa);
                if (unlikely(ret)) {
                        ehca_err(cq->ib_cq.device,
                                 "ehca_mmap_fw() failed rc=%x cq_num=%x",
                                 ret, cq->cq_number);
                        return ret;
                }
                break;

        case 2: /* cq queue_addr */
                ehca_dbg(cq->ib_cq.device, "cq_num=%x queue", cq->cq_number);
                ret = ehca_mmap_queue(vma, &cq->ipz_queue, &cq->mm_count_queue);
                if (unlikely(ret)) {
                        ehca_err(cq->ib_cq.device,
                                 "ehca_mmap_queue() failed rc=%x cq_num=%x",
                                 ret, cq->cq_number);
                        return ret;
                }
                break;

        default:
                ehca_err(cq->ib_cq.device, "bad resource type=%x cq_num=%x",
                         rsrc_type, cq->cq_number);
                return -EINVAL;
        }

        return 0;
}

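/*
 * QP counterpart of ehca_mmap_cq(): rsrc_type 1 maps the firmware galpa
 * handle, 2 the receive queue, 3 the send queue.
 */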
static int ehca_mmap_qp(struct vm_area_struct *vma, struct ehca_qp *qp,
                        u32 rsrc_type)
{
        int ret;

        switch (rsrc_type) {
        case 1: /* galpa fw handle */
                ehca_dbg(qp->ib_qp.device, "qp_num=%x fw", qp->ib_qp.qp_num);
                ret = ehca_mmap_fw(vma, &qp->galpas, &qp->mm_count_galpa);
                if (unlikely(ret)) {
                        ehca_err(qp->ib_qp.device,
                                 "remap_pfn_range() failed ret=%x qp_num=%x",
                                 ret, qp->ib_qp.qp_num);
                        return -ENOMEM;
                }
                break;

        case 2: /* qp rqueue_addr */
                ehca_dbg(qp->ib_qp.device, "qp_num=%x rqueue",
                         qp->ib_qp.qp_num);
                ret = ehca_mmap_queue(vma, &qp->ipz_rqueue,
                                      &qp->mm_count_rqueue);
                if (unlikely(ret)) {
                        ehca_err(qp->ib_qp.device,
                                 "ehca_mmap_queue(rq) failed rc=%x qp_num=%x",
                                 ret, qp->ib_qp.qp_num);
                        return ret;
                }
                break;

        case 3: /* qp squeue_addr */
                ehca_dbg(qp->ib_qp.device, "qp_num=%x squeue",
                         qp->ib_qp.qp_num);
                ret = ehca_mmap_queue(vma, &qp->ipz_squeue,
                                      &qp->mm_count_squeue);
                if (unlikely(ret)) {
                        ehca_err(qp->ib_qp.device,
                                 "ehca_mmap_queue(sq) failed rc=%x qp_num=%x",
                                 ret, qp->ib_qp.qp_num);
                        return ret;
                }
                break;

        default:
                ehca_err(qp->ib_qp.device, "bad resource type=%x qp_num=%x",
                         rsrc_type, qp->ib_qp.qp_num);
                return -EINVAL;
        }

        return 0;
}

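/*
 * mmap offset layout (fileoffset = vm_pgoff << PAGE_SHIFT), as decoded below:
 *   bits 63..32  idr handle of the CQ/QP
 *   bits 31..28  queue type (CQ, QP, ...)
 *   bits 27..24  resource type (galpa fw handle, rq, sq, cmnd_window)
 * The caller's tgid must match the pid recorded at create time, and the
 * uobject must belong to this ucontext, before anything is mapped.
 */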
int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
        u64 fileoffset = vma->vm_pgoff << PAGE_SHIFT;
        u32 idr_handle = fileoffset >> 32;
        u32 q_type = (fileoffset >> 28) & 0xF;    /* CQ, QP,...        */
        u32 rsrc_type = (fileoffset >> 24) & 0xF; /* sq,rq,cmnd_window */
        u32 cur_pid = current->tgid;
        int ret;
        unsigned long flags;
        struct ehca_cq *cq;
        struct ehca_qp *qp;
        struct ehca_pd *pd;

        switch (q_type) {
        case 1: /* CQ */
                spin_lock_irqsave(&ehca_cq_idr_lock, flags);
                cq = idr_find(&ehca_cq_idr, idr_handle);
                spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);

                /* make sure this mmap really belongs to the authorized user */
                if (!cq)
                        return -EINVAL;

                if (cq->ownpid != cur_pid) {
                        ehca_err(cq->ib_cq.device,
                                 "Invalid caller pid=%x ownpid=%x",
                                 cur_pid, cq->ownpid);
                        return -EINVAL;
                }

                if (!cq->ib_cq.uobject || cq->ib_cq.uobject->context != context)
                        return -EINVAL;

                ret = ehca_mmap_cq(vma, cq, rsrc_type);
                if (unlikely(ret)) {
                        ehca_err(cq->ib_cq.device,
                                 "ehca_mmap_cq() failed rc=%x cq_num=%x",
                                 ret, cq->cq_number);
                        return ret;
                }
                break;

        case 2: /* QP */
                spin_lock_irqsave(&ehca_qp_idr_lock, flags);
                qp = idr_find(&ehca_qp_idr, idr_handle);
                spin_unlock_irqrestore(&ehca_qp_idr_lock, flags);

                /* make sure this mmap really belongs to the authorized user */
                if (!qp)
                        return -EINVAL;

                pd = container_of(qp->ib_qp.pd, struct ehca_pd, ib_pd);
                if (pd->ownpid != cur_pid) {
                        ehca_err(qp->ib_qp.device,
                                 "Invalid caller pid=%x ownpid=%x",
                                 cur_pid, pd->ownpid);
                        return -EINVAL;
                }

                if (!qp->ib_qp.uobject || qp->ib_qp.uobject->context != context)
                        return -EINVAL;

                ret = ehca_mmap_qp(vma, qp, rsrc_type);
                if (unlikely(ret)) {
                        ehca_err(qp->ib_qp.device,
                                 "ehca_mmap_qp() failed rc=%x qp_num=%x",
                                 ret, qp->ib_qp.qp_num);
                        return ret;
                }
                break;

        default:
                ehca_gen_err("bad queue type %x", q_type);
                return -EINVAL;
        }

        return 0;
}