/*
 *  IBM eServer eHCA Infiniband device driver for Linux on POWER
 *
 *  Struct definition for eHCA internal structures
 *
 *  Authors: Heiko J Schick <schickhj@de.ibm.com>
 *           Christoph Raisch <raisch@de.ibm.com>
 *           Joachim Fenkes <fenkes@de.ibm.com>
 *
 *  Copyright (c) 2005 IBM Corporation
 *
 *  All rights reserved.
 *
 *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
 *  BSD.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions are met:
 *
 *  Redistributions of source code must retain the above copyright notice, this
 *  list of conditions and the following disclaimer.
 *
 *  Redistributions in binary form must reproduce the above copyright notice,
 *  this list of conditions and the following disclaimer in the documentation
 *  and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef __EHCA_CLASSES_H__
#define __EHCA_CLASSES_H__

#include <linux/wait.h>
#include <linux/mutex.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>

#include "ehca_classes_pSeries.h"
#include "ipz_pt_fn.h"

#define EHCA_EQE_CACHE_SIZE 20
struct ehca_eqe_cache_entry {
	struct ehca_eqe *eqe;
	struct ehca_cq *cq;
};

struct ehca_eq {
	struct ipz_queue ipz_queue;
	struct ipz_eq_handle ipz_eq_handle;
	struct work_struct work;
	struct h_galpas galpas;
	struct tasklet_struct interrupt_task;
	spinlock_t irq_spinlock;
	struct ehca_eqe_cache_entry eqe_cache[EHCA_EQE_CACHE_SIZE];
};
struct ehca_sma_attr {
	u16 lid, lmc, sm_sl, sm_lid;
	u16 pkey_tbl_len, pkeys[16];
};
struct ehca_sport {
	struct ib_cq *ibcq_aqp1;
	struct ib_qp *ibqp_aqp1;
	enum ib_port_state port_state;
	struct ehca_sma_attr saved_attr;
};
#define HCA_CAP_MR_PGSIZE_4K  0x80000000
#define HCA_CAP_MR_PGSIZE_64K 0x40000000
#define HCA_CAP_MR_PGSIZE_1M  0x20000000
#define HCA_CAP_MR_PGSIZE_16M 0x10000000
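/*
 * Illustrative sketch (not part of the original header): one way the
 * HCA_CAP_MR_PGSIZE_* capability bits above could be mapped to the largest
 * supported MR page size. The helper name is hypothetical.
 */
static inline u64 ehca_example_max_mr_pgsize(u32 hca_cap_mr_pgsize)
{
	if (hca_cap_mr_pgsize & HCA_CAP_MR_PGSIZE_16M)
		return 16UL * 1024 * 1024;
	if (hca_cap_mr_pgsize & HCA_CAP_MR_PGSIZE_1M)
		return 1024UL * 1024;
	if (hca_cap_mr_pgsize & HCA_CAP_MR_PGSIZE_64K)
		return 64UL * 1024;
	return 4UL * 1024;	/* HCA_CAP_MR_PGSIZE_4K is the baseline */
}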
struct ehca_shca {
	struct ib_device ib_device;
	struct ibmebus_dev *ibmebus_dev;
	struct list_head shca_list;
	struct ipz_adapter_handle ipz_hca_handle;
	struct ehca_sport sport[2];
	struct ehca_mr *maxmr;
	struct h_galpas galpas;
	struct mutex modify_mutex;
	/* MR pgsize: bit 0-3 means 4K, 64K, 1M, 16M respectively */
	u32 hca_cap_mr_pgsize;
};
struct ehca_pd {
	struct ib_pd ib_pd;
	/* small queue mgmt */
	struct list_head free[2];
	struct list_head full[2];
};
enum ehca_ext_qp_type {
	EQPT_NORMAL  = 0,
	EQPT_LLQP    = 1,
	EQPT_SRQBASE = 2,
	EQPT_SRQ     = 3,
};
struct ehca_qp {
	union {
		struct ib_qp ib_qp;
		struct ib_srq ib_srq;
	};
	enum ehca_ext_qp_type ext_type;
	struct ipz_queue ipz_squeue;
	struct ipz_queue ipz_rqueue;
	struct h_galpas galpas;
	spinlock_t spinlock_s;
	spinlock_t spinlock_r;
	u32 sq_max_inline_data_size;
	struct ipz_qp_handle ipz_qp_handle;
	struct ib_qp_init_attr init_attr;
	struct ehca_cq *send_cq;
	struct ehca_cq *recv_cq;
	unsigned int sqerr_purgeflag;
	struct hlist_node list_entries;
	/* mmap counter for resources mapped into user space */
};
#define IS_SRQ(qp) (qp->ext_type == EQPT_SRQ)
#define HAS_SQ(qp) (qp->ext_type != EQPT_SRQ)
#define HAS_RQ(qp) (qp->ext_type != EQPT_SRQBASE)

/* must be power of 2 */
#define QP_HASHTAB_LEN 8
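/*
 * Illustrative sketch (assumption, not from the original header): because
 * QP_HASHTAB_LEN is a power of two, a QP number can be mapped to a bucket
 * of the per-CQ hash table with a mask instead of a modulo. The helper
 * name is hypothetical.
 */
static inline unsigned int ehca_example_qp_hash_key(u32 qp_num)
{
	return qp_num & (QP_HASHTAB_LEN - 1);
}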
struct ehca_cq {
	struct ib_cq ib_cq;
	struct ipz_queue ipz_queue;
	struct h_galpas galpas;
	struct ipz_cq_handle ipz_cq_handle;
	struct hlist_head qp_hashtab[QP_HASHTAB_LEN];
	struct list_head entry;
	u32 nr_callbacks;   /* #events assigned to cpu by scaling code */
	atomic_t nr_events; /* #events seen */
	wait_queue_head_t wait_completion;
	spinlock_t task_lock;
	/* mmap counter for resources mapped into user space */
};
enum ehca_mr_flag {
	EHCA_MR_FLAG_FMR   = 0x80000000, /* FMR, created with ehca_alloc_fmr */
	EHCA_MR_FLAG_MAXMR = 0x40000000, /* max-MR */
};
struct ehca_mr {
	union {
		struct ib_mr ib_mr;	/* must always be first in ehca_mr */
		struct ib_fmr ib_fmr;	/* must always be first in ehca_mr */
	};
	struct ib_umem *umem;
	enum ehca_mr_flag flags;
	u32 num_kpages;		/* number of kernel pages */
	u32 num_hwpages;	/* number of hw pages to form MR */
	u64 hwpage_size;	/* hw page size used for this MR */
	int acl;		/* ACL (stored here for usage in reregister) */
	u64 *start;		/* virtual start address (stored here for */
				/* usage in reregister) */
	u64 size;		/* size (stored here for usage in reregister) */
	u32 fmr_page_size;	/* page size for FMR */
	u32 fmr_max_pages;	/* max pages for FMR */
	u32 fmr_max_maps;	/* max outstanding maps for FMR */
	u32 fmr_map_cnt;	/* map counter for FMR */
	/* fw specific data */
	struct ipz_mrmw_handle ipz_mr_handle;	/* MR handle for h-calls */
	struct h_galpas galpas;
};
struct ehca_mw {
	struct ib_mw ib_mw;	/* gen2 mw, must always be first in ehca_mw */
	u8 never_bound;		/* indication MW was never bound */
	struct ipz_mrmw_handle ipz_mw_handle;	/* MW handle for h-calls */
	struct h_galpas galpas;
};
enum ehca_mr_pgi_type {
	EHCA_MR_PGI_PHYS = 1, /* type of ehca_reg_phys_mr,
			       * ehca_rereg_phys_mr,
			       * ehca_reg_internal_maxmr */
	EHCA_MR_PGI_USER = 2, /* type of ehca_reg_user_mr */
	EHCA_MR_PGI_FMR  = 3  /* type of ehca_map_phys_fmr */
};
struct ehca_mr_pginfo {
	enum ehca_mr_pgi_type type;
	u64 hwpage_size;	/* hw page size used for this MR */
	u64 num_hwpages;	/* number of hw pages */
	u64 hwpage_cnt;		/* counter for hw pages */
	u64 next_hwpage;	/* next hw page in buffer/chunk/listelem */

	union {
		struct { /* type EHCA_MR_PGI_PHYS section */
			struct ib_phys_buf *phys_buf_array;
		} phy;
		struct { /* type EHCA_MR_PGI_USER section */
			struct ib_umem *region;
			struct ib_umem_chunk *next_chunk;
		} usr;
		struct { /* type EHCA_MR_PGI_FMR section */
			u64 *page_list;
		} fmr;
	} u;
};
/* output parameters for MR/FMR hipz calls */
struct ehca_mr_hipzout_parms {
	struct ipz_mrmw_handle handle;
};
/* output parameters for MW hipz calls */
struct ehca_mw_hipzout_parms {
	struct ipz_mrmw_handle handle;
};
struct ehca_av {
	struct ib_ah ib_ah;
	struct ehca_ud_av av;
};
struct ehca_ucontext {
	struct ib_ucontext ib_ucontext;
};
int ehca_init_pd_cache(void);
void ehca_cleanup_pd_cache(void);
int ehca_init_cq_cache(void);
void ehca_cleanup_cq_cache(void);
int ehca_init_qp_cache(void);
void ehca_cleanup_qp_cache(void);
int ehca_init_av_cache(void);
void ehca_cleanup_av_cache(void);
int ehca_init_mrmw_cache(void);
void ehca_cleanup_mrmw_cache(void);
int ehca_init_small_qp_cache(void);
void ehca_cleanup_small_qp_cache(void);
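/*
 * Illustrative sketch (not part of the original header): the cache
 * init/cleanup pairs above are meant to be called symmetrically, e.g.
 * during driver load/unload, unwinding already-created caches when a
 * later one fails. The function name is hypothetical and only a subset
 * of the caches is shown.
 */
static inline int ehca_example_create_caches(void)
{
	int ret;

	ret = ehca_init_pd_cache();
	if (ret)
		return ret;
	ret = ehca_init_cq_cache();
	if (ret)
		goto cleanup_pd;
	ret = ehca_init_qp_cache();
	if (ret)
		goto cleanup_cq;
	return 0;

cleanup_cq:
	ehca_cleanup_cq_cache();
cleanup_pd:
	ehca_cleanup_pd_cache();
	return ret;
}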
extern rwlock_t ehca_qp_idr_lock;
extern rwlock_t ehca_cq_idr_lock;
extern struct idr ehca_qp_idr;
extern struct idr ehca_cq_idr;

extern int ehca_static_rate;
extern int ehca_port_act_time;
extern int ehca_use_hp_mr;
extern int ehca_scaling_code;
extern int ehca_mr_largepage;
struct ipzu_queue_resp {
	u32 qe_size;      /* queue entry size */
	u32 queue_length; /* queue length allocated in bytes */
	u32 offset;       /* save offset within a page for small_qp */
};
struct ehca_create_cq_resp {
	struct ipzu_queue_resp ipz_queue;
};
struct ehca_create_qp_resp {
	/* qp_num assigned by ehca: sqp0/1 may have got different numbers */
	u32 real_qp_num;
	struct ipzu_queue_resp ipz_squeue;
	struct ipzu_queue_resp ipz_rqueue;
};
struct ehca_alloc_cq_parms {
	u32 act_nr_of_entries;
	struct ipz_eq_handle eq_handle;
};
enum ehca_service_type {
	ST_RC = 0,
	ST_UC = 1,
	ST_RD = 2,
	ST_UD = 3,
};
enum ehca_ll_comp_flags {
	LLQP_SEND_COMP = 0x20,
	LLQP_RECV_COMP = 0x40,
	LLQP_COMP_MASK = 0x60,
};
struct ehca_alloc_queue_parms {
	/* input parameters */

	/* output parameters */
	u32 queue_size; /* bytes for small queues, pages otherwise */
};
struct ehca_alloc_qp_parms {
	struct ehca_alloc_queue_parms squeue;
	struct ehca_alloc_queue_parms rqueue;

	/* input parameters */
	enum ehca_service_type servicetype;
	enum ehca_ext_qp_type ext_type;
	enum ehca_ll_comp_flags ll_comp_flags;

	struct ipz_eq_handle eq_handle;
	struct ipz_cq_handle send_cq_handle, recv_cq_handle;

	u32 srq_qpn, srq_token, srq_limit;

	/* output parameters */
	struct ipz_qp_handle qp_handle;
	struct h_galpas galpas;
};
int ehca_cq_assign_qp(struct ehca_cq *cq, struct ehca_qp *qp);
int ehca_cq_unassign_qp(struct ehca_cq *cq, unsigned int qp_num);
struct ehca_qp *ehca_cq_get_qp(struct ehca_cq *cq, int qp_num);