EDAC: i7core, sb_edac: Don't return NOTIFY_BAD from mce_decoder callback
[linux/fpc-iii.git] / drivers / infiniband / hw / mlx5 / user.h
blob 61bc308bb802ce93069601e85507849a8f61e98d
/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
33 #ifndef MLX5_IB_USER_H
34 #define MLX5_IB_USER_H
36 #include <linux/types.h>
38 #include "mlx5_ib.h"
40 enum {
41 MLX5_QP_FLAG_SIGNATURE = 1 << 0,
42 MLX5_QP_FLAG_SCATTER_CQE = 1 << 1,
45 enum {
46 MLX5_SRQ_FLAG_SIGNATURE = 1 << 0,
/* Increment this value if any changes that break userspace ABI
 * compatibility are made.
 */
53 #define MLX5_IB_UVERBS_ABI_VERSION 1
/* Make sure that all structs defined in this file remain laid out so
 * that they pack the same way on 32-bit and 64-bit architectures (to
 * avoid incompatibility between 32-bit userspace and 64-bit kernels).
 * In particular do not use pointer types -- pass pointers in __u64
 * instead.
 */
62 struct mlx5_ib_alloc_ucontext_req {
63 __u32 total_num_uuars;
64 __u32 num_low_latency_uuars;
67 struct mlx5_ib_alloc_ucontext_req_v2 {
68 __u32 total_num_uuars;
69 __u32 num_low_latency_uuars;
70 __u32 flags;
71 __u32 comp_mask;
72 __u8 max_cqe_version;
73 __u8 reserved0;
74 __u16 reserved1;
75 __u32 reserved2;
78 enum mlx5_ib_alloc_ucontext_resp_mask {
79 MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET = 1UL << 0,
82 struct mlx5_ib_alloc_ucontext_resp {
83 __u32 qp_tab_size;
84 __u32 bf_reg_size;
85 __u32 tot_uuars;
86 __u32 cache_line_size;
87 __u16 max_sq_desc_sz;
88 __u16 max_rq_desc_sz;
89 __u32 max_send_wqebb;
90 __u32 max_recv_wr;
91 __u32 max_srq_recv_wr;
92 __u16 num_ports;
93 __u16 reserved1;
94 __u32 comp_mask;
95 __u32 response_length;
96 __u8 cqe_version;
97 __u8 reserved2;
98 __u16 reserved3;
99 __u64 hca_core_clock_offset;
102 struct mlx5_ib_alloc_pd_resp {
103 __u32 pdn;
106 struct mlx5_ib_create_cq {
107 __u64 buf_addr;
108 __u64 db_addr;
109 __u32 cqe_size;
110 __u32 reserved; /* explicit padding (optional on i386) */
113 struct mlx5_ib_create_cq_resp {
114 __u32 cqn;
115 __u32 reserved;
118 struct mlx5_ib_resize_cq {
119 __u64 buf_addr;
120 __u16 cqe_size;
121 __u16 reserved0;
122 __u32 reserved1;
125 struct mlx5_ib_create_srq {
126 __u64 buf_addr;
127 __u64 db_addr;
128 __u32 flags;
129 __u32 reserved0; /* explicit padding (optional on i386) */
130 __u32 uidx;
131 __u32 reserved1;
134 struct mlx5_ib_create_srq_resp {
135 __u32 srqn;
136 __u32 reserved;
139 struct mlx5_ib_create_qp {
140 __u64 buf_addr;
141 __u64 db_addr;
142 __u32 sq_wqe_count;
143 __u32 rq_wqe_count;
144 __u32 rq_wqe_shift;
145 __u32 flags;
146 __u32 uidx;
147 __u32 reserved0;
148 __u64 sq_buf_addr;
151 struct mlx5_ib_create_qp_resp {
152 __u32 uuar_index;
155 struct mlx5_ib_alloc_mw {
156 __u32 comp_mask;
157 __u8 num_klms;
158 __u8 reserved1;
159 __u16 reserved2;
162 static inline int get_qp_user_index(struct mlx5_ib_ucontext *ucontext,
163 struct mlx5_ib_create_qp *ucmd,
164 int inlen,
165 u32 *user_index)
167 u8 cqe_version = ucontext->cqe_version;
169 if (field_avail(struct mlx5_ib_create_qp, uidx, inlen) &&
170 !cqe_version && (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
171 return 0;
173 if (!!(field_avail(struct mlx5_ib_create_qp, uidx, inlen) !=
174 !!cqe_version))
175 return -EINVAL;
177 return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
180 static inline int get_srq_user_index(struct mlx5_ib_ucontext *ucontext,
181 struct mlx5_ib_create_srq *ucmd,
182 int inlen,
183 u32 *user_index)
185 u8 cqe_version = ucontext->cqe_version;
187 if (field_avail(struct mlx5_ib_create_srq, uidx, inlen) &&
188 !cqe_version && (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
189 return 0;
191 if (!!(field_avail(struct mlx5_ib_create_srq, uidx, inlen) !=
192 !!cqe_version))
193 return -EINVAL;
195 return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
197 #endif /* MLX5_IB_USER_H */