/*
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef IB_SRP_H
#define IB_SRP_H

#include <linux/types.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>

#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_sa.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_fmr_pool.h>

enum {
	SRP_PATH_REC_TIMEOUT_MS		= 1000,
	SRP_ABORT_TIMEOUT_MS		= 5000,

	SRP_PORT_REDIRECT		= 1,
	SRP_DLID_REDIRECT		= 2,
	SRP_STALE_CONN			= 3,

	SRP_DEF_SG_TABLESIZE		= 12,

	SRP_DEFAULT_QUEUE_SIZE		= 1 << 6,
	SRP_RSP_SQ_SIZE			= 1,
	SRP_TSK_MGMT_SQ_SIZE		= 1,
	SRP_DEFAULT_CMD_SQ_SIZE		= SRP_DEFAULT_QUEUE_SIZE - SRP_RSP_SQ_SIZE -
					  SRP_TSK_MGMT_SQ_SIZE,

	SRP_TAG_NO_REQ			= ~0U,
	SRP_TAG_TSK_MGMT		= 1U << 31,

	SRP_MAX_PAGES_PER_MR		= 512,
};

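/*
 * Note on the defaults above: with SRP_DEFAULT_QUEUE_SIZE = 1 << 6 = 64,
 * the command send-queue budget works out to 64 - 1 - 1 = 62 entries,
 * i.e. one slot is reserved for an SRP_RSP and one for a task-management
 * request.
 */
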
enum srp_target_state {
	SRP_TARGET_SCANNING,
	SRP_TARGET_LIVE,
	SRP_TARGET_REMOVED,
};

enum srp_iu_type {
	SRP_IU_CMD,
	SRP_IU_TSK_MGMT,
	SRP_IU_RSP,
};

/**
 * @mr_page_mask: HCA memory registration page mask.
 * @mr_page_size: HCA memory registration page size.
 * @mr_max_size: Maximum size in bytes of a single FMR / FR registration
 *   request.
 */
struct srp_device {
	struct list_head	dev_list;
	struct ib_device	*dev;
	struct ib_pd		*pd;
	u64			mr_page_mask;
	int			mr_page_size;
	int			mr_max_size;
	int			max_pages_per_mr;
	bool			has_fmr;
	bool			has_fr;
	bool			use_fmr;
	bool			use_fast_reg;
};

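/*
 * Minimal sketch (illustrative only, not part of the driver's API) of how
 * the registration page fields above relate to each other. The helper name
 * and the mr_page_shift parameter are assumptions made for the example.
 */
static inline void srp_example_set_mr_page_fields(struct srp_device *srp_dev,
						  unsigned int mr_page_shift)
{
	srp_dev->mr_page_size = 1 << mr_page_shift;	/* bytes per HCA page */
	/* mask that aligns a DMA address down to an HCA page boundary */
	srp_dev->mr_page_mask = ~((u64)srp_dev->mr_page_size - 1);
	/* largest contiguous range a single FMR / FR registration can cover */
	srp_dev->mr_max_size  = srp_dev->mr_page_size *
				srp_dev->max_pages_per_mr;
}
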
struct srp_host {
	struct srp_device	*srp_dev;
	u8			port;
	struct device		dev;
	struct list_head	target_list;
	spinlock_t		target_lock;
	struct completion	released;
	struct list_head	list;
	struct mutex		add_target_mutex;
};

struct srp_request {
	struct scsi_cmnd	*scmnd;
	struct srp_iu		*cmd;
	union {
		struct ib_pool_fmr **fmr_list;
		struct srp_fr_desc **fr_list;
	};
	u64			*map_page;
	struct srp_direct_buf	*indirect_desc;
	dma_addr_t		indirect_dma_addr;
	short			nmdesc;
	struct ib_cqe		reg_cqe;
};

/**
 * struct srp_rdma_ch
 * @comp_vector: Completion vector used by this RDMA channel.
 */
struct srp_rdma_ch {
	/* These are RW in the hot path, and commonly used together */
	struct list_head	free_tx;
	spinlock_t		lock;
	s32			req_lim;

	/* These are read-only in the hot path */
	struct srp_target_port	*target ____cacheline_aligned_in_smp;
	struct ib_cq		*send_cq;
	struct ib_cq		*recv_cq;
	struct ib_qp		*qp;
	union {
		struct ib_fmr_pool	*fmr_pool;
		struct srp_fr_pool	*fr_pool;
	};

	/*
	 * Everything above this point is used in the hot path of
	 * command processing. Try to keep them packed into cachelines.
	 */

	struct completion	done;
	int			status;

	struct ib_sa_path_rec	path;
	struct ib_sa_query	*path_query;
	int			path_query_id;

	struct ib_cm_id		*cm_id;
	struct srp_iu		**tx_ring;
	struct srp_iu		**rx_ring;
	struct srp_request	*req_ring;
	int			max_ti_iu_len;
	int			comp_vector;

	u64			tsk_mgmt_tag;
	struct completion	tsk_mgmt_done;
	u8			tsk_mgmt_status;
	bool			connected;
};

/**
 * struct srp_target_port
 * @comp_vector: Completion vector used by the first RDMA channel created for
 *   this target port.
 */
struct srp_target_port {
	/* read and written in the hot path */
	spinlock_t		lock;

	/* read only in the hot path */
	struct ib_pd		*pd;
	struct srp_rdma_ch	*ch;
	u32			ch_count;
	u32			lkey;
	enum srp_target_state	state;
	unsigned int		max_iu_len;
	unsigned int		cmd_sg_cnt;
	unsigned int		indirect_size;
	bool			allow_ext_sg;

	/* other member variables */
	union ib_gid		sgid;
	__be64			id_ext;
	__be64			ioc_guid;
	__be64			service_id;
	__be64			initiator_ext;
	u16			io_class;
	struct srp_host		*srp_host;
	struct Scsi_Host	*scsi_host;
	struct srp_rport	*rport;
	char			target_name[32];
	unsigned int		scsi_id;
	unsigned int		sg_tablesize;
	int			mr_pool_size;
	int			mr_per_cmd;
	int			queue_size;
	int			req_ring_size;
	int			comp_vector;
	int			tl_retry_count;

	union ib_gid		orig_dgid;
	__be16			pkey;

	u32			rq_tmo_jiffies;

	int			zero_req_lim;

	struct work_struct	tl_err_work;
	struct work_struct	remove_work;

	struct list_head	list;
	bool			qp_in_error;
};

struct srp_iu {
	struct list_head	list;
	u64			dma;
	void			*buf;
	size_t			size;
	enum dma_data_direction	direction;
	struct ib_cqe		cqe;
};

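/*
 * Minimal sketch of mapping an information unit's buffer for DMA with the
 * ib_dma_* helpers, matching the fields above. The helper name is an
 * assumption made for the example; error handling is reduced to a bool.
 */
static inline bool srp_example_map_iu(struct ib_device *ibdev,
				      struct srp_iu *iu, void *buf,
				      size_t size,
				      enum dma_data_direction dir)
{
	iu->buf = buf;
	iu->size = size;
	iu->direction = dir;
	/* bus address that work requests will reference */
	iu->dma = ib_dma_map_single(ibdev, buf, size, dir);
	return !ib_dma_mapping_error(ibdev, iu->dma);
}
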
/**
 * struct srp_fr_desc - fast registration work request arguments
 * @entry: Entry in srp_fr_pool.free_list.
 * @mr:    Memory region.
 */
struct srp_fr_desc {
	struct list_head	entry;
	struct ib_mr		*mr;
};

/**
 * struct srp_fr_pool - pool of fast registration descriptors
 *
 * An entry is available for allocation if and only if it occurs in @free_list.
 *
 * @size:      Number of descriptors in this pool.
 * @max_page_list_len: Maximum fast registration work request page list length.
 * @lock:      Protects free_list.
 * @free_list: List of free descriptors.
 * @desc:      Fast registration descriptor pool.
 */
struct srp_fr_pool {
	int			size;
	int			max_page_list_len;
	spinlock_t		lock;
	struct list_head	free_list;
	struct srp_fr_desc	desc[0];
};

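/*
 * Because @desc is a zero-length trailing array, a pool and its descriptors
 * live in a single allocation. A minimal sketch of the size calculation such
 * an allocation (e.g. via kzalloc()) would use; the helper name is an
 * assumption made for the example.
 */
static inline size_t srp_example_fr_pool_alloc_size(int pool_size)
{
	return sizeof(struct srp_fr_pool) +
	       pool_size * sizeof(struct srp_fr_desc);
}
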
/**
 * struct srp_map_state - per-request DMA memory mapping state
 * @desc:	    Pointer to the element of the SRP buffer descriptor array
 *		    that is being filled in.
 * @pages:	    Array with DMA addresses of pages being considered for
 *		    memory registration.
 * @base_dma_addr:  DMA address of the first page that has not yet been mapped.
 * @dma_len:	    Number of bytes that will be registered with the next
 *		    FMR or FR memory registration call.
 * @total_len:	    Total number of bytes in the sg-list being mapped.
 * @npages:	    Number of page addresses in the pages[] array.
 * @nmdesc:	    Number of FMR or FR memory descriptors used for mapping.
 * @ndesc:	    Number of SRP buffer descriptors that have been filled in.
 */
struct srp_map_state {
	union {
		struct {
			struct ib_pool_fmr **next;
			struct ib_pool_fmr **end;
		} fmr;
		struct {
			struct srp_fr_desc **next;
			struct srp_fr_desc **end;
		} fr;
		struct {
			void		   **next;
			void		   **end;
		} gen;
	};
	struct srp_direct_buf	*desc;
	union {
		u64			*pages;
		struct scatterlist	*sg;
	};
	dma_addr_t		base_dma_addr;
	u32			dma_len;
	u32			total_len;
	unsigned int		npages;
	unsigned int		nmdesc;
	unsigned int		ndesc;
};

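/*
 * Minimal sketch, assuming the FMR/FR branch of the unions above is in use,
 * of how one page of an sg-list is typically accumulated into the mapping
 * state before the next registration call. The helper name is an assumption
 * made for the example.
 */
static inline void srp_example_map_state_add_page(struct srp_map_state *state,
						  struct srp_device *dev,
						  dma_addr_t dma_addr, u32 len)
{
	/* page-aligned bus address considered for the next FMR/FR registration */
	state->pages[state->npages++] = dma_addr & dev->mr_page_mask;
	state->dma_len += len;
}
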
#endif /* IB_SRP_H */