/* SPDX-License-Identifier: GPL-2.0 */
/*
 * zfcp device driver
 *
 * Header file for zfcp qdio interface
 *
 * Copyright IBM Corp. 2010
 */

#ifndef ZFCP_QDIO_H
#define ZFCP_QDIO_H

#include <asm/qdio.h>

#define ZFCP_QDIO_SBALE_LEN	PAGE_SIZE

/* Max SBALS for chaining */
#define ZFCP_QDIO_MAX_SBALS_PER_REQ	36

/**
 * struct zfcp_qdio - basic qdio data structure
 * @res_q: response queue
 * @req_q: request queue
 * @req_q_idx: index of next free buffer
 * @req_q_free: number of free buffers in queue
 * @stat_lock: lock to protect req_q_util and req_q_time
 * @req_q_lock: lock to serialize access to request queue
 * @req_q_time: time of last fill level change
 * @req_q_util: used for accounting
 * @req_q_full: queue full incidents
 * @req_q_wq: used to wait for SBAL availability
 * @adapter: adapter used in conjunction with this qdio structure
 * @max_sbale_per_sbal: qdio limit per sbal
 * @max_sbale_per_req: qdio limit per request
 */
struct zfcp_qdio {
	struct qdio_buffer *res_q[QDIO_MAX_BUFFERS_PER_Q];
	struct qdio_buffer *req_q[QDIO_MAX_BUFFERS_PER_Q];
	u8 req_q_idx;
	atomic_t req_q_free;
	spinlock_t stat_lock;
	spinlock_t req_q_lock;
	unsigned long long req_q_time;
	u64 req_q_util;
	atomic_t req_q_full;
	wait_queue_head_t req_q_wq;
	struct zfcp_adapter *adapter;
	u16 max_sbale_per_sbal;
	u16 max_sbale_per_req;
};
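
/*
 * Illustrative sketch, not actual zfcp driver code: the request queue above
 * is a ring of QDIO_MAX_BUFFERS_PER_Q SBALs, with req_q_idx naming the next
 * free SBAL and req_q_free counting how many are left.  The hypothetical
 * helper below only mirrors that bookkeeping pattern (advance the index
 * modulo the ring size, drop the free count).
 */
static inline void zfcp_qdio_example_consume(struct zfcp_qdio *qdio,
					     u8 sbal_count)
{
	/* advance the next-free index, wrapping around the ring */
	qdio->req_q_idx = (qdio->req_q_idx + sbal_count) %
				QDIO_MAX_BUFFERS_PER_Q;
	/* account for the SBALs no longer available to new requests */
	atomic_sub(sbal_count, &qdio->req_q_free);
}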

/**
 * struct zfcp_qdio_req - qdio queue related values for a request
 * @sbtype: sbal type flags for sbale 0
 * @sbal_number: number of free sbals
 * @sbal_first: first sbal for this request
 * @sbal_last: last sbal for this request
 * @sbal_limit: last possible sbal for this request
 * @sbale_curr: current sbale at creation of this request
 * @qdio_outb_usage: usage of outbound queue
 */
struct zfcp_qdio_req {
	u8 sbtype;
	u8 sbal_number;
	u8 sbal_first;
	u8 sbal_last;
	u8 sbal_limit;
	u8 sbale_curr;
	u16 qdio_outb_usage;
};

/**
 * zfcp_qdio_sbale_req - return pointer to sbale on req_q for a request
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: pointer to struct zfcp_qdio_req
 * Returns: pointer to qdio_buffer_element (sbale) structure
 */
static inline struct qdio_buffer_element *
zfcp_qdio_sbale_req(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
	return &qdio->req_q[q_req->sbal_last]->element[0];
}

/**
 * zfcp_qdio_sbale_curr - return current sbale on req_q for a request
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: pointer to struct zfcp_qdio_req
 * Returns: pointer to qdio_buffer_element (sbale) structure
 */
static inline struct qdio_buffer_element *
zfcp_qdio_sbale_curr(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
	return &qdio->req_q[q_req->sbal_last]->element[q_req->sbale_curr];
}

/**
 * zfcp_qdio_req_init - initialize qdio request
 * @qdio: request queue where to start putting the request
 * @q_req: the qdio request to start
 * @req_id: The request id
 * @sbtype: type flags to set for all sbals
 * @data: First data block
 * @len: Length of first data block
 *
 * This is the start of putting the request into the queue, the last
 * step is passing the request to zfcp_qdio_send. The request queue
 * lock must be held during the whole process from init to send.
 */
static inline
void zfcp_qdio_req_init(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
			unsigned long req_id, u8 sbtype, void *data, u32 len)
{
	struct qdio_buffer_element *sbale;
	int count = min(atomic_read(&qdio->req_q_free),
			ZFCP_QDIO_MAX_SBALS_PER_REQ);

	q_req->sbal_first = q_req->sbal_last = qdio->req_q_idx;
	q_req->sbal_number = 1;
	q_req->sbtype = sbtype;
	q_req->sbale_curr = 1;
	q_req->sbal_limit = (q_req->sbal_first + count - 1)
					% QDIO_MAX_BUFFERS_PER_Q;

	sbale = zfcp_qdio_sbale_req(qdio, q_req);
	sbale->addr = req_id;
	sbale->eflags = 0;
	sbale->sflags = SBAL_SFLAGS0_COMMAND | sbtype;

	if (unlikely(!data))
		return;
	sbale++;
	sbale->addr = virt_to_phys(data);
	sbale->length = len;
}
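
/*
 * Illustrative sketch, not actual zfcp driver code: start a request whose
 * single data block fits into the first SBAL.  The request id, data buffer
 * and SBAL type below are purely hypothetical, and the caller is assumed to
 * hold qdio->req_q_lock from here until zfcp_qdio_send(), as required by
 * zfcp_qdio_req_init() above.
 */
static inline struct qdio_buffer_element *
zfcp_qdio_example_start_req(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
			    unsigned long req_id, void *data, u32 len)
{
	/* SBALE 0 carries the command, SBALE 1 the first data block */
	zfcp_qdio_req_init(qdio, q_req, req_id, SBAL_SFLAGS0_TYPE_WRITE,
			   data, len);
	/* sbale_curr now indexes the data SBALE that was just filled */
	return zfcp_qdio_sbale_curr(qdio, q_req);
}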

/**
 * zfcp_qdio_fill_next - Fill next sbale, only for single sbal requests
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: pointer to struct zfcp_qdio_req
 * @data: pointer to data
 * @len: length of data
 *
 * This is only required for single sbal requests, calling it when
 * wrapping around to the next sbal is a bug.
 */
static inline
void zfcp_qdio_fill_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
			 void *data, u32 len)
{
	struct qdio_buffer_element *sbale;

	BUG_ON(q_req->sbale_curr == qdio->max_sbale_per_sbal - 1);
	q_req->sbale_curr++;
	sbale = zfcp_qdio_sbale_curr(qdio, q_req);
	sbale->addr = virt_to_phys(data);
	sbale->length = len;
}
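
/*
 * Illustrative sketch, not actual zfcp driver code: append one more small
 * data block to a request that zfcp_qdio_req_init() started in the current
 * SBAL.  The explicit check simply mirrors the BUG_ON() above, so this
 * hypothetical caller never wraps into the next SBAL.
 */
static inline bool zfcp_qdio_example_add_block(struct zfcp_qdio *qdio,
					       struct zfcp_qdio_req *q_req,
					       void *data, u32 len)
{
	/* no room left in the current SBAL: tell the caller instead of
	 * tripping the BUG_ON() in zfcp_qdio_fill_next() */
	if (q_req->sbale_curr == qdio->max_sbale_per_sbal - 1)
		return false;

	zfcp_qdio_fill_next(qdio, q_req, data, len);
	return true;
}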

/**
 * zfcp_qdio_set_sbale_last - set last entry flag in current sbale
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: pointer to struct zfcp_qdio_req
 */
static inline
void zfcp_qdio_set_sbale_last(struct zfcp_qdio *qdio,
			      struct zfcp_qdio_req *q_req)
{
	struct qdio_buffer_element *sbale;

	sbale = zfcp_qdio_sbale_curr(qdio, q_req);
	sbale->eflags |= SBAL_EFLAGS_LAST_ENTRY;
}

/**
 * zfcp_qdio_sg_one_sbale - check if one sbale is enough for sg data
 * @sg: The scatterlist where to check the data size
 *
 * Returns: 1 when one sbale is enough for the data in the scatterlist,
 *	    0 if not.
 */
static inline
int zfcp_qdio_sg_one_sbale(struct scatterlist *sg)
{
	return sg_is_last(sg) && sg->length <= ZFCP_QDIO_SBALE_LEN;
}
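
/*
 * Illustrative sketch, not actual zfcp driver code: use the check above to
 * pick between filling a single SBALE directly and mapping a longer
 * scatterlist over several SBALs.  zfcp_qdio_sbals_from_sg() is the driver's
 * general mapping routine declared in zfcp_ext.h; its prototype is repeated
 * here only to keep the sketch self-contained.
 */
int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio,
			    struct zfcp_qdio_req *q_req,
			    struct scatterlist *sg);

static inline int zfcp_qdio_example_map_sg(struct zfcp_qdio *qdio,
					   struct zfcp_qdio_req *q_req,
					   struct scatterlist *sg)
{
	if (zfcp_qdio_sg_one_sbale(sg)) {
		/* one entry that fits one SBALE: fill it in place */
		zfcp_qdio_fill_next(qdio, q_req, sg_virt(sg), sg->length);
		return 0;
	}
	/* otherwise spread the scatterlist over as many SBALs as needed */
	return zfcp_qdio_sbals_from_sg(qdio, q_req, sg);
}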

/**
 * zfcp_qdio_skip_to_last_sbale - skip to last sbale in sbal
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: The current zfcp_qdio_req
 */
static inline
void zfcp_qdio_skip_to_last_sbale(struct zfcp_qdio *qdio,
				  struct zfcp_qdio_req *q_req)
{
	q_req->sbale_curr = qdio->max_sbale_per_sbal - 1;
}

/**
 * zfcp_qdio_sbal_limit - set the sbal limit for a request in q_req
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: The current zfcp_qdio_req
 * @max_sbals: maximum number of SBALs allowed
 */
static inline
void zfcp_qdio_sbal_limit(struct zfcp_qdio *qdio,
			  struct zfcp_qdio_req *q_req, int max_sbals)
{
	int count = min(atomic_read(&qdio->req_q_free), max_sbals);

	q_req->sbal_limit = (q_req->sbal_first + count - 1) %
				QDIO_MAX_BUFFERS_PER_Q;
}
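
/*
 * Worked example, not part of the original header: with
 * QDIO_MAX_BUFFERS_PER_Q == 128, sbal_first == 126 and only 5 free SBALs
 * (count == 5), the usable SBALs are 126, 127, 0, 1 and 2, so
 * sbal_limit == (126 + 5 - 1) % 128 == 2; the limit wraps around the end
 * of the ring just like the request queue index itself.
 */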

/**
 * zfcp_qdio_set_data_div - set data division count
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: The current zfcp_qdio_req
 * @count: The data division count
 */
static inline
void zfcp_qdio_set_data_div(struct zfcp_qdio *qdio,
			    struct zfcp_qdio_req *q_req, u32 count)
{
	struct qdio_buffer_element *sbale;

	sbale = qdio->req_q[q_req->sbal_first]->element;
	sbale->length = count;
}

/**
 * zfcp_qdio_real_bytes - count bytes used
 * @sg: pointer to struct scatterlist
 */
static inline
unsigned int zfcp_qdio_real_bytes(struct scatterlist *sg)
{
	unsigned int real_bytes = 0;

	for (; sg; sg = sg_next(sg))
		real_bytes += sg->length;

	return real_bytes;
}

/**
 * zfcp_qdio_set_scount - set SBAL count value
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: The current zfcp_qdio_req
 */
static inline
void zfcp_qdio_set_scount(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
	struct qdio_buffer_element *sbale;

	sbale = qdio->req_q[q_req->sbal_first]->element;
	sbale->scount = q_req->sbal_number - 1;
}
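
/*
 * Illustrative sketch, not actual zfcp driver code: finish a request whose
 * payload is already mapped into one or more SBALs and hand it to qdio.
 * zfcp_qdio_send() is declared in zfcp_ext.h; its prototype is repeated
 * here only to keep the sketch self-contained.  Whether the scount value
 * is required depends on the adapter mode, so calling it unconditionally
 * below is purely for illustration.
 */
int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req);

static inline int zfcp_qdio_example_finish(struct zfcp_qdio *qdio,
					   struct zfcp_qdio_req *q_req)
{
	/* flag the current SBALE as the last entry of the request */
	zfcp_qdio_set_sbale_last(qdio, q_req);
	/* record in SBAL 0 how many further SBALs belong to the request */
	zfcp_qdio_set_scount(qdio, q_req);
	/* pass the SBALs to the qdio layer for transmission */
	return zfcp_qdio_send(qdio, q_req);
}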

#endif /* ZFCP_QDIO_H */