Merge branch 'akpm'
[linux-2.6/next.git] / drivers / s390 / scsi / zfcp_qdio.h
blob8ac7f5342d29d807e815e939b54f68f3c8f38fca
/*
 * zfcp device driver
 *
 * Header file for zfcp qdio interface
 *
 * Copyright IBM Corporation 2010
 */
9 #ifndef ZFCP_QDIO_H
10 #define ZFCP_QDIO_H
12 #include <asm/qdio.h>
14 #define ZFCP_QDIO_SBALE_LEN PAGE_SIZE
16 /* Max SBALS for chaining */
17 #define ZFCP_QDIO_MAX_SBALS_PER_REQ 36
19 /**
20 * struct zfcp_qdio - basic qdio data structure
21 * @res_q: response queue
22 * @req_q: request queue
23 * @req_q_idx: index of next free buffer
24 * @req_q_free: number of free buffers in queue
25 * @stat_lock: lock to protect req_q_util and req_q_time
26 * @req_q_lock: lock to serialize access to request queue
27 * @req_q_time: time of last fill level change
28 * @req_q_util: used for accounting
29 * @req_q_full: queue full incidents
30 * @req_q_wq: used to wait for SBAL availability
31 * @adapter: adapter used in conjunction with this qdio structure
33 struct zfcp_qdio {
34 struct qdio_buffer *res_q[QDIO_MAX_BUFFERS_PER_Q];
35 struct qdio_buffer *req_q[QDIO_MAX_BUFFERS_PER_Q];
36 u8 req_q_idx;
37 atomic_t req_q_free;
38 spinlock_t stat_lock;
39 spinlock_t req_q_lock;
40 unsigned long long req_q_time;
41 u64 req_q_util;
42 atomic_t req_q_full;
43 wait_queue_head_t req_q_wq;
44 struct zfcp_adapter *adapter;
45 u16 max_sbale_per_sbal;
46 u16 max_sbale_per_req;
49 /**
50 * struct zfcp_qdio_req - qdio queue related values for a request
51 * @sbtype: sbal type flags for sbale 0
52 * @sbal_number: number of free sbals
53 * @sbal_first: first sbal for this request
54 * @sbal_last: last sbal for this request
55 * @sbal_limit: last possible sbal for this request
56 * @sbale_curr: current sbale at creation of this request
57 * @sbal_response: sbal used in interrupt
58 * @qdio_outb_usage: usage of outbound queue
60 struct zfcp_qdio_req {
61 u8 sbtype;
62 u8 sbal_number;
63 u8 sbal_first;
64 u8 sbal_last;
65 u8 sbal_limit;
66 u8 sbale_curr;
67 u8 sbal_response;
68 u16 qdio_outb_usage;
71 /**
72 * zfcp_qdio_sbale_req - return pointer to sbale on req_q for a request
73 * @qdio: pointer to struct zfcp_qdio
74 * @q_rec: pointer to struct zfcp_qdio_req
75 * Returns: pointer to qdio_buffer_element (sbale) structure
77 static inline struct qdio_buffer_element *
78 zfcp_qdio_sbale_req(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
80 return &qdio->req_q[q_req->sbal_last]->element[0];
83 /**
84 * zfcp_qdio_sbale_curr - return current sbale on req_q for a request
85 * @qdio: pointer to struct zfcp_qdio
86 * @fsf_req: pointer to struct zfcp_fsf_req
87 * Returns: pointer to qdio_buffer_element (sbale) structure
89 static inline struct qdio_buffer_element *
90 zfcp_qdio_sbale_curr(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
92 return &qdio->req_q[q_req->sbal_last]->element[q_req->sbale_curr];
95 /**
96 * zfcp_qdio_req_init - initialize qdio request
97 * @qdio: request queue where to start putting the request
98 * @q_req: the qdio request to start
99 * @req_id: The request id
100 * @sbtype: type flags to set for all sbals
101 * @data: First data block
102 * @len: Length of first data block
104 * This is the start of putting the request into the queue, the last
105 * step is passing the request to zfcp_qdio_send. The request queue
106 * lock must be held during the whole process from init to send.
108 static inline
109 void zfcp_qdio_req_init(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
110 unsigned long req_id, u8 sbtype, void *data, u32 len)
112 struct qdio_buffer_element *sbale;
113 int count = min(atomic_read(&qdio->req_q_free),
114 ZFCP_QDIO_MAX_SBALS_PER_REQ);
116 q_req->sbal_first = q_req->sbal_last = qdio->req_q_idx;
117 q_req->sbal_number = 1;
118 q_req->sbtype = sbtype;
119 q_req->sbale_curr = 1;
120 q_req->sbal_limit = (q_req->sbal_first + count - 1)
121 % QDIO_MAX_BUFFERS_PER_Q;
123 sbale = zfcp_qdio_sbale_req(qdio, q_req);
124 sbale->addr = (void *) req_id;
125 sbale->eflags = 0;
126 sbale->sflags = SBAL_SFLAGS0_COMMAND | sbtype;
128 if (unlikely(!data))
129 return;
130 sbale++;
131 sbale->addr = data;
132 sbale->length = len;
136 * zfcp_qdio_fill_next - Fill next sbale, only for single sbal requests
137 * @qdio: pointer to struct zfcp_qdio
138 * @q_req: pointer to struct zfcp_queue_req
140 * This is only required for single sbal requests, calling it when
141 * wrapping around to the next sbal is a bug.
143 static inline
144 void zfcp_qdio_fill_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
145 void *data, u32 len)
147 struct qdio_buffer_element *sbale;
149 BUG_ON(q_req->sbale_curr == qdio->max_sbale_per_sbal - 1);
150 q_req->sbale_curr++;
151 sbale = zfcp_qdio_sbale_curr(qdio, q_req);
152 sbale->addr = data;
153 sbale->length = len;
157 * zfcp_qdio_set_sbale_last - set last entry flag in current sbale
158 * @qdio: pointer to struct zfcp_qdio
159 * @q_req: pointer to struct zfcp_queue_req
161 static inline
162 void zfcp_qdio_set_sbale_last(struct zfcp_qdio *qdio,
163 struct zfcp_qdio_req *q_req)
165 struct qdio_buffer_element *sbale;
167 sbale = zfcp_qdio_sbale_curr(qdio, q_req);
168 sbale->eflags |= SBAL_EFLAGS_LAST_ENTRY;
172 * zfcp_qdio_sg_one_sbal - check if one sbale is enough for sg data
173 * @sg: The scatterlist where to check the data size
175 * Returns: 1 when one sbale is enough for the data in the scatterlist,
176 * 0 if not.
178 static inline
179 int zfcp_qdio_sg_one_sbale(struct scatterlist *sg)
181 return sg_is_last(sg) && sg->length <= ZFCP_QDIO_SBALE_LEN;
185 * zfcp_qdio_skip_to_last_sbale - skip to last sbale in sbal
186 * @q_req: The current zfcp_qdio_req
188 static inline
189 void zfcp_qdio_skip_to_last_sbale(struct zfcp_qdio *qdio,
190 struct zfcp_qdio_req *q_req)
192 q_req->sbale_curr = qdio->max_sbale_per_sbal - 1;
196 * zfcp_qdio_sbal_limit - set the sbal limit for a request in q_req
197 * @qdio: pointer to struct zfcp_qdio
198 * @q_req: The current zfcp_qdio_req
199 * @max_sbals: maximum number of SBALs allowed
201 static inline
202 void zfcp_qdio_sbal_limit(struct zfcp_qdio *qdio,
203 struct zfcp_qdio_req *q_req, int max_sbals)
205 int count = min(atomic_read(&qdio->req_q_free), max_sbals);
207 q_req->sbal_limit = (q_req->sbal_first + count - 1) %
208 QDIO_MAX_BUFFERS_PER_Q;
212 * zfcp_qdio_set_data_div - set data division count
213 * @qdio: pointer to struct zfcp_qdio
214 * @q_req: The current zfcp_qdio_req
215 * @count: The data division count
217 static inline
218 void zfcp_qdio_set_data_div(struct zfcp_qdio *qdio,
219 struct zfcp_qdio_req *q_req, u32 count)
221 struct qdio_buffer_element *sbale;
223 sbale = qdio->req_q[q_req->sbal_first]->element;
224 sbale->length = count;
228 * zfcp_qdio_sbale_count - count sbale used
229 * @sg: pointer to struct scatterlist
231 static inline
232 unsigned int zfcp_qdio_sbale_count(struct scatterlist *sg)
234 unsigned int count = 0;
236 for (; sg; sg = sg_next(sg))
237 count++;
239 return count;
243 * zfcp_qdio_real_bytes - count bytes used
244 * @sg: pointer to struct scatterlist
246 static inline
247 unsigned int zfcp_qdio_real_bytes(struct scatterlist *sg)
249 unsigned int real_bytes = 0;
251 for (; sg; sg = sg_next(sg))
252 real_bytes += sg->length;
254 return real_bytes;
258 * zfcp_qdio_set_scount - set SBAL count value
259 * @qdio: pointer to struct zfcp_qdio
260 * @q_req: The current zfcp_qdio_req
262 static inline
263 void zfcp_qdio_set_scount(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
265 struct qdio_buffer_element *sbale;
267 sbale = qdio->req_q[q_req->sbal_first]->element;
268 sbale->scount = q_req->sbal_number - 1;
271 #endif /* ZFCP_QDIO_H */