/*
 * zfcp device driver
 *
 * Header file for zfcp qdio interface
 *
 * Copyright IBM Corporation 2010
 */

#ifndef ZFCP_QDIO_H
#define ZFCP_QDIO_H

#include <asm/qdio.h>
#define ZFCP_QDIO_SBALE_LEN	PAGE_SIZE

/* DMQ bug workaround: don't use last SBALE */
#define ZFCP_QDIO_MAX_SBALES_PER_SBAL	(QDIO_MAX_ELEMENTS_PER_BUFFER - 1)

/* index of last SBALE (with respect to DMQ bug workaround) */
#define ZFCP_QDIO_LAST_SBALE_PER_SBAL	(ZFCP_QDIO_MAX_SBALES_PER_SBAL - 1)

/* Max SBALS for chaining */
#define ZFCP_QDIO_MAX_SBALS_PER_REQ	36

/* max. number of (data buffer) SBALEs in largest SBAL chain
 * request ID + QTCB in SBALE 0 + 1 of first SBAL in chain */
#define ZFCP_QDIO_MAX_SBALES_PER_REQ	\
	(ZFCP_QDIO_MAX_SBALS_PER_REQ * ZFCP_QDIO_MAX_SBALES_PER_SBAL - 2)
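
/*
 * Worked example (not part of the interface, assuming the usual
 * QDIO_MAX_ELEMENTS_PER_BUFFER of 128 from <asm/qdio.h>): each SBAL then
 * offers 127 usable SBALEs, so the largest chained request carries
 * 36 * 127 - 2 = 4570 data SBALEs; the 2 reserved entries are SBALE 0
 * (request ID) and SBALE 1 (QTCB) of the first SBAL in the chain.
 */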
/**
 * struct zfcp_qdio - basic qdio data structure
 * @res_q: response queue
 * @req_q: request queue
 * @req_q_idx: index of next free buffer
 * @req_q_free: number of free buffers in queue
 * @stat_lock: lock to protect req_q_util and req_q_time
 * @req_q_lock: lock to serialize access to request queue
 * @req_q_time: time of last fill level change
 * @req_q_util: used for accounting
 * @req_q_full: queue full incidents
 * @req_q_wq: used to wait for SBAL availability
 * @adapter: adapter used in conjunction with this qdio structure
 */
struct zfcp_qdio {
	struct qdio_buffer *res_q[QDIO_MAX_BUFFERS_PER_Q];
	struct qdio_buffer *req_q[QDIO_MAX_BUFFERS_PER_Q];
	u8 req_q_idx;
	atomic_t req_q_free;
	spinlock_t stat_lock;
	spinlock_t req_q_lock;
	unsigned long long req_q_time;
	u64 req_q_util;
	atomic_t req_q_full;
	wait_queue_head_t req_q_wq;
	struct zfcp_adapter *adapter;
};
/**
 * struct zfcp_qdio_req - qdio queue related values for a request
 * @sbtype: sbal type flags for sbale 0
 * @sbal_number: number of free sbals
 * @sbal_first: first sbal for this request
 * @sbal_last: last sbal for this request
 * @sbal_limit: last possible sbal for this request
 * @sbale_curr: current sbale at creation of this request
 * @sbal_response: sbal used in interrupt
 * @qdio_outb_usage: usage of outbound queue
 */
struct zfcp_qdio_req {
	u8 sbtype;
	u8 sbal_number;
	u8 sbal_first;
	u8 sbal_last;
	u8 sbal_limit;
	u8 sbale_curr;
	u8 sbal_response;
	u16 qdio_outb_usage;
};
/**
 * zfcp_qdio_sbale_req - return pointer to sbale on req_q for a request
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: pointer to struct zfcp_qdio_req
 * Returns: pointer to qdio_buffer_element (sbale) structure
 */
static inline struct qdio_buffer_element *
zfcp_qdio_sbale_req(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
	return &qdio->req_q[q_req->sbal_last]->element[0];
}
/**
 * zfcp_qdio_sbale_curr - return current sbale on req_q for a request
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: pointer to struct zfcp_qdio_req
 * Returns: pointer to qdio_buffer_element (sbale) structure
 */
static inline struct qdio_buffer_element *
zfcp_qdio_sbale_curr(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
	return &qdio->req_q[q_req->sbal_last]->element[q_req->sbale_curr];
}
/**
 * zfcp_qdio_req_init - initialize qdio request
 * @qdio: request queue where to start putting the request
 * @q_req: the qdio request to start
 * @req_id: The request id
 * @sbtype: type flags to set for all sbals
 * @data: First data block
 * @len: Length of first data block
 *
 * This is the start of putting the request into the queue, the last
 * step is passing the request to zfcp_qdio_send. The request queue
 * lock must be held during the whole process from init to send.
 */
static inline
void zfcp_qdio_req_init(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
			unsigned long req_id, u8 sbtype, void *data, u32 len)
{
	struct qdio_buffer_element *sbale;
	int count = min(atomic_read(&qdio->req_q_free),
			ZFCP_QDIO_MAX_SBALS_PER_REQ);

	q_req->sbal_first = q_req->sbal_last = qdio->req_q_idx;
	q_req->sbal_number = 1;
	q_req->sbtype = sbtype;
	q_req->sbale_curr = 1;
	q_req->sbal_limit = (q_req->sbal_first + count - 1)
					% QDIO_MAX_BUFFERS_PER_Q;

	sbale = zfcp_qdio_sbale_req(qdio, q_req);
	sbale->addr = (void *) req_id;
	sbale->eflags = 0;
	sbale->sflags = SBAL_SFLAGS0_COMMAND | sbtype;

	if (unlikely(!data))
		return;
	sbale++;
	sbale->addr = data;
	sbale->length = len;
}
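
/*
 * Minimal usage sketch (hypothetical caller; the real callers live in
 * zfcp_fsf.c). Note that qdio->req_q_lock is held from init to send:
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&qdio->req_q_lock, flags);
 *	zfcp_qdio_req_init(qdio, &req->qdio_req, req->req_id,
 *			   SBAL_SFLAGS0_TYPE_READ, req->qtcb,
 *			   sizeof(struct fsf_qtcb));
 *	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
 *	retval = zfcp_qdio_send(qdio, &req->qdio_req);
 *	spin_unlock_irqrestore(&qdio->req_q_lock, flags);
 */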
/**
 * zfcp_qdio_fill_next - Fill next sbale, only for single sbal requests
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: pointer to struct zfcp_qdio_req
 * @data: pointer to the data block to add
 * @len: length of the data block
 *
 * This is only required for single sbal requests, calling it when
 * wrapping around to the next sbal is a bug.
 */
static inline
void zfcp_qdio_fill_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
			 void *data, u32 len)
{
	struct qdio_buffer_element *sbale;

	BUG_ON(q_req->sbale_curr == ZFCP_QDIO_LAST_SBALE_PER_SBAL);
	q_req->sbale_curr++;
	sbale = zfcp_qdio_sbale_curr(qdio, q_req);
	sbale->addr = data;
	sbale->length = len;
}
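
/*
 * Illustrative sketch (hypothetical caller): a further data buffer may be
 * appended to a single-SBAL request only while the current SBALE is not
 * yet the last usable one, otherwise the BUG_ON above would trigger:
 *
 *	if (q_req->sbale_curr < ZFCP_QDIO_LAST_SBALE_PER_SBAL)
 *		zfcp_qdio_fill_next(qdio, q_req, more_data, more_len);
 */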
/**
 * zfcp_qdio_set_sbale_last - set last entry flag in current sbale
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: pointer to struct zfcp_qdio_req
 */
static inline
void zfcp_qdio_set_sbale_last(struct zfcp_qdio *qdio,
			      struct zfcp_qdio_req *q_req)
{
	struct qdio_buffer_element *sbale;

	sbale = zfcp_qdio_sbale_curr(qdio, q_req);
	sbale->eflags |= SBAL_EFLAGS_LAST_ENTRY;
}
/**
 * zfcp_qdio_sg_one_sbale - check if one sbale is enough for sg data
 * @sg: The scatterlist where to check the data size
 *
 * Returns: 1 when one sbale is enough for the data in the scatterlist,
 *	    0 if not.
 */
static inline
int zfcp_qdio_sg_one_sbale(struct scatterlist *sg)
{
	return sg_is_last(sg) && sg->length <= ZFCP_QDIO_SBALE_LEN;
}
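
/*
 * Illustrative sketch (hypothetical caller): pick between a simple
 * single-SBALE layout and SBAL chaining based on the scatterlist:
 *
 *	if (zfcp_qdio_sg_one_sbale(sg))
 *		zfcp_qdio_fill_next(qdio, q_req, sg_virt(sg), sg->length);
 *	else
 *		... build a chained request, e.g. with
 *		    zfcp_qdio_sbals_from_sg() from zfcp_qdio.c ...
 */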
/**
 * zfcp_qdio_skip_to_last_sbale - skip to last sbale in sbal
 * @q_req: The current zfcp_qdio_req
 */
static inline
void zfcp_qdio_skip_to_last_sbale(struct zfcp_qdio_req *q_req)
{
	q_req->sbale_curr = ZFCP_QDIO_LAST_SBALE_PER_SBAL;
}
/**
 * zfcp_qdio_sbal_limit - set the sbal limit for a request in q_req
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: The current zfcp_qdio_req
 * @max_sbals: maximum number of SBALs allowed
 */
static inline
void zfcp_qdio_sbal_limit(struct zfcp_qdio *qdio,
			  struct zfcp_qdio_req *q_req, int max_sbals)
{
	int count = min(atomic_read(&qdio->req_q_free), max_sbals);

	q_req->sbal_limit = (q_req->sbal_first + count - 1) %
				QDIO_MAX_BUFFERS_PER_Q;
}
/**
 * zfcp_qdio_set_data_div - set data division count
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: The current zfcp_qdio_req
 * @count: The data division count
 */
static inline
void zfcp_qdio_set_data_div(struct zfcp_qdio *qdio,
			    struct zfcp_qdio_req *q_req, u32 count)
{
	struct qdio_buffer_element *sbale;

	sbale = &qdio->req_q[q_req->sbal_first]->element[0];
	sbale->length = count;
}
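
/*
 * Illustrative sketch (hypothetical caller, loosely modeled on the FCP
 * command setup in zfcp_fsf.c; max_sbals, div_count and sg are
 * placeholders): cap the request size, record the data division count in
 * SBALE 0 of the first SBAL and map the scatterlist into the remaining
 * SBALEs:
 *
 *	zfcp_qdio_sbal_limit(qdio, &req->qdio_req, max_sbals);
 *	zfcp_qdio_set_data_div(qdio, &req->qdio_req, div_count);
 *	if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg))
 *		return -EIO;
 *	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
 */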
#endif /* ZFCP_QDIO_H */