/*
 *  IBM eServer eHCA Infiniband device driver for Linux on POWER
 *
 *  internal queue handling
 *
 *  Authors: Waleri Fomin <fomin@de.ibm.com>
 *           Reinhard Ernst <rernst@de.ibm.com>
 *           Christoph Raisch <raisch@de.ibm.com>
 *
 *  Copyright (c) 2005 IBM Corporation
 *
 *  All rights reserved.
 *
 *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
 *  BSD.
 *
 * OpenIB BSD License
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef __IPZ_PT_FN_H__
#define __IPZ_PT_FN_H__

#define EHCA_PAGESHIFT   12
#define EHCA_PAGESIZE   4096UL
#define EHCA_PAGEMASK   (~(EHCA_PAGESIZE-1))
#define EHCA_PT_ENTRIES 512UL

#include "ehca_tools.h"
#include "ehca_qes.h"

struct ehca_pd;
struct ipz_small_queue_page;

extern struct kmem_cache *small_qp_cache;
/* struct generic ehca page */
struct ipz_page {
	u8 entries[EHCA_PAGESIZE];
};

#define IPZ_SPAGE_PER_KPAGE (PAGE_SIZE / 512)
struct ipz_small_queue_page {
	unsigned long page;
	unsigned long bitmap[IPZ_SPAGE_PER_KPAGE / BITS_PER_LONG];
	int fill;
	void *mapped_addr;
	u32 mmap_count;
	struct list_head list;
};
/* struct generic queue in linux kernel virtual memory (kv) */
struct ipz_queue {
	u64 current_q_offset;		/* current queue entry */

	struct ipz_page **queue_pages;	/* array of pages belonging to queue */
	u32 qe_size;			/* queue entry size */
	u32 act_nr_of_sg;
	u32 queue_length;		/* queue length allocated in bytes */
	u32 pagesize;
	u32 toggle_state;		/* toggle flag - per page */
	u32 offset;			/* save offset within page for small_qp */
	struct ipz_small_queue_page *small_page;
};
/*
 * return current Queue Entry for a certain q_offset
 * returns address (kv) of Queue Entry
 */
static inline void *ipz_qeit_calc(struct ipz_queue *queue, u64 q_offset)
{
	struct ipz_page *current_page;

	if (q_offset >= queue->queue_length)
		return NULL;
	current_page = (queue->queue_pages)[q_offset >> EHCA_PAGESHIFT];
	return &current_page->entries[q_offset & (EHCA_PAGESIZE - 1)];
}
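/*
 * Worked example (illustrative only, not from the original source): with
 * EHCA_PAGESHIFT = 12 and EHCA_PAGESIZE = 4096, a q_offset of 0x1234 selects
 * queue_pages[0x1234 >> 12] = queue_pages[1], and the returned address points
 * at byte 0x1234 & 0xfff = 0x234 within that page.
 */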
/*
 * return current Queue Entry
 * returns address (kv) of Queue Entry
 */
static inline void *ipz_qeit_get(struct ipz_queue *queue)
{
	return ipz_qeit_calc(queue, queue->current_q_offset);
}
/*
 * return current Queue Page, increment Queue Page iterator from
 * page to page in struct ipz_queue; the last increment will return 0
 * and NOT wrap
 * returns address (kv) of Queue Page
 * warning: don't use in parallel with ipz_qeit_get_inc()
 */
void *ipz_qpageit_get_inc(struct ipz_queue *queue);
/*
 * return current Queue Entry, increment Queue Entry iterator by one
 * step in struct ipz_queue, will wrap in ringbuffer
 * returns address (kv) of Queue Entry BEFORE increment
 * warning: don't use in parallel with ipz_qpageit_get_inc()
 */
static inline void *ipz_qeit_get_inc(struct ipz_queue *queue)
{
	void *ret = ipz_qeit_get(queue);

	queue->current_q_offset += queue->qe_size;
	if (queue->current_q_offset >= queue->queue_length) {
		queue->current_q_offset = 0;
		/* toggle the valid flag */
		queue->toggle_state = (~queue->toggle_state) & 1;
	}

	return ret;
}
/*
 * return a bool indicating whether current Queue Entry is valid
 */
static inline int ipz_qeit_is_valid(struct ipz_queue *queue)
{
	struct ehca_cqe *cqe = ipz_qeit_get(queue);

	return ((cqe->cqe_flags >> 7) == (queue->toggle_state & 1));
}
/*
 * return current Queue Entry, increment Queue Entry iterator by one
 * step in struct ipz_queue, will wrap in ringbuffer
 * returns address (kv) of Queue Entry BEFORE increment
 * returns NULL and does not increment, if wrong valid state
 * warning: don't use in parallel with ipz_qpageit_get_inc()
 */
static inline void *ipz_qeit_get_inc_valid(struct ipz_queue *queue)
{
	return ipz_qeit_is_valid(queue) ? ipz_qeit_get_inc(queue) : NULL;
}
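/*
 * Illustrative sketch (not part of the original driver): draining all
 * currently valid completion queue entries from the ring.  The handler
 * callback and the function name are hypothetical; only the ipz_* helpers
 * above are from this header.
 */
static inline void ipz_example_drain_cq(struct ipz_queue *queue,
					void (*handle_cqe)(struct ehca_cqe *cqe))
{
	struct ehca_cqe *cqe;

	/* ipz_qeit_get_inc_valid() returns NULL once the next entry's valid
	 * bit no longer matches the queue's toggle_state, so the loop stops
	 * exactly at the first not-yet-written entry. */
	while ((cqe = ipz_qeit_get_inc_valid(queue)) != NULL)
		handle_cqe(cqe);
}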
/*
 * returns and resets Queue Entry iterator
 * returns address (kv) of first Queue Entry
 */
static inline void *ipz_qeit_reset(struct ipz_queue *queue)
{
	queue->current_q_offset = 0;
	return ipz_qeit_get(queue);
}
/*
 * return the q_offset corresponding to an absolute address
 */
int ipz_queue_abs_to_offset(struct ipz_queue *queue, u64 addr, u64 *q_offset);
/*
 * return the next queue offset. don't modify the queue.
 */
static inline u64 ipz_queue_advance_offset(struct ipz_queue *queue, u64 offset)
{
	offset += queue->qe_size;
	if (offset >= queue->queue_length)
		offset = 0;
	return offset;
}
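/*
 * Illustrative sketch (not part of the original driver): peeking at the entry
 * that follows the current one without consuming it, by combining
 * ipz_queue_advance_offset() with ipz_qeit_calc().  The function name is
 * hypothetical.
 */
static inline void *ipz_example_peek_next(struct ipz_queue *queue)
{
	/* compute the offset after the current entry, wrapping at the end */
	u64 next = ipz_queue_advance_offset(queue, queue->current_q_offset);

	/* translate that offset back into a kernel-virtual address */
	return ipz_qeit_calc(queue, next);
}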
/* struct generic page table */
struct ipz_pt {
	u64 entries[EHCA_PT_ENTRIES];
};
/* struct page table for a queue, only to be used in pf */
struct ipz_qpt {
	/* queue page tables (kv), use u64 because we know the element length */
	u64 *qpts;
	u32 n_qpts;
	u32 n_ptes;		/* number of page table entries */
	u64 *current_pte_addr;
};
/*
 * constructor for an ipz_queue_t, placement new for ipz_queue_t,
 * new for all dependent data structures
 * all QP Tables are the same
 * flow:
 *    allocate+pin queue
 * see ipz_qpt_ctor()
 * returns true if ok, false if out of memory
 */
int ipz_queue_ctor(struct ehca_pd *pd, struct ipz_queue *queue,
		   const u32 nr_of_pages, const u32 pagesize,
		   const u32 qe_size, const u32 nr_of_sg,
		   int is_small);
/*
 * destructor for an ipz_queue_t
 *  -# free queue
 *  see ipz_queue_ctor()
 *  returns true if ok, false if queue was NULL-ptr or free failed
 */
int ipz_queue_dtor(struct ehca_pd *pd, struct ipz_queue *queue);
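/*
 * Illustrative sketch (not part of the original driver): typical
 * constructor/destructor pairing.  The sizes (one page of 64-byte entries)
 * and the function name are made up for the example; real callers derive
 * them from device attributes and embed the ipz_queue in a larger object.
 */
static inline int ipz_example_queue_lifecycle(struct ehca_pd *pd)
{
	struct ipz_queue queue;

	/* ipz_queue_ctor() follows the "true if ok" convention noted above */
	if (!ipz_queue_ctor(pd, &queue, 1 /* nr_of_pages */, EHCA_PAGESIZE,
			    64 /* qe_size */, 0 /* nr_of_sg */, 0 /* is_small */))
		return 0;	/* out of memory */

	/* ... fill and drain the ring via the ipz_qeit_*() accessors ... */

	/* frees the queue pages again; also "true if ok" */
	return ipz_queue_dtor(pd, &queue);
}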
/*
 * constructor for an ipz_qpt_t,
 * placement new for struct ipz_queue, new for all dependent data structures
 * all QP Tables are the same,
 * flow:
 * -# allocate+pin queue
 * -# initialise ptcb
 * -# allocate+pin PTs
 * -# link PTs to a ring, according to HCA Arch, set bit62 if needed
 * -# the ring must have room for exactly nr_of_PTEs
 * see ipz_qpt_ctor()
 */
void ipz_qpt_ctor(struct ipz_qpt *qpt,
		  const u32 nr_of_qes,
		  const u32 pagesize,
		  const u32 qe_size,
		  const u8 lowbyte, const u8 toggle,
		  u32 *act_nr_of_QEs, u32 *act_nr_of_pages);
/*
 * return current Queue Entry, increment Queue Entry iterator by one
 * step in struct ipz_queue, will wrap in ringbuffer
 * returns address (kv) of Queue Entry BEFORE increment
 * warning: don't use in parallel with ipz_qpageit_get_inc()
 * warning: unpredictable results may occur if steps > act_nr_of_queue_entries
 * fix EQ page problems
 */
void *ipz_qeit_eq_get_inc(struct ipz_queue *queue);
/*
 * return current Event Queue Entry, increment Queue Entry iterator
 * by one step in struct ipz_queue if valid, will wrap in ringbuffer
 * returns address (kv) of Queue Entry BEFORE increment
 * returns NULL and does not increment, if wrong valid state
 * warning: don't use in parallel with ipz_qpageit_get_inc()
 * warning: unpredictable results may occur if steps > act_nr_of_queue_entries
 */
static inline void *ipz_eqit_eq_get_inc_valid(struct ipz_queue *queue)
{
	void *ret = ipz_qeit_get(queue);
	u32 qe = *(u8 *)ret;

	if ((qe >> 7) != (queue->toggle_state & 1))
		return NULL;
	ipz_qeit_eq_get_inc(queue); /* this is a good one */
	return ret;
}
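/*
 * Illustrative sketch (not part of the original driver): consuming all
 * currently valid event queue entries.  The handler callback and the function
 * name are hypothetical; only the ipz_* helpers above are from this header.
 */
static inline void ipz_example_poll_eq(struct ipz_queue *queue,
				       void (*handle_eqe)(void *eqe))
{
	void *eqe;

	/* ipz_eqit_eq_get_inc_valid() returns NULL (and does not advance)
	 * once the next entry's valid bit no longer matches toggle_state */
	while ((eqe = ipz_eqit_eq_get_inc_valid(queue)) != NULL)
		handle_eqe(eqe);
}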
static inline void *ipz_eqit_eq_peek_valid(struct ipz_queue *queue)
{
	void *ret = ipz_qeit_get(queue);
	u32 qe = *(u8 *)ret;

	if ((qe >> 7) != (queue->toggle_state & 1))
		return NULL;
	return ret;
}
/* returns address (GX) of first queue entry */
static inline u64 ipz_qpt_get_firstpage(struct ipz_qpt *qpt)
{
	return be64_to_cpu(qpt->qpts[0]);
}
/* returns address (kv) of first page of queue page table */
static inline void *ipz_qpt_get_qpt(struct ipz_qpt *qpt)
{
	return qpt->qpts;
}

#endif /* __IPZ_PT_FN_H__ */