/*
 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
 *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
 * Bugreports.to..: <Linux390@de.ibm.com>
 * Copyright IBM Corp. 2000
 *
 * History of changes
 * 07/24/00 new file
 * 05/04/02 code restructuring.
 */

#ifndef _S390_IDALS_H
#define _S390_IDALS_H

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <asm/cio.h>
#include <asm/uaccess.h>

#ifdef CONFIG_64BIT
#define IDA_SIZE_LOG 12 /* 11 for 2k , 12 for 4k */
#else
#define IDA_SIZE_LOG 11 /* 11 for 2k , 12 for 4k */
#endif
#define IDA_BLOCK_SIZE (1L<<IDA_SIZE_LOG)

/*
 * Test if an address/length pair needs an idal list.
 */
static inline int
idal_is_needed(void *vaddr, unsigned int length)
{
#ifdef CONFIG_64BIT
	return ((__pa(vaddr) + length - 1) >> 31) != 0;
#else
	return 0;
#endif
}

/*
 * Return the number of idal words needed for an address/length pair.
 */
static inline unsigned int idal_nr_words(void *vaddr, unsigned int length)
{
	return ((__pa(vaddr) & (IDA_BLOCK_SIZE-1)) + length +
		(IDA_BLOCK_SIZE-1)) >> IDA_SIZE_LOG;
}
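
/*
 * Worked example (sketch, assuming 4 KB IDA blocks): a buffer that starts
 * 0xffc bytes into its block and is 8 bytes long straddles two blocks, so
 * idal_nr_words() returns ((0xffc + 8 + 0xfff) >> 12) == 2.
 */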

/*
 * Create the list of idal words for an address/length pair.
 */
static inline unsigned long *idal_create_words(unsigned long *idaws,
					       void *vaddr, unsigned int length)
{
	unsigned long paddr;
	unsigned int cidaw;

	paddr = __pa(vaddr);
	cidaw = ((paddr & (IDA_BLOCK_SIZE-1)) + length +
		 (IDA_BLOCK_SIZE-1)) >> IDA_SIZE_LOG;
	*idaws++ = paddr;
	paddr &= -IDA_BLOCK_SIZE;
	while (--cidaw > 0) {
		paddr += IDA_BLOCK_SIZE;
		*idaws++ = paddr;
	}
	return idaws;
}
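
/*
 * Example (sketch): for a buffer at paddr == 0x80000ffc with length 8,
 * idal_create_words() emits two words:
 *
 *	idaws[0] = 0x80000ffc;		(exact start of the data)
 *	idaws[1] = 0x80001000;		(next IDA block boundary)
 *
 * i.e. the first word keeps the byte offset, all following words are
 * IDA block aligned.
 */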

/*
 * Sets the address of the data in CCW.
 * If necessary it allocates an IDAL and sets the appropriate flags.
 */
static inline int
set_normalized_cda(struct ccw1 * ccw, void *vaddr)
{
#ifdef CONFIG_64BIT
	unsigned int nridaws;
	unsigned long *idal;

	if (ccw->flags & CCW_FLAG_IDA)
		return -EINVAL;
	nridaws = idal_nr_words(vaddr, ccw->count);
	if (nridaws > 0) {
		idal = kmalloc(nridaws * sizeof(unsigned long),
			       GFP_ATOMIC | GFP_DMA);
		if (idal == NULL)
			return -ENOMEM;
		idal_create_words(idal, vaddr, ccw->count);
		ccw->flags |= CCW_FLAG_IDA;
		vaddr = idal;
	}
#endif
	ccw->cda = (__u32)(unsigned long) vaddr;
	return 0;
}

/*
 * Releases any allocated IDAL related to the CCW.
 */
static inline void
clear_normalized_cda(struct ccw1 * ccw)
{
#ifdef CONFIG_64BIT
	if (ccw->flags & CCW_FLAG_IDA) {
		kfree((void *)(unsigned long) ccw->cda);
		ccw->flags &= ~CCW_FLAG_IDA;
	}
#endif
	ccw->cda = 0;
}
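
/*
 * Usage sketch, assuming "ccw" points into a prepared channel program and
 * "data" is a kernel buffer of ccw->count bytes (both placeholders):
 *
 *	rc = set_normalized_cda(ccw, data);
 *	if (rc)
 *		return rc;
 *	... start the I/O and wait for completion ...
 *	clear_normalized_cda(ccw);
 *
 * clear_normalized_cda() frees the IDAL (if one was allocated), drops
 * CCW_FLAG_IDA and zeroes ccw->cda.
 */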

/*
 * Idal buffer extension
 */
struct idal_buffer {
	size_t size;
	size_t page_order;
	void *data[0];
};

/*
 * Allocate an idal buffer
 */
static inline struct idal_buffer *
idal_buffer_alloc(size_t size, int page_order)
{
	struct idal_buffer *ib;
	int nr_chunks, nr_ptrs, i;

	nr_ptrs = (size + IDA_BLOCK_SIZE - 1) >> IDA_SIZE_LOG;
	nr_chunks = (4096 << page_order) >> IDA_SIZE_LOG;
	ib = kmalloc(sizeof(struct idal_buffer) + nr_ptrs*sizeof(void *),
		     GFP_DMA | GFP_KERNEL);
	if (ib == NULL)
		return ERR_PTR(-ENOMEM);
	ib->size = size;
	ib->page_order = page_order;
	for (i = 0; i < nr_ptrs; i++) {
		if ((i & (nr_chunks - 1)) != 0) {
			ib->data[i] = ib->data[i-1] + IDA_BLOCK_SIZE;
			continue;
		}
		ib->data[i] = (void *)
			__get_free_pages(GFP_KERNEL, page_order);
		if (ib->data[i] != NULL)
			continue;
		// Not enough memory
		while (i >= nr_chunks) {
			i -= nr_chunks;
			free_pages((unsigned long) ib->data[i],
				   ib->page_order);
		}
		kfree(ib);
		return ERR_PTR(-ENOMEM);
	}
	return ib;
}
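
/*
 * Worked example (sketch): idal_buffer_alloc(3 * 4096, 1) with 4 KB IDA
 * blocks yields nr_ptrs == 3 and nr_chunks == 2, so data[0] points to a
 * fresh 8 KB chunk, data[1] == data[0] + 4096 inside the same chunk, and
 * data[2] starts a second 8 KB chunk of which only the first 4 KB belong
 * to the buffer.
 */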

/*
 * Free an idal buffer.
 */
static inline void
idal_buffer_free(struct idal_buffer *ib)
{
	int nr_chunks, nr_ptrs, i;

	nr_ptrs = (ib->size + IDA_BLOCK_SIZE - 1) >> IDA_SIZE_LOG;
	nr_chunks = (4096 << ib->page_order) >> IDA_SIZE_LOG;
	for (i = 0; i < nr_ptrs; i += nr_chunks)
		free_pages((unsigned long) ib->data[i], ib->page_order);
	kfree(ib);
}

/*
 * Test if an idal list is really needed.
 */
static inline int
__idal_buffer_is_needed(struct idal_buffer *ib)
{
#ifdef CONFIG_64BIT
	return ib->size > (4096ul << ib->page_order) ||
		idal_is_needed(ib->data[0], ib->size);
#else
	return ib->size > (4096ul << ib->page_order);
#endif
}

/*
 * Set channel data address to idal buffer.
 */
static inline void
idal_buffer_set_cda(struct idal_buffer *ib, struct ccw1 *ccw)
{
	if (__idal_buffer_is_needed(ib)) {
		// setup idals
		ccw->cda = (u32)(addr_t) ib->data;
		ccw->flags |= CCW_FLAG_IDA;
	} else
		// we do not need idals - use direct addressing
		ccw->cda = (u32)(addr_t) ib->data[0];
	ccw->count = ib->size;
}
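
/*
 * Usage sketch for the idal_buffer helpers ("size" and "ccw" are
 * placeholders supplied by the caller):
 *
 *	struct idal_buffer *ib;
 *
 *	ib = idal_buffer_alloc(size, 0);
 *	if (IS_ERR(ib))
 *		return PTR_ERR(ib);
 *	idal_buffer_set_cda(ib, ccw);	(fills cda and count, sets CCW_FLAG_IDA if needed)
 *	... run the channel program ...
 *	idal_buffer_free(ib);
 */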

/*
 * Copy count bytes from an idal buffer to user memory
 */
static inline size_t
idal_buffer_to_user(struct idal_buffer *ib, void __user *to, size_t count)
{
	size_t left;
	int i;

	BUG_ON(count > ib->size);
	for (i = 0; count > IDA_BLOCK_SIZE; i++) {
		left = copy_to_user(to, ib->data[i], IDA_BLOCK_SIZE);
		if (left)
			return left + count - IDA_BLOCK_SIZE;
		to = (void __user *) to + IDA_BLOCK_SIZE;
		count -= IDA_BLOCK_SIZE;
	}
	return copy_to_user(to, ib->data[i], count);
}

/*
 * Copy count bytes from user memory to an idal buffer
 */
static inline size_t
idal_buffer_from_user(struct idal_buffer *ib, const void __user *from, size_t count)
{
	size_t left;
	int i;

	BUG_ON(count > ib->size);
	for (i = 0; count > IDA_BLOCK_SIZE; i++) {
		left = copy_from_user(ib->data[i], from, IDA_BLOCK_SIZE);
		if (left)
			return left + count - IDA_BLOCK_SIZE;
		from = (void __user *) from + IDA_BLOCK_SIZE;
		count -= IDA_BLOCK_SIZE;
	}
	return copy_from_user(ib->data[i], from, count);
}
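
/*
 * Sketch of the copy helpers in a read()/write() style path ("buf" is a
 * user-space pointer, count <= ib->size, both placeholders):
 *
 *	if (idal_buffer_from_user(ib, buf, count))	(write path)
 *		return -EFAULT;
 *	...
 *	if (idal_buffer_to_user(ib, buf, count))	(read path)
 *		return -EFAULT;
 *
 * Like copy_to_user()/copy_from_user(), both helpers return the number of
 * bytes that could not be copied, so any non-zero result maps to -EFAULT.
 */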

#endif