// SPDX-License-Identifier: GPL-2.0
/*
 * (C) 2001 Clemson University and The University of Chicago
 *
 * See COPYING in top-level directory.
 */
#include "orangefs-kernel.h"
#include "orangefs-bufmap.h"
struct slot_map {
	int c;
	wait_queue_head_t q;
	int count;
	unsigned long *map;
};

static struct slot_map rw_map = {
	.c = -1,
	.q = __WAIT_QUEUE_HEAD_INITIALIZER(rw_map.q)
};
static struct slot_map readdir_map = {
	.c = -1,
	.q = __WAIT_QUEUE_HEAD_INITIALIZER(readdir_map.q)
};
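/*
 * How the counter works (a reading of the code below, not text from the
 * original file): install() sets c to the slot count, get() decrements
 * it and put() increments it, so while a map is live c is the number of
 * free slots.  mark_killed() subtracts count + 1, driving c negative;
 * with k slots still outstanding c becomes -k - 1, and each put() walks
 * it back toward -1.  c == -1 therefore means "not installed, or dead
 * and fully drained", which is the state run_down() waits for.
 */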
static void install(struct slot_map *m, int count, unsigned long *map)
{
	spin_lock(&m->q.lock);
	m->c = m->count = count;
	m->map = map;
	wake_up_all_locked(&m->q);
	spin_unlock(&m->q.lock);
}
static void mark_killed(struct slot_map *m)
{
	spin_lock(&m->q.lock);
	m->c -= m->count + 1;
	spin_unlock(&m->q.lock);
}
static void run_down(struct slot_map *m)
{
	DEFINE_WAIT(wait);

	spin_lock(&m->q.lock);
	if (m->c != -1) {
		for (;;) {
			if (likely(list_empty(&wait.entry)))
				__add_wait_queue_entry_tail(&m->q, &wait);
			set_current_state(TASK_UNINTERRUPTIBLE);

			if (m->c == -1)
				break;

			spin_unlock(&m->q.lock);
			schedule();
			spin_lock(&m->q.lock);
		}
		__remove_wait_queue(&m->q, &wait);
		__set_current_state(TASK_RUNNING);
	}
	m->map = NULL;
	spin_unlock(&m->q.lock);
}
static void put(struct slot_map *m, int slot)
{
	int v;

	spin_lock(&m->q.lock);
	__clear_bit(slot, m->map);
	v = ++m->c;
	if (unlikely(v == 1))	/* no free slots -> one free slot */
		wake_up_locked(&m->q);
	else if (unlikely(v == -1))	/* finished dying */
		wake_up_all_locked(&m->q);
	spin_unlock(&m->q.lock);
}
static int wait_for_free(struct slot_map *m)
{
	long left = slot_timeout_secs * HZ;
	DEFINE_WAIT(wait);

	do {
		long n = left, t;

		if (likely(list_empty(&wait.entry)))
			__add_wait_queue_entry_tail_exclusive(&m->q, &wait);
		set_current_state(TASK_INTERRUPTIBLE);

		if (m->c > 0)
			break;

		if (m->c < 0) {
			/* we are waiting for map to be installed */
			/* it would better be there soon, or we go away */
			if (n > ORANGEFS_BUFMAP_WAIT_TIMEOUT_SECS * HZ)
				n = ORANGEFS_BUFMAP_WAIT_TIMEOUT_SECS * HZ;
		}
		spin_unlock(&m->q.lock);
		t = schedule_timeout(n);
		spin_lock(&m->q.lock);
		if (unlikely(!t) && n != left && m->c < 0)
			left = t;
		else
			left = t + (left - n);
		if (unlikely(signal_pending(current)))
			left = -EINTR;
	} while (left > 0);

	if (!list_empty(&wait.entry))
		list_del(&wait.entry);
	else if (left <= 0 && waitqueue_active(&m->q))
		__wake_up_locked_key(&m->q, TASK_INTERRUPTIBLE, NULL);
	__set_current_state(TASK_RUNNING);

	if (likely(left > 0))
		return 0;

	return left < 0 ? -EINTR : -ETIMEDOUT;
}
static int get(struct slot_map *m)
{
	int res = 0;

	spin_lock(&m->q.lock);
	if (unlikely(m->c <= 0))
		res = wait_for_free(m);
	if (!res) {
		m->c--;
		res = find_first_zero_bit(m->map, m->count);
		__set_bit(res, m->map);
	}
	spin_unlock(&m->q.lock);
	return res;
}
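/*
 * A minimal usage sketch (illustrative only, not code from this file;
 * the helper name is hypothetical): a caller takes a slot, uses the
 * corresponding buffer, and returns the slot.
 *
 *	int example_with_slot(struct slot_map *m)
 *	{
 *		int slot = get(m);
 *
 *		if (slot < 0)
 *			return slot;	(-EINTR or -ETIMEDOUT from above)
 *		... use buffer number "slot" ...
 *		put(m, slot);
 *		return 0;
 *	}
 */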
/* used to describe mapped buffers */
struct orangefs_bufmap_desc {
	void *uaddr;			/* user space address pointer */
	struct page **page_array;	/* array of mapped pages */
	int array_count;		/* size of above arrays */
	struct list_head list_link;
};

static struct orangefs_bufmap {
	int desc_size;
	int desc_shift;
	int desc_count;
	int total_size;
	int page_count;

	struct page **page_array;
	struct orangefs_bufmap_desc *desc_array;

	/* array to track usage of buffer descriptors */
	unsigned long *buffer_index_array;

	/* array to track usage of buffer descriptors for readdir */
#define N DIV_ROUND_UP(ORANGEFS_READDIR_DEFAULT_DESC_COUNT, BITS_PER_LONG)
	unsigned long readdir_index_array[N];
#undef N
} *__orangefs_bufmap;
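/*
 * Note on the sizing above: DIV_ROUND_UP(count, BITS_PER_LONG) is the
 * number of longs needed to hold one bit per descriptor.  For example,
 * on a 64-bit build any count up to 64 fits in a single unsigned long.
 */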
static DEFINE_SPINLOCK(orangefs_bufmap_lock);
static void
orangefs_bufmap_unmap(struct orangefs_bufmap *bufmap)
{
	int i;

	for (i = 0; i < bufmap->page_count; i++)
		put_page(bufmap->page_array[i]);
}
static void
orangefs_bufmap_free(struct orangefs_bufmap *bufmap)
{
	kfree(bufmap->page_array);
	kfree(bufmap->desc_array);
	kfree(bufmap->buffer_index_array);
	kfree(bufmap);
}
/*
 * XXX: Can the size and shift change while the caller gives up the
 * XXX: lock between calling this and doing something useful?
 */

int orangefs_bufmap_size_query(void)
{
	struct orangefs_bufmap *bufmap;
	int size = 0;

	spin_lock(&orangefs_bufmap_lock);
	bufmap = __orangefs_bufmap;
	if (bufmap)
		size = bufmap->desc_size;
	spin_unlock(&orangefs_bufmap_lock);
	return size;
}
int orangefs_bufmap_shift_query(void)
{
	struct orangefs_bufmap *bufmap;
	int shift = 0;

	spin_lock(&orangefs_bufmap_lock);
	bufmap = __orangefs_bufmap;
	if (bufmap)
		shift = bufmap->desc_shift;
	spin_unlock(&orangefs_bufmap_lock);
	return shift;
}
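/*
 * Intended-use note (an assumption, not code from this file):
 * desc_shift is ilog2(desc_size), so when desc_size is a power of two,
 * as the client-core uses in practice, a slot number converts to a
 * byte offset within the shared region with a shift, i.e.
 * offset = slot << desc_shift is equivalent to slot * desc_size.
 */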
static DECLARE_WAIT_QUEUE_HEAD(bufmap_waitq);
static DECLARE_WAIT_QUEUE_HEAD(readdir_waitq);
/*
 * orangefs_get_bufmap_init
 *
 * If bufmap_init is 1, then the shared memory system, including the
 * buffer_index_array, is available. Otherwise, it is not.
 *
 * returns the value of bufmap_init
 */
int orangefs_get_bufmap_init(void)
{
	return __orangefs_bufmap ? 1 : 0;
}
static struct orangefs_bufmap *
orangefs_bufmap_alloc(struct ORANGEFS_dev_map_desc *user_desc)
{
	struct orangefs_bufmap *bufmap;

	bufmap = kzalloc(sizeof(*bufmap), GFP_KERNEL);
	if (!bufmap)
		goto out;

	bufmap->total_size = user_desc->total_size;
	bufmap->desc_count = user_desc->count;
	bufmap->desc_size = user_desc->size;
	bufmap->desc_shift = ilog2(bufmap->desc_size);

	bufmap->buffer_index_array =
		kcalloc(DIV_ROUND_UP(bufmap->desc_count, BITS_PER_LONG),
			sizeof(long), GFP_KERNEL);
	if (!bufmap->buffer_index_array)
		goto out_free_bufmap;

	bufmap->desc_array =
		kcalloc(bufmap->desc_count, sizeof(struct orangefs_bufmap_desc),
			GFP_KERNEL);
	if (!bufmap->desc_array)
		goto out_free_index_array;

	bufmap->page_count = bufmap->total_size / PAGE_SIZE;

	/* allocate storage to track our page mappings */
	bufmap->page_array =
		kcalloc(bufmap->page_count, sizeof(struct page *), GFP_KERNEL);
	if (!bufmap->page_array)
		goto out_free_desc_array;

	goto out;

out_free_desc_array:
	kfree(bufmap->desc_array);
out_free_index_array:
	kfree(bufmap->buffer_index_array);
out_free_bufmap:
	kfree(bufmap);
	bufmap = NULL;
out:
	return bufmap;
}
static int
orangefs_bufmap_map(struct orangefs_bufmap *bufmap,
		struct ORANGEFS_dev_map_desc *user_desc)
{
	int pages_per_desc = bufmap->desc_size / PAGE_SIZE;
	int offset = 0, ret, i;

	/* map the pages */
	ret = get_user_pages_fast((unsigned long)user_desc->ptr,
				  bufmap->page_count, 1, bufmap->page_array);

	if (ret < 0)
		return ret;

	if (ret != bufmap->page_count) {
		gossip_err("orangefs error: asked for %d pages, only got %d.\n",
				bufmap->page_count, ret);

		for (i = 0; i < ret; i++) {
			SetPageError(bufmap->page_array[i]);
			put_page(bufmap->page_array[i]);
		}
		return -ENOMEM;
	}

	/*
	 * ideally we want to get kernel space pointers for each page, but
	 * we can't kmap that many pages at once if highmem is being used.
	 * so instead, we just kmap/kunmap the page address each time the
	 * kaddr is needed.
	 */
	for (i = 0; i < bufmap->page_count; i++)
		flush_dcache_page(bufmap->page_array[i]);

	/* build a list of available descriptors */
	for (offset = 0, i = 0; i < bufmap->desc_count; i++) {
		bufmap->desc_array[i].page_array = &bufmap->page_array[offset];
		bufmap->desc_array[i].array_count = pages_per_desc;
		bufmap->desc_array[i].uaddr =
		    (user_desc->ptr + (i * pages_per_desc * PAGE_SIZE));
		offset += pages_per_desc;
	}

	return 0;
}
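/*
 * Worked example with assumed values (illustrative only): if desc_size
 * is 4 MB and PAGE_SIZE is 4 KB, then pages_per_desc is 1024,
 * descriptor i points at &page_array[i * 1024], and its uaddr is
 * user_desc->ptr + i * 4 MB.
 */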
/*
 * orangefs_bufmap_initialize()
 *
 * initializes the mapped buffer interface
 *
 * returns 0 on success, -errno on failure
 */
int orangefs_bufmap_initialize(struct ORANGEFS_dev_map_desc *user_desc)
{
	struct orangefs_bufmap *bufmap;
	int ret = -EINVAL;

	gossip_debug(GOSSIP_BUFMAP_DEBUG,
		     "orangefs_bufmap_initialize: called (ptr ("
		     "%p) sz (%d) cnt(%d).\n",
		     user_desc->ptr,
		     user_desc->size,
		     user_desc->count);

	if (user_desc->total_size < 0 ||
	    user_desc->size < 0 ||
	    user_desc->count < 0)
		goto out;

	/*
	 * sanity check alignment and size of buffer that caller wants to
	 * work with
	 */
	if (PAGE_ALIGN((unsigned long)user_desc->ptr) !=
	    (unsigned long)user_desc->ptr) {
		gossip_err("orangefs error: memory alignment (front). %p\n",
			   user_desc->ptr);
		goto out;
	}

	if (PAGE_ALIGN(((unsigned long)user_desc->ptr + user_desc->total_size))
	    != (unsigned long)(user_desc->ptr + user_desc->total_size)) {
		gossip_err("orangefs error: memory alignment (back).(%p + %d)\n",
			   user_desc->ptr,
			   user_desc->total_size);
		goto out;
	}

	if (user_desc->total_size != (user_desc->size * user_desc->count)) {
		gossip_err("orangefs error: user provided an oddly sized buffer: (%d, %d, %d)\n",
			   user_desc->total_size,
			   user_desc->size,
			   user_desc->count);
		goto out;
	}

	if ((user_desc->size % PAGE_SIZE) != 0) {
		gossip_err("orangefs error: bufmap size not page size divisible (%d).\n",
			   user_desc->size);
		goto out;
	}

	ret = -ENOMEM;
	bufmap = orangefs_bufmap_alloc(user_desc);
	if (!bufmap)
		goto out;

	ret = orangefs_bufmap_map(bufmap, user_desc);
	if (ret)
		goto out_free_bufmap;

	spin_lock(&orangefs_bufmap_lock);
	if (__orangefs_bufmap) {
		spin_unlock(&orangefs_bufmap_lock);
		gossip_err("orangefs: error: bufmap already initialized.\n");
		ret = -EINVAL;
		goto out_unmap_bufmap;
	}
	__orangefs_bufmap = bufmap;
	install(&rw_map,
		bufmap->desc_count,
		bufmap->buffer_index_array);
	install(&readdir_map,
		ORANGEFS_READDIR_DEFAULT_DESC_COUNT,
		bufmap->readdir_index_array);
	spin_unlock(&orangefs_bufmap_lock);

	gossip_debug(GOSSIP_BUFMAP_DEBUG,
		     "orangefs_bufmap_initialize: exiting normally\n");
	return 0;

out_unmap_bufmap:
	orangefs_bufmap_unmap(bufmap);
out_free_bufmap:
	orangefs_bufmap_free(bufmap);
out:
	return ret;
}
/*
 * orangefs_bufmap_finalize()
 *
 * shuts down the mapped buffer interface and releases any resources
 * associated with it
 *
 * no return value
 */
void orangefs_bufmap_finalize(void)
{
	struct orangefs_bufmap *bufmap = __orangefs_bufmap;

	if (!bufmap)
		return;
	gossip_debug(GOSSIP_BUFMAP_DEBUG, "orangefs_bufmap_finalize: called\n");
	mark_killed(&rw_map);
	mark_killed(&readdir_map);
	gossip_debug(GOSSIP_BUFMAP_DEBUG,
		     "orangefs_bufmap_finalize: exiting normally\n");
}
void orangefs_bufmap_run_down(void)
{
	struct orangefs_bufmap *bufmap = __orangefs_bufmap;

	if (!bufmap)
		return;
	run_down(&rw_map);
	run_down(&readdir_map);
	spin_lock(&orangefs_bufmap_lock);
	__orangefs_bufmap = NULL;
	spin_unlock(&orangefs_bufmap_lock);
	orangefs_bufmap_unmap(bufmap);
	orangefs_bufmap_free(bufmap);
}
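/*
 * Teardown ordering note (a reading of the code, not text from the
 * original file): orangefs_bufmap_finalize() only calls mark_killed(),
 * which drives the counters negative so get() callers stop succeeding
 * and eventually time out in wait_for_free(), while
 * orangefs_bufmap_run_down() then blocks in run_down() until every
 * outstanding slot has been put() back before the pages are released.
 * A hypothetical shutdown path would call them in that order:
 *
 *	orangefs_bufmap_finalize();
 *	... wait for in-flight operations to drain ...
 *	orangefs_bufmap_run_down();
 */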
/*
 * orangefs_bufmap_get()
 *
 * gets a free mapped buffer descriptor, will sleep until one becomes
 * available if necessary
 *
 * returns slot on success, -errno on failure
 */
int orangefs_bufmap_get(void)
{
	return get(&rw_map);
}

/*
 * orangefs_bufmap_put()
 *
 * returns a mapped buffer descriptor to the collection
 *
 * no return value
 */
void orangefs_bufmap_put(int buffer_index)
{
	put(&rw_map, buffer_index);
}
/*
 * orangefs_readdir_index_get()
 *
 * gets a free descriptor, will sleep until one becomes
 * available if necessary.
 * Although the readdir buffers are not mapped into kernel space
 * we could do that at a later point of time. Regardless, these
 * indices are used by the client-core.
 *
 * returns slot on success, -errno on failure
 */
int orangefs_readdir_index_get(void)
{
	return get(&readdir_map);
}

void orangefs_readdir_index_put(int buffer_index)
{
	put(&readdir_map, buffer_index);
}
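/*
 * A sketch of how the get/put pairs above are meant to be used by a
 * write path (the function and the hand-off step are hypothetical,
 * not from this file):
 *
 *	int example_rw(struct iov_iter *iter, size_t size)
 *	{
 *		int buffer_index = orangefs_bufmap_get();
 *		int ret;
 *
 *		if (buffer_index < 0)
 *			return buffer_index;
 *		ret = orangefs_bufmap_copy_from_iovec(iter, buffer_index,
 *						      size);
 *		... hand buffer_index to the client-core and wait ...
 *		orangefs_bufmap_put(buffer_index);
 *		return ret;
 *	}
 */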
/*
 * we've been handed an iovec, we need to copy it to
 * the shared memory descriptor at "buffer_index".
 */
int orangefs_bufmap_copy_from_iovec(struct iov_iter *iter,
				    int buffer_index,
				    size_t size)
{
	struct orangefs_bufmap_desc *to;
	int i;

	gossip_debug(GOSSIP_BUFMAP_DEBUG,
		     "%s: buffer_index:%d: size:%zu:\n",
		     __func__, buffer_index, size);

	to = &__orangefs_bufmap->desc_array[buffer_index];
	for (i = 0; size; i++) {
		struct page *page = to->page_array[i];
		size_t n = size;

		if (n > PAGE_SIZE)
			n = PAGE_SIZE;
		if (copy_page_from_iter(page, 0, n, iter) != n)
			return -EFAULT;
		size -= n;
	}
	return 0;
}
/*
 * we've been handed an iovec, we need to fill it from
 * the shared memory descriptor at "buffer_index".
 */
int orangefs_bufmap_copy_to_iovec(struct iov_iter *iter,
				  int buffer_index,
				  size_t size)
{
	struct orangefs_bufmap_desc *from;
	int i;

	from = &__orangefs_bufmap->desc_array[buffer_index];
	gossip_debug(GOSSIP_BUFMAP_DEBUG,
		     "%s: buffer_index:%d: size:%zu:\n",
		     __func__, buffer_index, size);

	for (i = 0; size; i++) {
		struct page *page = from->page_array[i];
		size_t n = size;

		if (n > PAGE_SIZE)
			n = PAGE_SIZE;
		n = copy_page_to_iter(page, 0, n, iter);
		if (!n)
			return -EFAULT;
		size -= n;
	}
	return 0;
}
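/*
 * Copy-loop arithmetic, assuming a 4 KB PAGE_SIZE for illustration:
 * a 10000-byte transfer visits page_array[0..2] with n = 4096, 4096
 * and 1808, decrementing size to zero.  A copy_page_to_iter() return
 * of zero (no progress at all) is treated as -EFAULT.
 */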