// SPDX-License-Identifier: GPL-2.0
/*
 * (C) 2001 Clemson University and The University of Chicago
 *
 * See COPYING in top-level directory.
 */
#include "protocol.h"
#include "orangefs-kernel.h"
#include "orangefs-bufmap.h"
struct slot_map {
	int c;
	wait_queue_head_t q;
	int count;
	unsigned long *map;
};

static struct slot_map rw_map = {
	.c = -1,
	.q = __WAIT_QUEUE_HEAD_INITIALIZER(rw_map.q)
};
static struct slot_map readdir_map = {
	.c = -1,
	.q = __WAIT_QUEUE_HEAD_INITIALIZER(readdir_map.q)
};
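/*
 * Slot-map life cycle, as implemented below: c == -1 means no map is
 * installed (or the map has finished dying); install() raises c to
 * the number of free slots, get()/put() decrement and increment it,
 * and mark_killed()/run_down() drive it back to -1 during teardown.
 */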
static void install(struct slot_map *m, int count, unsigned long *map)
{
	spin_lock(&m->q.lock);
	m->c = m->count = count;
	m->map = map;
	wake_up_all_locked(&m->q);
	spin_unlock(&m->q.lock);
}
static void mark_killed(struct slot_map *m)
{
	spin_lock(&m->q.lock);
	m->c -= m->count + 1;
	spin_unlock(&m->q.lock);
}
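/*
 * A worked example of the arithmetic in mark_killed(): with count == 4
 * and all four slots free (c == 4), c drops straight to -1; with two
 * slots still outstanding (c == 2), c becomes -3 and the two put()
 * calls that return those slots step it back to -1, the "finished
 * dying" state that put() checks for.
 */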
static void run_down(struct slot_map *m)
{
	DEFINE_WAIT(wait);

	spin_lock(&m->q.lock);
	if (m->c != -1) {
		for (;;) {
			if (likely(list_empty(&wait.entry)))
				__add_wait_queue_entry_tail(&m->q, &wait);
			set_current_state(TASK_UNINTERRUPTIBLE);

			if (m->c == -1)
				break;

			spin_unlock(&m->q.lock);
			schedule();
			spin_lock(&m->q.lock);
		}
		__remove_wait_queue(&m->q, &wait);
		__set_current_state(TASK_RUNNING);
	}
	m->map = NULL;
	spin_unlock(&m->q.lock);
}
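/*
 * run_down() parks in TASK_UNINTERRUPTIBLE until put() observes
 * c == -1 and issues its wake_up_all_locked(); once run_down()
 * returns, no slot is in flight and the map pointer has been cleared.
 */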
static void put(struct slot_map *m, int slot)
{
	int v;

	spin_lock(&m->q.lock);
	__clear_bit(slot, m->map);
	v = ++m->c;
	if (v > 0)
		wake_up_locked(&m->q);
	if (unlikely(v == -1))	/* finished dying */
		wake_up_all_locked(&m->q);
	spin_unlock(&m->q.lock);
}
static int wait_for_free(struct slot_map *m)
{
	long left = slot_timeout_secs * HZ;
	DEFINE_WAIT(wait);

	do {
		long n = left, t;
		if (likely(list_empty(&wait.entry)))
			__add_wait_queue_entry_tail_exclusive(&m->q, &wait);
		set_current_state(TASK_INTERRUPTIBLE);

		if (m->c > 0)
			break;

		if (m->c < 0) {
			/* we are waiting for map to be installed */
			/* it had better be there soon, or we go away */
			if (n > ORANGEFS_BUFMAP_WAIT_TIMEOUT_SECS * HZ)
				n = ORANGEFS_BUFMAP_WAIT_TIMEOUT_SECS * HZ;
		}
		spin_unlock(&m->q.lock);
		t = schedule_timeout(n);
		spin_lock(&m->q.lock);
		if (unlikely(!t) && n != left && m->c < 0)
			left = t;
		else
			left = t + (left - n);
		if (signal_pending(current))
			left = -EINTR;
	} while (left > 0);

	if (!list_empty(&wait.entry))
		list_del(&wait.entry);
	else if (left <= 0 && waitqueue_active(&m->q))
		__wake_up_locked_key(&m->q, TASK_INTERRUPTIBLE, NULL);
	__set_current_state(TASK_RUNNING);

	if (likely(left > 0))
		return 0;

	return left < 0 ? -EINTR : -ETIMEDOUT;
}
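/*
 * Timeout bookkeeping above: "left" is the total budget
 * (slot_timeout_secs * HZ) and "n" is the length of one
 * schedule_timeout() round.  While no map is installed (c < 0) a
 * round is clamped to ORANGEFS_BUFMAP_WAIT_TIMEOUT_SECS * HZ, so a
 * client-core that never installs a map is detected well before the
 * full slot timeout expires.
 */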
static int get(struct slot_map *m)
{
	int res = 0;

	spin_lock(&m->q.lock);
	if (unlikely(m->c <= 0))
		res = wait_for_free(m);
	if (!res) {
		m->c--;
		res = find_first_zero_bit(m->map, m->count);
		__set_bit(res, m->map);
	}
	spin_unlock(&m->q.lock);
	return res;
}
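/*
 * A minimal sketch of the caller pattern (not a verbatim call site):
 *
 *	int slot = orangefs_bufmap_get();
 *	if (slot < 0)
 *		return slot;	(-EINTR or -ETIMEDOUT)
 *	... exchange data through descriptor "slot" ...
 *	orangefs_bufmap_put(slot);
 */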
/* used to describe mapped buffers */
struct orangefs_bufmap_desc {
	void __user *uaddr;		/* user space address pointer */
	struct page **page_array;	/* array of mapped pages */
	int array_count;		/* size of above arrays */
	struct list_head list_link;
};

static struct orangefs_bufmap {
	int total_size;
	int page_count;
	int desc_size;
	int desc_shift;
	int desc_count;

	struct page **page_array;
	struct orangefs_bufmap_desc *desc_array;

	/* array to track usage of buffer descriptors */
	unsigned long *buffer_index_array;

	/* array to track usage of buffer descriptors for readdir */
#define N DIV_ROUND_UP(ORANGEFS_READDIR_DEFAULT_DESC_COUNT, BITS_PER_LONG)
	unsigned long readdir_index_array[N];
#undef N
} *__orangefs_bufmap;
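/*
 * Worked example with hypothetical numbers: a client-core that maps
 * five 4 MB descriptors gives total_size == 20 MB; with 4 KB pages
 * that is page_count == 5120, desc_size == 4 MB, desc_shift == 22,
 * and each orangefs_bufmap_desc covers 1024 entries of page_array.
 */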
static DEFINE_SPINLOCK(orangefs_bufmap_lock);
static void
orangefs_bufmap_unmap(struct orangefs_bufmap *bufmap)
{
	unpin_user_pages(bufmap->page_array, bufmap->page_count);
}
static void
orangefs_bufmap_free(struct orangefs_bufmap *bufmap)
{
	kfree(bufmap->page_array);
	kfree(bufmap->desc_array);
	bitmap_free(bufmap->buffer_index_array);
	kfree(bufmap);
}
/*
 * XXX: Can the size and shift change while the caller gives up the
 * XXX: lock between calling this and doing something useful?
 */

int orangefs_bufmap_size_query(void)
{
	struct orangefs_bufmap *bufmap;
	int size = 0;

	spin_lock(&orangefs_bufmap_lock);
	bufmap = __orangefs_bufmap;
	if (bufmap)
		size = bufmap->desc_size;
	spin_unlock(&orangefs_bufmap_lock);
	return size;
}
int orangefs_bufmap_shift_query(void)
{
	struct orangefs_bufmap *bufmap;
	int shift = 0;

	spin_lock(&orangefs_bufmap_lock);
	bufmap = __orangefs_bufmap;
	if (bufmap)
		shift = bufmap->desc_shift;
	spin_unlock(&orangefs_bufmap_lock);
	return shift;
}
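/*
 * desc_size is treated as a power of two (see the ilog2() in
 * orangefs_bufmap_alloc()), so callers can presumably use the shift
 * instead of dividing: with the hypothetical 4 MB descriptors above,
 * offset >> 22 picks the descriptor containing a byte offset.
 */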
static DECLARE_WAIT_QUEUE_HEAD(bufmap_waitq);
static DECLARE_WAIT_QUEUE_HEAD(readdir_waitq);
static struct orangefs_bufmap *
orangefs_bufmap_alloc(struct ORANGEFS_dev_map_desc *user_desc)
{
	struct orangefs_bufmap *bufmap;

	bufmap = kzalloc(sizeof(*bufmap), GFP_KERNEL);
	if (!bufmap)
		goto out;

	bufmap->total_size = user_desc->total_size;
	bufmap->desc_count = user_desc->count;
	bufmap->desc_size = user_desc->size;
	bufmap->desc_shift = ilog2(bufmap->desc_size);

	bufmap->buffer_index_array = bitmap_zalloc(bufmap->desc_count, GFP_KERNEL);
	if (!bufmap->buffer_index_array)
		goto out_free_bufmap;

	bufmap->desc_array =
		kcalloc(bufmap->desc_count, sizeof(struct orangefs_bufmap_desc),
			GFP_KERNEL);
	if (!bufmap->desc_array)
		goto out_free_index_array;

	bufmap->page_count = bufmap->total_size / PAGE_SIZE;

	/* allocate storage to track our page mappings */
	bufmap->page_array =
		kcalloc(bufmap->page_count, sizeof(struct page *), GFP_KERNEL);
	if (!bufmap->page_array)
		goto out_free_desc_array;

	goto out;

out_free_desc_array:
	kfree(bufmap->desc_array);
out_free_index_array:
	bitmap_free(bufmap->buffer_index_array);
out_free_bufmap:
	kfree(bufmap);
	bufmap = NULL;
out:
	return bufmap;
}
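/*
 * The unwind labels above free, in reverse order of allocation,
 * exactly what was set up before the failing step: a page_array
 * failure falls through desc_array, the index bitmap, and finally
 * the bufmap itself; the usual goto-cleanup idiom.
 */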
static int
orangefs_bufmap_map(struct orangefs_bufmap *bufmap,
		struct ORANGEFS_dev_map_desc *user_desc)
{
	int pages_per_desc = bufmap->desc_size / PAGE_SIZE;
	int offset = 0, ret, i;

	/* map the pages */
	ret = pin_user_pages_fast((unsigned long)user_desc->ptr,
				  bufmap->page_count, FOLL_WRITE,
				  bufmap->page_array);

	if (ret < 0)
		return ret;

	if (ret != bufmap->page_count) {
		gossip_err("orangefs error: asked for %d pages, only got %d.\n",
				bufmap->page_count, ret);

		for (i = 0; i < ret; i++)
			unpin_user_page(bufmap->page_array[i]);
		return -ENOMEM;
	}

	/*
	 * ideally we want to get kernel space pointers for each page, but
	 * we can't kmap that many pages at once if highmem is being used.
	 * so instead, we just kmap/kunmap the page address each time the
	 * kaddr is needed.
	 */
	for (i = 0; i < bufmap->page_count; i++)
		flush_dcache_page(bufmap->page_array[i]);

	/* build a list of available descriptors */
	for (offset = 0, i = 0; i < bufmap->desc_count; i++) {
		bufmap->desc_array[i].page_array = &bufmap->page_array[offset];
		bufmap->desc_array[i].array_count = pages_per_desc;
		bufmap->desc_array[i].uaddr =
		    (user_desc->ptr + (i * pages_per_desc * PAGE_SIZE));
		offset += pages_per_desc;
	}

	return 0;
}
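/*
 * Layout produced by the loop above, reusing the hypothetical 4 MB
 * descriptors: desc_array[0].page_array points at page_array[0],
 * desc_array[1].page_array at page_array[1024], and so on, while
 * uaddr advances by pages_per_desc * PAGE_SIZE through the user
 * mapping.
 */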
/*
 * orangefs_bufmap_initialize()
 *
 * initializes the mapped buffer interface
 *
 * returns 0 on success, -errno on failure
 */
int orangefs_bufmap_initialize(struct ORANGEFS_dev_map_desc *user_desc)
{
	struct orangefs_bufmap *bufmap;
	int ret = -EINVAL;

	gossip_debug(GOSSIP_BUFMAP_DEBUG,
		     "orangefs_bufmap_initialize: called (ptr ("
		     "%p) sz (%d) cnt(%d).\n",
		     user_desc->ptr,
		     user_desc->size,
		     user_desc->count);

	if (user_desc->total_size < 0 ||
	    user_desc->size < 0 ||
	    user_desc->count < 0)
		goto out;

	/*
	 * sanity check alignment and size of buffer that caller wants to
	 * work with
	 */
	if (PAGE_ALIGN((unsigned long)user_desc->ptr) !=
	    (unsigned long)user_desc->ptr) {
		gossip_err("orangefs error: memory alignment (front). %p\n",
			   user_desc->ptr);
		goto out;
	}

	if (PAGE_ALIGN(((unsigned long)user_desc->ptr + user_desc->total_size))
	    != (unsigned long)(user_desc->ptr + user_desc->total_size)) {
		gossip_err("orangefs error: memory alignment (back).(%p + %d)\n",
			   user_desc->ptr,
			   user_desc->total_size);
		goto out;
	}

	if (user_desc->total_size != (user_desc->size * user_desc->count)) {
		gossip_err("orangefs error: user provided an oddly sized buffer: (%d, %d, %d)\n",
			   user_desc->total_size,
			   user_desc->size,
			   user_desc->count);
		goto out;
	}

	if ((user_desc->size % PAGE_SIZE) != 0) {
		gossip_err("orangefs error: bufmap size not page size divisible (%d).\n",
			   user_desc->size);
		goto out;
	}

	ret = -ENOMEM;
	bufmap = orangefs_bufmap_alloc(user_desc);
	if (!bufmap)
		goto out;

	ret = orangefs_bufmap_map(bufmap, user_desc);
	if (ret)
		goto out_free_bufmap;

	spin_lock(&orangefs_bufmap_lock);
	if (__orangefs_bufmap) {
		spin_unlock(&orangefs_bufmap_lock);
		gossip_err("orangefs: error: bufmap already initialized.\n");
		ret = -EINVAL;
		goto out_unmap_bufmap;
	}
	__orangefs_bufmap = bufmap;
	install(&rw_map,
		bufmap->desc_count,
		bufmap->buffer_index_array);
	install(&readdir_map,
		ORANGEFS_READDIR_DEFAULT_DESC_COUNT,
		bufmap->readdir_index_array);
	spin_unlock(&orangefs_bufmap_lock);

	gossip_debug(GOSSIP_BUFMAP_DEBUG,
		     "orangefs_bufmap_initialize: exiting normally\n");
	return 0;

out_unmap_bufmap:
	orangefs_bufmap_unmap(bufmap);
out_free_bufmap:
	orangefs_bufmap_free(bufmap);
out:
	return ret;
}
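/*
 * A user_desc that would pass every check above, with hypothetical
 * values: ptr page-aligned, size == 4 MB (a PAGE_SIZE multiple),
 * count == 5, and total_size == 20 MB == size * count.
 */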
/*
 * orangefs_bufmap_finalize()
 *
 * shuts down the mapped buffer interface and releases any resources
 * associated with it
 *
 * no return value
 */
void orangefs_bufmap_finalize(void)
{
	struct orangefs_bufmap *bufmap = __orangefs_bufmap;

	if (!bufmap)
		return;
	gossip_debug(GOSSIP_BUFMAP_DEBUG, "orangefs_bufmap_finalize: called\n");
	mark_killed(&rw_map);
	mark_killed(&readdir_map);
	gossip_debug(GOSSIP_BUFMAP_DEBUG,
		     "orangefs_bufmap_finalize: exiting normally\n");
}
void orangefs_bufmap_run_down(void)
{
	struct orangefs_bufmap *bufmap = __orangefs_bufmap;

	if (!bufmap)
		return;
	run_down(&rw_map);
	run_down(&readdir_map);
	spin_lock(&orangefs_bufmap_lock);
	__orangefs_bufmap = NULL;
	spin_unlock(&orangefs_bufmap_lock);
	orangefs_bufmap_unmap(bufmap);
	orangefs_bufmap_free(bufmap);
}
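/*
 * Teardown is two-phase: orangefs_bufmap_finalize() marks both maps
 * killed so no new slots are handed out, then orangefs_bufmap_run_down()
 * waits for outstanding slots to drain before unpinning and freeing
 * the pages.
 */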
/*
 * orangefs_bufmap_get()
 *
 * gets a free mapped buffer descriptor, will sleep until one becomes
 * available if necessary
 *
 * returns slot on success, -errno on failure
 */
int orangefs_bufmap_get(void)
{
	return get(&rw_map);
}
/*
 * orangefs_bufmap_put()
 *
 * returns a mapped buffer descriptor to the collection
 *
 * no return value
 */
void orangefs_bufmap_put(int buffer_index)
{
	put(&rw_map, buffer_index);
}
/*
 * orangefs_readdir_index_get()
 *
 * gets a free descriptor, will sleep until one becomes
 * available if necessary.
 * Although the readdir buffers are not mapped into kernel space
 * we could do that at a later point in time. Regardless, these
 * indices are used by the client-core.
 *
 * returns slot on success, -errno on failure
 */
int orangefs_readdir_index_get(void)
{
	return get(&readdir_map);
}
void orangefs_readdir_index_put(int buffer_index)
{
	put(&readdir_map, buffer_index);
}
/*
 * we've been handed an iovec, we need to copy it to
 * the shared memory descriptor at "buffer_index".
 */
int orangefs_bufmap_copy_from_iovec(struct iov_iter *iter,
				    int buffer_index,
				    size_t size)
{
	struct orangefs_bufmap_desc *to;
	int i;

	gossip_debug(GOSSIP_BUFMAP_DEBUG,
		     "%s: buffer_index:%d: size:%zu:\n",
		     __func__, buffer_index, size);

	to = &__orangefs_bufmap->desc_array[buffer_index];
	for (i = 0; size; i++) {
		struct page *page = to->page_array[i];
		size_t n = size;

		if (n > PAGE_SIZE)
			n = PAGE_SIZE;
		if (copy_page_from_iter(page, 0, n, iter) != n)
			return -EFAULT;
		size -= n;
	}
	return 0;
}
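/*
 * Implicit contract in the loop above: "size" must not exceed
 * array_count * PAGE_SIZE for the descriptor, since page_array[i]
 * is indexed one page per PAGE_SIZE chunk with no bounds check.
 */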
/*
 * we've been handed an iovec, we need to fill it from
 * the shared memory descriptor at "buffer_index".
 */
int orangefs_bufmap_copy_to_iovec(struct iov_iter *iter,
				  int buffer_index,
				  size_t size)
{
	struct orangefs_bufmap_desc *from;
	int i;

	from = &__orangefs_bufmap->desc_array[buffer_index];
	gossip_debug(GOSSIP_BUFMAP_DEBUG,
		     "%s: buffer_index:%d: size:%zu:\n",
		     __func__, buffer_index, size);

	for (i = 0; size; i++) {
		struct page *page = from->page_array[i];
		size_t n = size;

		if (n > PAGE_SIZE)
			n = PAGE_SIZE;
		n = copy_page_to_iter(page, 0, n, iter);
		if (!n)
			return -EFAULT;
		size -= n;
	}
	return 0;
}
void orangefs_bufmap_page_fill(void *page_to,
			       int buffer_index,
			       int slot_index)
{
	struct orangefs_bufmap_desc *from;
	void *page_from;

	from = &__orangefs_bufmap->desc_array[buffer_index];
	page_from = kmap_atomic(from->page_array[slot_index]);
	memcpy(page_to, page_from, PAGE_SIZE);
	kunmap_atomic(page_from);
}
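/*
 * page_to is assumed to point at a full page of destination memory;
 * the copy is always PAGE_SIZE bytes from the slot_index'th pinned
 * page of the chosen descriptor.
 */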