kernel/relay.c
1 /*
2 * Public API and common code for kernel->userspace relay file support.
4 * See Documentation/filesystems/relay.rst for an overview.
6 * Copyright (C) 2002-2005 - Tom Zanussi (zanussi@us.ibm.com), IBM Corp
7 * Copyright (C) 1999-2005 - Karim Yaghmour (karim@opersys.com)
9 * Moved to kernel/relay.c by Paul Mundt, 2006.
10 * November 2006 - CPU hotplug support by Mathieu Desnoyers
11 * (mathieu.desnoyers@polymtl.ca)
13 * This file is released under the GPL.
15 #include <linux/errno.h>
16 #include <linux/stddef.h>
17 #include <linux/slab.h>
18 #include <linux/export.h>
19 #include <linux/string.h>
20 #include <linux/relay.h>
21 #include <linux/vmalloc.h>
22 #include <linux/mm.h>
23 #include <linux/cpu.h>
24 #include <linux/splice.h>
26 /* list of open channels, for cpu hotplug */
27 static DEFINE_MUTEX(relay_channels_mutex);
28 static LIST_HEAD(relay_channels);
31 * fault() vm_op implementation for relay file mapping.
33 static vm_fault_t relay_buf_fault(struct vm_fault *vmf)
35 struct page *page;
36 struct rchan_buf *buf = vmf->vma->vm_private_data;
37 pgoff_t pgoff = vmf->pgoff;
39 if (!buf)
40 return VM_FAULT_OOM;
42 page = vmalloc_to_page(buf->start + (pgoff << PAGE_SHIFT));
43 if (!page)
44 return VM_FAULT_SIGBUS;
45 get_page(page);
46 vmf->page = page;
48 return 0;
52 * vm_ops for relay file mappings.
54 static const struct vm_operations_struct relay_file_mmap_ops = {
55 .fault = relay_buf_fault,
59 * allocate an array of struct page pointers
61 static struct page **relay_alloc_page_array(unsigned int n_pages)
63 const size_t pa_size = n_pages * sizeof(struct page *);
64 if (pa_size > PAGE_SIZE)
65 return vzalloc(pa_size);
66 return kzalloc(pa_size, GFP_KERNEL);
70 * free an array of struct page pointers
72 static void relay_free_page_array(struct page **array)
74 kvfree(array);
77 /**
78 * relay_mmap_buf: - mmap channel buffer to process address space
79 * @buf: relay channel buffer
80 * @vma: vm_area_struct describing memory to be mapped
82 * Returns 0 if ok, negative on error
84 * Caller should already have grabbed mmap_lock.
86 static int relay_mmap_buf(struct rchan_buf *buf, struct vm_area_struct *vma)
88 unsigned long length = vma->vm_end - vma->vm_start;
90 if (!buf)
91 return -EBADF;
93 if (length != (unsigned long)buf->chan->alloc_size)
94 return -EINVAL;
96 vma->vm_ops = &relay_file_mmap_ops;
97 vma->vm_flags |= VM_DONTEXPAND;
98 vma->vm_private_data = buf;
100 return 0;
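/*
 * Editorial example (not part of relay.c): mapping a relay buffer from
 * userspace.  relay_mmap_buf() rejects any mapping whose length is not
 * exactly the channel's alloc_size, i.e. the page-aligned product of the
 * sub-buffer size and count passed to relay_open().  The file path and
 * the 64 KiB x 8 geometry below are hypothetical.
 */
#if 0	/* userspace sketch, shown for illustration only */
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>

static void *map_relay_cpu0(void)
{
	size_t len = 65536 * 8;		/* must match chan->alloc_size */
	void *p;
	int fd = open("/sys/kernel/debug/example/example0", O_RDONLY);

	if (fd < 0)
		return NULL;
	/* Consumers normally map the buffer read-only. */
	p = mmap(NULL, len, PROT_READ, MAP_PRIVATE, fd, 0);
	close(fd);			/* the mapping holds its own reference */
	return p == MAP_FAILED ? NULL : p;
}
#endif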
104 * relay_alloc_buf - allocate a channel buffer
105 * @buf: the buffer struct
106 * @size: total size of the buffer
108 * Returns a pointer to the resulting buffer, %NULL if unsuccessful. The
109 * passed in size will get page aligned, if it isn't already.
111 static void *relay_alloc_buf(struct rchan_buf *buf, size_t *size)
113 void *mem;
114 unsigned int i, j, n_pages;
116 *size = PAGE_ALIGN(*size);
117 n_pages = *size >> PAGE_SHIFT;
119 buf->page_array = relay_alloc_page_array(n_pages);
120 if (!buf->page_array)
121 return NULL;
123 for (i = 0; i < n_pages; i++) {
124 buf->page_array[i] = alloc_page(GFP_KERNEL);
125 if (unlikely(!buf->page_array[i]))
126 goto depopulate;
127 set_page_private(buf->page_array[i], (unsigned long)buf);
129 mem = vmap(buf->page_array, n_pages, VM_MAP, PAGE_KERNEL);
130 if (!mem)
131 goto depopulate;
133 memset(mem, 0, *size);
134 buf->page_count = n_pages;
135 return mem;
137 depopulate:
138 for (j = 0; j < i; j++)
139 __free_page(buf->page_array[j]);
140 relay_free_page_array(buf->page_array);
141 return NULL;
145 * relay_create_buf - allocate and initialize a channel buffer
146 * @chan: the relay channel
148 * Returns channel buffer if successful, %NULL otherwise.
150 static struct rchan_buf *relay_create_buf(struct rchan *chan)
152 struct rchan_buf *buf;
154 if (chan->n_subbufs > KMALLOC_MAX_SIZE / sizeof(size_t *))
155 return NULL;
157 buf = kzalloc(sizeof(struct rchan_buf), GFP_KERNEL);
158 if (!buf)
159 return NULL;
160 buf->padding = kmalloc_array(chan->n_subbufs, sizeof(size_t *),
161 GFP_KERNEL);
162 if (!buf->padding)
163 goto free_buf;
165 buf->start = relay_alloc_buf(buf, &chan->alloc_size);
166 if (!buf->start)
167 goto free_buf;
169 buf->chan = chan;
170 kref_get(&buf->chan->kref);
171 return buf;
173 free_buf:
174 kfree(buf->padding);
175 kfree(buf);
176 return NULL;
180 * relay_destroy_channel - free the channel struct
181 * @kref: target kernel reference that contains the relay channel
183 * Should only be called from kref_put().
185 static void relay_destroy_channel(struct kref *kref)
187 struct rchan *chan = container_of(kref, struct rchan, kref);
188 free_percpu(chan->buf);
189 kfree(chan);
193 * relay_destroy_buf - destroy an rchan_buf struct and associated buffer
194 * @buf: the buffer struct
196 static void relay_destroy_buf(struct rchan_buf *buf)
198 struct rchan *chan = buf->chan;
199 unsigned int i;
201 if (likely(buf->start)) {
202 vunmap(buf->start);
203 for (i = 0; i < buf->page_count; i++)
204 __free_page(buf->page_array[i]);
205 relay_free_page_array(buf->page_array);
207 *per_cpu_ptr(chan->buf, buf->cpu) = NULL;
208 kfree(buf->padding);
209 kfree(buf);
210 kref_put(&chan->kref, relay_destroy_channel);
214 * relay_remove_buf - remove a channel buffer
215 * @kref: target kernel reference that contains the relay buffer
217 * Removes the file from the filesystem, which also frees the
218 * rchan_buf struct and the channel buffer. Should only be called from
219 * kref_put().
221 static void relay_remove_buf(struct kref *kref)
223 struct rchan_buf *buf = container_of(kref, struct rchan_buf, kref);
224 relay_destroy_buf(buf);
228 * relay_buf_empty - boolean, is the channel buffer empty?
229 * @buf: channel buffer
231 * Returns 1 if the buffer is empty, 0 otherwise.
233 static int relay_buf_empty(struct rchan_buf *buf)
235 return (buf->subbufs_produced - buf->subbufs_consumed) ? 0 : 1;
239 * relay_buf_full - boolean, is the channel buffer full?
240 * @buf: channel buffer
242 * Returns 1 if the buffer is full, 0 otherwise.
244 int relay_buf_full(struct rchan_buf *buf)
246 size_t ready = buf->subbufs_produced - buf->subbufs_consumed;
247 return (ready >= buf->chan->n_subbufs) ? 1 : 0;
249 EXPORT_SYMBOL_GPL(relay_buf_full);
252 * High-level relay kernel API and associated functions.
255 static int relay_subbuf_start(struct rchan_buf *buf, void *subbuf,
256 void *prev_subbuf, size_t prev_padding)
258 if (!buf->chan->cb->subbuf_start)
259 return !relay_buf_full(buf);
261 return buf->chan->cb->subbuf_start(buf, subbuf,
262 prev_subbuf, prev_padding);
266 * wakeup_readers - wake up readers waiting on a channel
267 * @work: contains the channel buffer
269 * This is the function used to defer reader waking
271 static void wakeup_readers(struct irq_work *work)
273 struct rchan_buf *buf;
275 buf = container_of(work, struct rchan_buf, wakeup_work);
276 wake_up_interruptible(&buf->read_wait);
280 * __relay_reset - reset a channel buffer
281 * @buf: the channel buffer
282 * @init: 1 if this is a first-time initialization
284 * See relay_reset() for description of effect.
286 static void __relay_reset(struct rchan_buf *buf, unsigned int init)
288 size_t i;
290 if (init) {
291 init_waitqueue_head(&buf->read_wait);
292 kref_init(&buf->kref);
293 init_irq_work(&buf->wakeup_work, wakeup_readers);
294 } else {
295 irq_work_sync(&buf->wakeup_work);
298 buf->subbufs_produced = 0;
299 buf->subbufs_consumed = 0;
300 buf->bytes_consumed = 0;
301 buf->finalized = 0;
302 buf->data = buf->start;
303 buf->offset = 0;
305 for (i = 0; i < buf->chan->n_subbufs; i++)
306 buf->padding[i] = 0;
308 relay_subbuf_start(buf, buf->data, NULL, 0);
312 * relay_reset - reset the channel
313 * @chan: the channel
315 * This has the effect of erasing all data from all channel buffers
316 * and restarting the channel in its initial state. The buffers
317 * are not freed, so any mappings are still in effect.
319 * NOTE. Care should be taken that the channel isn't actually
320 * being used by anything when this call is made.
322 void relay_reset(struct rchan *chan)
324 struct rchan_buf *buf;
325 unsigned int i;
327 if (!chan)
328 return;
330 if (chan->is_global && (buf = *per_cpu_ptr(chan->buf, 0))) {
331 __relay_reset(buf, 0);
332 return;
335 mutex_lock(&relay_channels_mutex);
336 for_each_possible_cpu(i)
337 if ((buf = *per_cpu_ptr(chan->buf, i)))
338 __relay_reset(buf, 0);
339 mutex_unlock(&relay_channels_mutex);
341 EXPORT_SYMBOL_GPL(relay_reset);
343 static inline void relay_set_buf_dentry(struct rchan_buf *buf,
344 struct dentry *dentry)
346 buf->dentry = dentry;
347 d_inode(buf->dentry)->i_size = buf->early_bytes;
350 static struct dentry *relay_create_buf_file(struct rchan *chan,
351 struct rchan_buf *buf,
352 unsigned int cpu)
354 struct dentry *dentry;
355 char *tmpname;
357 tmpname = kzalloc(NAME_MAX + 1, GFP_KERNEL);
358 if (!tmpname)
359 return NULL;
360 snprintf(tmpname, NAME_MAX, "%s%d", chan->base_filename, cpu);
362 /* Create file in fs */
363 dentry = chan->cb->create_buf_file(tmpname, chan->parent,
364 S_IRUSR, buf,
365 &chan->is_global);
366 if (IS_ERR(dentry))
367 dentry = NULL;
369 kfree(tmpname);
371 return dentry;
375 * relay_open_buf - create a new relay channel buffer
377 * used by relay_open() and CPU hotplug.
379 static struct rchan_buf *relay_open_buf(struct rchan *chan, unsigned int cpu)
381 struct rchan_buf *buf = NULL;
382 struct dentry *dentry;
384 if (chan->is_global)
385 return *per_cpu_ptr(chan->buf, 0);
387 buf = relay_create_buf(chan);
388 if (!buf)
389 return NULL;
391 if (chan->has_base_filename) {
392 dentry = relay_create_buf_file(chan, buf, cpu);
393 if (!dentry)
394 goto free_buf;
395 relay_set_buf_dentry(buf, dentry);
396 } else {
397 /* Only retrieve global info, nothing more, nothing less */
398 dentry = chan->cb->create_buf_file(NULL, NULL,
399 S_IRUSR, buf,
400 &chan->is_global);
401 if (IS_ERR_OR_NULL(dentry))
402 goto free_buf;
405 buf->cpu = cpu;
406 __relay_reset(buf, 1);
408 if(chan->is_global) {
409 *per_cpu_ptr(chan->buf, 0) = buf;
410 buf->cpu = 0;
413 return buf;
415 free_buf:
416 relay_destroy_buf(buf);
417 return NULL;
421 * relay_close_buf - close a channel buffer
422 * @buf: channel buffer
424 * Marks the buffer finalized and restores the default callbacks.
425 * The channel buffer and channel buffer data structure are then freed
426 * automatically when the last reference is given up.
428 static void relay_close_buf(struct rchan_buf *buf)
430 buf->finalized = 1;
431 irq_work_sync(&buf->wakeup_work);
432 buf->chan->cb->remove_buf_file(buf->dentry);
433 kref_put(&buf->kref, relay_remove_buf);
436 int relay_prepare_cpu(unsigned int cpu)
438 struct rchan *chan;
439 struct rchan_buf *buf;
441 mutex_lock(&relay_channels_mutex);
442 list_for_each_entry(chan, &relay_channels, list) {
443 if ((buf = *per_cpu_ptr(chan->buf, cpu)))
444 continue;
445 buf = relay_open_buf(chan, cpu);
446 if (!buf) {
447 pr_err("relay: cpu %d buffer creation failed\n", cpu);
448 mutex_unlock(&relay_channels_mutex);
449 return -ENOMEM;
451 *per_cpu_ptr(chan->buf, cpu) = buf;
453 mutex_unlock(&relay_channels_mutex);
454 return 0;
458 * relay_open - create a new relay channel
459 * @base_filename: base name of files to create, %NULL for buffering only
460 * @parent: dentry of parent directory, %NULL for root directory or buffer
461 * @subbuf_size: size of sub-buffers
462 * @n_subbufs: number of sub-buffers
463 * @cb: client callback functions
464 * @private_data: user-defined data
466 * Returns channel pointer if successful, %NULL otherwise.
468 * Creates a channel buffer for each cpu using the sizes and
469 * attributes specified. The created channel buffer files
470 * will be named base_filename0...base_filenameN-1. File
471 * permissions will be %S_IRUSR.
473 * If opening a buffer (@parent = NULL) that you later wish to register
474 * in a filesystem, call relay_late_setup_files() once the @parent dentry
475 * is available.
477 struct rchan *relay_open(const char *base_filename,
478 struct dentry *parent,
479 size_t subbuf_size,
480 size_t n_subbufs,
481 const struct rchan_callbacks *cb,
482 void *private_data)
484 unsigned int i;
485 struct rchan *chan;
486 struct rchan_buf *buf;
488 if (!(subbuf_size && n_subbufs))
489 return NULL;
490 if (subbuf_size > UINT_MAX / n_subbufs)
491 return NULL;
492 if (!cb || !cb->create_buf_file || !cb->remove_buf_file)
493 return NULL;
495 chan = kzalloc(sizeof(struct rchan), GFP_KERNEL);
496 if (!chan)
497 return NULL;
499 chan->buf = alloc_percpu(struct rchan_buf *);
500 if (!chan->buf) {
501 kfree(chan);
502 return NULL;
505 chan->version = RELAYFS_CHANNEL_VERSION;
506 chan->n_subbufs = n_subbufs;
507 chan->subbuf_size = subbuf_size;
508 chan->alloc_size = PAGE_ALIGN(subbuf_size * n_subbufs);
509 chan->parent = parent;
510 chan->private_data = private_data;
511 if (base_filename) {
512 chan->has_base_filename = 1;
513 strlcpy(chan->base_filename, base_filename, NAME_MAX);
515 chan->cb = cb;
516 kref_init(&chan->kref);
518 mutex_lock(&relay_channels_mutex);
519 for_each_online_cpu(i) {
520 buf = relay_open_buf(chan, i);
521 if (!buf)
522 goto free_bufs;
523 *per_cpu_ptr(chan->buf, i) = buf;
525 list_add(&chan->list, &relay_channels);
526 mutex_unlock(&relay_channels_mutex);
528 return chan;
530 free_bufs:
531 for_each_possible_cpu(i) {
532 if ((buf = *per_cpu_ptr(chan->buf, i)))
533 relay_close_buf(buf);
536 kref_put(&chan->kref, relay_destroy_channel);
537 mutex_unlock(&relay_channels_mutex);
538 return NULL;
540 EXPORT_SYMBOL_GPL(relay_open);
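/*
 * Editorial example (not part of relay.c): a minimal relay client.  The
 * create_buf_file()/remove_buf_file() callbacks expose each per-CPU
 * buffer as a debugfs file backed by relay_file_operations, and
 * relay_write() appends fixed-size records.  Assumes <linux/debugfs.h>;
 * names such as example_dir, example_chan and example_log() are
 * hypothetical.
 */
static struct dentry *example_create_buf_file(const char *filename,
					      struct dentry *parent,
					      umode_t mode,
					      struct rchan_buf *buf,
					      int *is_global)
{
	return debugfs_create_file(filename, mode, parent, buf,
				   &relay_file_operations);
}

static int example_remove_buf_file(struct dentry *dentry)
{
	debugfs_remove(dentry);
	return 0;
}

static const struct rchan_callbacks example_relay_callbacks = {
	.create_buf_file	= example_create_buf_file,
	.remove_buf_file	= example_remove_buf_file,
};

static struct rchan *example_chan;

static int example_init(struct dentry *example_dir)
{
	/* Eight 64 KiB sub-buffers per CPU; files named example0..exampleN-1. */
	example_chan = relay_open("example", example_dir, 65536, 8,
				  &example_relay_callbacks, NULL);
	return example_chan ? 0 : -ENOMEM;
}

static void example_log(const void *rec, size_t len)
{
	/* Copies len bytes into the current CPU's buffer, switching sub-buffers as needed. */
	relay_write(example_chan, rec, len);
}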
542 struct rchan_percpu_buf_dispatcher {
543 struct rchan_buf *buf;
544 struct dentry *dentry;
547 /* Called in atomic context. */
548 static void __relay_set_buf_dentry(void *info)
550 struct rchan_percpu_buf_dispatcher *p = info;
552 relay_set_buf_dentry(p->buf, p->dentry);
556 * relay_late_setup_files - triggers file creation
557 * @chan: channel to operate on
558 * @base_filename: base name of files to create
559 * @parent: dentry of parent directory, %NULL for root directory
561 * Returns 0 if successful, non-zero otherwise.
563 * Used to set up files for a previously buffer-only channel created
564 * by relay_open() with a NULL parent dentry.
566 * For example, this is useful for performing early tracing in the kernel,
567 * before the VFS is up, and then exposing the early results once the dentry
568 * is available.
570 int relay_late_setup_files(struct rchan *chan,
571 const char *base_filename,
572 struct dentry *parent)
574 int err = 0;
575 unsigned int i, curr_cpu;
576 unsigned long flags;
577 struct dentry *dentry;
578 struct rchan_buf *buf;
579 struct rchan_percpu_buf_dispatcher disp;
581 if (!chan || !base_filename)
582 return -EINVAL;
584 strlcpy(chan->base_filename, base_filename, NAME_MAX);
586 mutex_lock(&relay_channels_mutex);
587 /* Is chan already set up? */
588 if (unlikely(chan->has_base_filename)) {
589 mutex_unlock(&relay_channels_mutex);
590 return -EEXIST;
592 chan->has_base_filename = 1;
593 chan->parent = parent;
595 if (chan->is_global) {
596 err = -EINVAL;
597 buf = *per_cpu_ptr(chan->buf, 0);
598 if (!WARN_ON_ONCE(!buf)) {
599 dentry = relay_create_buf_file(chan, buf, 0);
600 if (dentry && !WARN_ON_ONCE(!chan->is_global)) {
601 relay_set_buf_dentry(buf, dentry);
602 err = 0;
605 mutex_unlock(&relay_channels_mutex);
606 return err;
609 curr_cpu = get_cpu();
611 * The CPU hotplug notifier ran before us and created buffers with
612 * no files associated. So it's safe to call relay_setup_buf_file()
613 * on all currently online CPUs.
615 for_each_online_cpu(i) {
616 buf = *per_cpu_ptr(chan->buf, i);
617 if (unlikely(!buf)) {
618 WARN_ONCE(1, KERN_ERR "CPU has no buffer!\n");
619 err = -EINVAL;
620 break;
623 dentry = relay_create_buf_file(chan, buf, i);
624 if (unlikely(!dentry)) {
625 err = -EINVAL;
626 break;
629 if (curr_cpu == i) {
630 local_irq_save(flags);
631 relay_set_buf_dentry(buf, dentry);
632 local_irq_restore(flags);
633 } else {
634 disp.buf = buf;
635 disp.dentry = dentry;
636 smp_mb();
637 /* relay_channels_mutex must be held, so wait. */
638 err = smp_call_function_single(i,
639 __relay_set_buf_dentry,
640 &disp, 1);
642 if (unlikely(err))
643 break;
645 put_cpu();
646 mutex_unlock(&relay_channels_mutex);
648 return err;
650 EXPORT_SYMBOL_GPL(relay_late_setup_files);
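/*
 * Editorial example (not part of relay.c): attaching files to a channel
 * that was opened early, before its debugfs parent existed, by calling
 * relay_open() with a NULL base filename and parent.  Note that in that
 * mode relay_open_buf() above still invokes create_buf_file() once per
 * buffer with a NULL filename, so the client's callback has to cope with
 * that case.  example_chan is the hypothetical channel from the sketch
 * above.
 */
static int example_expose_files(struct dentry *debugfs_dir)
{
	/* Now that a parent dentry exists, create the exampleN files. */
	return relay_late_setup_files(example_chan, "example", debugfs_dir);
}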
653 * relay_switch_subbuf - switch to a new sub-buffer
654 * @buf: channel buffer
655 * @length: size of current event
657 * Returns either the length passed in or 0 if full.
659 * Performs sub-buffer-switch tasks such as invoking callbacks,
660 * updating padding counts, waking up readers, etc.
662 size_t relay_switch_subbuf(struct rchan_buf *buf, size_t length)
664 void *old, *new;
665 size_t old_subbuf, new_subbuf;
667 if (unlikely(length > buf->chan->subbuf_size))
668 goto toobig;
670 if (buf->offset != buf->chan->subbuf_size + 1) {
671 buf->prev_padding = buf->chan->subbuf_size - buf->offset;
672 old_subbuf = buf->subbufs_produced % buf->chan->n_subbufs;
673 buf->padding[old_subbuf] = buf->prev_padding;
674 buf->subbufs_produced++;
675 if (buf->dentry)
676 d_inode(buf->dentry)->i_size +=
677 buf->chan->subbuf_size -
678 buf->padding[old_subbuf];
679 else
680 buf->early_bytes += buf->chan->subbuf_size -
681 buf->padding[old_subbuf];
682 smp_mb();
683 if (waitqueue_active(&buf->read_wait)) {
685 * Calling wake_up_interruptible() from here
686 * will deadlock if we happen to be logging
687 * from the scheduler (trying to re-grab
688 * rq->lock), so defer it.
690 irq_work_queue(&buf->wakeup_work);
694 old = buf->data;
695 new_subbuf = buf->subbufs_produced % buf->chan->n_subbufs;
696 new = buf->start + new_subbuf * buf->chan->subbuf_size;
697 buf->offset = 0;
698 if (!relay_subbuf_start(buf, new, old, buf->prev_padding)) {
699 buf->offset = buf->chan->subbuf_size + 1;
700 return 0;
702 buf->data = new;
703 buf->padding[new_subbuf] = 0;
705 if (unlikely(length + buf->offset > buf->chan->subbuf_size))
706 goto toobig;
708 return length;
710 toobig:
711 buf->chan->last_toobig = length;
712 return 0;
714 EXPORT_SYMBOL_GPL(relay_switch_subbuf);
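/*
 * Editorial example (not part of relay.c): a subbuf_start callback that
 * turns the channel into a "flight recorder".  Returning 1
 * unconditionally lets relay_switch_subbuf() keep cycling through
 * sub-buffers and overwrite unconsumed data; without a callback,
 * relay_subbuf_start() above returns !relay_buf_full() and new data is
 * dropped once the buffer fills.  It would be hooked up through the
 * .subbuf_start member of the rchan_callbacks passed to relay_open().
 */
static int example_subbuf_start(struct rchan_buf *buf,
				void *subbuf,
				void *prev_subbuf,
				size_t prev_padding)
{
	return 1;	/* always start the new sub-buffer: overwrite mode */
}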
717 * relay_subbufs_consumed - update the buffer's sub-buffers-consumed count
718 * @chan: the channel
719 * @cpu: the cpu associated with the channel buffer to update
720 * @subbufs_consumed: number of sub-buffers to add to current buf's count
722 * Adds to the channel buffer's consumed sub-buffer count.
723 * subbufs_consumed should be the number of sub-buffers newly consumed,
724 * not the total consumed.
726 * NOTE. Kernel clients don't need to call this function if the channel
727 * mode is 'overwrite'.
729 void relay_subbufs_consumed(struct rchan *chan,
730 unsigned int cpu,
731 size_t subbufs_consumed)
733 struct rchan_buf *buf;
735 if (!chan || cpu >= NR_CPUS)
736 return;
738 buf = *per_cpu_ptr(chan->buf, cpu);
739 if (!buf || subbufs_consumed > chan->n_subbufs)
740 return;
742 if (subbufs_consumed > buf->subbufs_produced - buf->subbufs_consumed)
743 buf->subbufs_consumed = buf->subbufs_produced;
744 else
745 buf->subbufs_consumed += subbufs_consumed;
747 EXPORT_SYMBOL_GPL(relay_subbufs_consumed);
750 * relay_close - close the channel
751 * @chan: the channel
753 * Closes all channel buffers and frees the channel.
755 void relay_close(struct rchan *chan)
757 struct rchan_buf *buf;
758 unsigned int i;
760 if (!chan)
761 return;
763 mutex_lock(&relay_channels_mutex);
764 if (chan->is_global && (buf = *per_cpu_ptr(chan->buf, 0)))
765 relay_close_buf(buf);
766 else
767 for_each_possible_cpu(i)
768 if ((buf = *per_cpu_ptr(chan->buf, i)))
769 relay_close_buf(buf);
771 if (chan->last_toobig)
772 printk(KERN_WARNING "relay: one or more items not logged "
773 "[item size (%zd) > sub-buffer size (%zd)]\n",
774 chan->last_toobig, chan->subbuf_size);
776 list_del(&chan->list);
777 kref_put(&chan->kref, relay_destroy_channel);
778 mutex_unlock(&relay_channels_mutex);
780 EXPORT_SYMBOL_GPL(relay_close);
783 * relay_flush - close the channel
784 * @chan: the channel
786 * Flushes all channel buffers, i.e. forces buffer switch.
788 void relay_flush(struct rchan *chan)
790 struct rchan_buf *buf;
791 unsigned int i;
793 if (!chan)
794 return;
796 if (chan->is_global && (buf = *per_cpu_ptr(chan->buf, 0))) {
797 relay_switch_subbuf(buf, 0);
798 return;
801 mutex_lock(&relay_channels_mutex);
802 for_each_possible_cpu(i)
803 if ((buf = *per_cpu_ptr(chan->buf, i)))
804 relay_switch_subbuf(buf, 0);
805 mutex_unlock(&relay_channels_mutex);
807 EXPORT_SYMBOL_GPL(relay_flush);
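/*
 * Editorial example (not part of relay.c): orderly teardown of the
 * hypothetical example channel.  relay_flush() forces a final sub-buffer
 * switch so partially filled sub-buffers become visible to readers
 * before relay_close() finalizes the buffers and removes the files.
 */
static void example_exit(void)
{
	if (!example_chan)
		return;
	relay_flush(example_chan);
	relay_close(example_chan);
	example_chan = NULL;
}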
810 * relay_file_open - open file op for relay files
811 * @inode: the inode
812 * @filp: the file
814 * Increments the channel buffer refcount.
816 static int relay_file_open(struct inode *inode, struct file *filp)
818 struct rchan_buf *buf = inode->i_private;
819 kref_get(&buf->kref);
820 filp->private_data = buf;
822 return nonseekable_open(inode, filp);
826 * relay_file_mmap - mmap file op for relay files
827 * @filp: the file
828 * @vma: the vma describing what to map
830 * Calls upon relay_mmap_buf() to map the file into user space.
832 static int relay_file_mmap(struct file *filp, struct vm_area_struct *vma)
834 struct rchan_buf *buf = filp->private_data;
835 return relay_mmap_buf(buf, vma);
839 * relay_file_poll - poll file op for relay files
840 * @filp: the file
841 * @wait: poll table
843 * Poll implementation.
845 static __poll_t relay_file_poll(struct file *filp, poll_table *wait)
847 __poll_t mask = 0;
848 struct rchan_buf *buf = filp->private_data;
850 if (buf->finalized)
851 return EPOLLERR;
853 if (filp->f_mode & FMODE_READ) {
854 poll_wait(filp, &buf->read_wait, wait);
855 if (!relay_buf_empty(buf))
856 mask |= EPOLLIN | EPOLLRDNORM;
859 return mask;
863 * relay_file_release - release file op for relay files
864 * @inode: the inode
865 * @filp: the file
867 * Decrements the channel refcount, as the filesystem is
868 * no longer using it.
870 static int relay_file_release(struct inode *inode, struct file *filp)
872 struct rchan_buf *buf = filp->private_data;
873 kref_put(&buf->kref, relay_remove_buf);
875 return 0;
879 * relay_file_read_consume - update the consumed count for the buffer
881 static void relay_file_read_consume(struct rchan_buf *buf,
882 size_t read_pos,
883 size_t bytes_consumed)
885 size_t subbuf_size = buf->chan->subbuf_size;
886 size_t n_subbufs = buf->chan->n_subbufs;
887 size_t read_subbuf;
889 if (buf->subbufs_produced == buf->subbufs_consumed &&
890 buf->offset == buf->bytes_consumed)
891 return;
893 if (buf->bytes_consumed + bytes_consumed > subbuf_size) {
894 relay_subbufs_consumed(buf->chan, buf->cpu, 1);
895 buf->bytes_consumed = 0;
898 buf->bytes_consumed += bytes_consumed;
899 if (!read_pos)
900 read_subbuf = buf->subbufs_consumed % n_subbufs;
901 else
902 read_subbuf = read_pos / buf->chan->subbuf_size;
903 if (buf->bytes_consumed + buf->padding[read_subbuf] == subbuf_size) {
904 if ((read_subbuf == buf->subbufs_produced % n_subbufs) &&
905 (buf->offset == subbuf_size))
906 return;
907 relay_subbufs_consumed(buf->chan, buf->cpu, 1);
908 buf->bytes_consumed = 0;
913 * relay_file_read_avail - boolean, are there unconsumed bytes available?
915 static int relay_file_read_avail(struct rchan_buf *buf)
917 size_t subbuf_size = buf->chan->subbuf_size;
918 size_t n_subbufs = buf->chan->n_subbufs;
919 size_t produced = buf->subbufs_produced;
920 size_t consumed;
922 relay_file_read_consume(buf, 0, 0);
924 consumed = buf->subbufs_consumed;
926 if (unlikely(buf->offset > subbuf_size)) {
927 if (produced == consumed)
928 return 0;
929 return 1;
932 if (unlikely(produced - consumed >= n_subbufs)) {
933 consumed = produced - n_subbufs + 1;
934 buf->subbufs_consumed = consumed;
935 buf->bytes_consumed = 0;
938 produced = (produced % n_subbufs) * subbuf_size + buf->offset;
939 consumed = (consumed % n_subbufs) * subbuf_size + buf->bytes_consumed;
941 if (consumed > produced)
942 produced += n_subbufs * subbuf_size;
944 if (consumed == produced) {
945 if (buf->offset == subbuf_size &&
946 buf->subbufs_produced > buf->subbufs_consumed)
947 return 1;
948 return 0;
951 return 1;
955 * relay_file_read_subbuf_avail - return bytes available in sub-buffer
956 * @read_pos: file read position
957 * @buf: relay channel buffer
959 static size_t relay_file_read_subbuf_avail(size_t read_pos,
960 struct rchan_buf *buf)
962 size_t padding, avail = 0;
963 size_t read_subbuf, read_offset, write_subbuf, write_offset;
964 size_t subbuf_size = buf->chan->subbuf_size;
966 write_subbuf = (buf->data - buf->start) / subbuf_size;
967 write_offset = buf->offset > subbuf_size ? subbuf_size : buf->offset;
968 read_subbuf = read_pos / subbuf_size;
969 read_offset = read_pos % subbuf_size;
970 padding = buf->padding[read_subbuf];
972 if (read_subbuf == write_subbuf) {
973 if (read_offset + padding < write_offset)
974 avail = write_offset - (read_offset + padding);
975 } else
976 avail = (subbuf_size - padding) - read_offset;
978 return avail;
982 * relay_file_read_start_pos - find the first available byte to read
983 * @buf: relay channel buffer
985 * If the read_pos is in the middle of padding, return the
986 * position of the first actually available byte, otherwise
987 * return the original value.
989 static size_t relay_file_read_start_pos(struct rchan_buf *buf)
991 size_t read_subbuf, padding, padding_start, padding_end;
992 size_t subbuf_size = buf->chan->subbuf_size;
993 size_t n_subbufs = buf->chan->n_subbufs;
994 size_t consumed = buf->subbufs_consumed % n_subbufs;
995 size_t read_pos = consumed * subbuf_size + buf->bytes_consumed;
997 read_subbuf = read_pos / subbuf_size;
998 padding = buf->padding[read_subbuf];
999 padding_start = (read_subbuf + 1) * subbuf_size - padding;
1000 padding_end = (read_subbuf + 1) * subbuf_size;
1001 if (read_pos >= padding_start && read_pos < padding_end) {
1002 read_subbuf = (read_subbuf + 1) % n_subbufs;
1003 read_pos = read_subbuf * subbuf_size;
1006 return read_pos;
1010 * relay_file_read_end_pos - return the new read position
1011 * @read_pos: file read position
1012 * @buf: relay channel buffer
1013 * @count: number of bytes to be read
1015 static size_t relay_file_read_end_pos(struct rchan_buf *buf,
1016 size_t read_pos,
1017 size_t count)
1019 size_t read_subbuf, padding, end_pos;
1020 size_t subbuf_size = buf->chan->subbuf_size;
1021 size_t n_subbufs = buf->chan->n_subbufs;
1023 read_subbuf = read_pos / subbuf_size;
1024 padding = buf->padding[read_subbuf];
1025 if (read_pos % subbuf_size + count + padding == subbuf_size)
1026 end_pos = (read_subbuf + 1) * subbuf_size;
1027 else
1028 end_pos = read_pos + count;
1029 if (end_pos >= subbuf_size * n_subbufs)
1030 end_pos = 0;
1032 return end_pos;
1035 static ssize_t relay_file_read(struct file *filp,
1036 char __user *buffer,
1037 size_t count,
1038 loff_t *ppos)
1040 struct rchan_buf *buf = filp->private_data;
1041 size_t read_start, avail;
1042 size_t written = 0;
1043 int ret;
1045 if (!count)
1046 return 0;
1048 inode_lock(file_inode(filp));
1049 do {
1050 void *from;
1052 if (!relay_file_read_avail(buf))
1053 break;
1055 read_start = relay_file_read_start_pos(buf);
1056 avail = relay_file_read_subbuf_avail(read_start, buf);
1057 if (!avail)
1058 break;
1060 avail = min(count, avail);
1061 from = buf->start + read_start;
1062 ret = avail;
1063 if (copy_to_user(buffer, from, avail))
1064 break;
1066 buffer += ret;
1067 written += ret;
1068 count -= ret;
1070 relay_file_read_consume(buf, read_start, ret);
1071 *ppos = relay_file_read_end_pos(buf, read_start, ret);
1072 } while (count);
1073 inode_unlock(file_inode(filp));
1075 return written;
1078 static void relay_consume_bytes(struct rchan_buf *rbuf, int bytes_consumed)
1080 rbuf->bytes_consumed += bytes_consumed;
1082 if (rbuf->bytes_consumed >= rbuf->chan->subbuf_size) {
1083 relay_subbufs_consumed(rbuf->chan, rbuf->cpu, 1);
1084 rbuf->bytes_consumed %= rbuf->chan->subbuf_size;
1088 static void relay_pipe_buf_release(struct pipe_inode_info *pipe,
1089 struct pipe_buffer *buf)
1091 struct rchan_buf *rbuf;
1093 rbuf = (struct rchan_buf *)page_private(buf->page);
1094 relay_consume_bytes(rbuf, buf->private);
1097 static const struct pipe_buf_operations relay_pipe_buf_ops = {
1098 .release = relay_pipe_buf_release,
1099 .try_steal = generic_pipe_buf_try_steal,
1100 .get = generic_pipe_buf_get,
1103 static void relay_page_release(struct splice_pipe_desc *spd, unsigned int i)
1108 * subbuf_splice_actor - splice up to one subbuf's worth of data
1110 static ssize_t subbuf_splice_actor(struct file *in,
1111 loff_t *ppos,
1112 struct pipe_inode_info *pipe,
1113 size_t len,
1114 unsigned int flags,
1115 int *nonpad_ret)
1117 unsigned int pidx, poff, total_len, subbuf_pages, nr_pages;
1118 struct rchan_buf *rbuf = in->private_data;
1119 unsigned int subbuf_size = rbuf->chan->subbuf_size;
1120 uint64_t pos = (uint64_t) *ppos;
1121 uint32_t alloc_size = (uint32_t) rbuf->chan->alloc_size;
1122 size_t read_start = (size_t) do_div(pos, alloc_size);
1123 size_t read_subbuf = read_start / subbuf_size;
1124 size_t padding = rbuf->padding[read_subbuf];
1125 size_t nonpad_end = read_subbuf * subbuf_size + subbuf_size - padding;
1126 struct page *pages[PIPE_DEF_BUFFERS];
1127 struct partial_page partial[PIPE_DEF_BUFFERS];
1128 struct splice_pipe_desc spd = {
1129 .pages = pages,
1130 .nr_pages = 0,
1131 .nr_pages_max = PIPE_DEF_BUFFERS,
1132 .partial = partial,
1133 .ops = &relay_pipe_buf_ops,
1134 .spd_release = relay_page_release,
1136 ssize_t ret;
1138 if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
1139 return 0;
1140 if (splice_grow_spd(pipe, &spd))
1141 return -ENOMEM;
1144 * Adjust read len, if longer than what is available
1146 if (len > (subbuf_size - read_start % subbuf_size))
1147 len = subbuf_size - read_start % subbuf_size;
1149 subbuf_pages = rbuf->chan->alloc_size >> PAGE_SHIFT;
1150 pidx = (read_start / PAGE_SIZE) % subbuf_pages;
1151 poff = read_start & ~PAGE_MASK;
1152 nr_pages = min_t(unsigned int, subbuf_pages, spd.nr_pages_max);
1154 for (total_len = 0; spd.nr_pages < nr_pages; spd.nr_pages++) {
1155 unsigned int this_len, this_end, private;
1156 unsigned int cur_pos = read_start + total_len;
1158 if (!len)
1159 break;
1161 this_len = min_t(unsigned long, len, PAGE_SIZE - poff);
1162 private = this_len;
1164 spd.pages[spd.nr_pages] = rbuf->page_array[pidx];
1165 spd.partial[spd.nr_pages].offset = poff;
1167 this_end = cur_pos + this_len;
1168 if (this_end >= nonpad_end) {
1169 this_len = nonpad_end - cur_pos;
1170 private = this_len + padding;
1172 spd.partial[spd.nr_pages].len = this_len;
1173 spd.partial[spd.nr_pages].private = private;
1175 len -= this_len;
1176 total_len += this_len;
1177 poff = 0;
1178 pidx = (pidx + 1) % subbuf_pages;
1180 if (this_end >= nonpad_end) {
1181 spd.nr_pages++;
1182 break;
1186 ret = 0;
1187 if (!spd.nr_pages)
1188 goto out;
1190 ret = *nonpad_ret = splice_to_pipe(pipe, &spd);
1191 if (ret < 0 || ret < total_len)
1192 goto out;
1194 if (read_start + ret == nonpad_end)
1195 ret += padding;
1197 out:
1198 splice_shrink_spd(&spd);
1199 return ret;
1202 static ssize_t relay_file_splice_read(struct file *in,
1203 loff_t *ppos,
1204 struct pipe_inode_info *pipe,
1205 size_t len,
1206 unsigned int flags)
1208 ssize_t spliced;
1209 int ret;
1210 int nonpad_ret = 0;
1212 ret = 0;
1213 spliced = 0;
1215 while (len && !spliced) {
1216 ret = subbuf_splice_actor(in, ppos, pipe, len, flags, &nonpad_ret);
1217 if (ret < 0)
1218 break;
1219 else if (!ret) {
1220 if (flags & SPLICE_F_NONBLOCK)
1221 ret = -EAGAIN;
1222 break;
1225 *ppos += ret;
1226 if (ret > len)
1227 len = 0;
1228 else
1229 len -= ret;
1230 spliced += nonpad_ret;
1231 nonpad_ret = 0;
1234 if (spliced)
1235 return spliced;
1237 return ret;
1240 const struct file_operations relay_file_operations = {
1241 .open = relay_file_open,
1242 .poll = relay_file_poll,
1243 .mmap = relay_file_mmap,
1244 .read = relay_file_read,
1245 .llseek = no_llseek,
1246 .release = relay_file_release,
1247 .splice_read = relay_file_splice_read,
1249 EXPORT_SYMBOL_GPL(relay_file_operations);
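/*
 * Editorial example (not part of relay.c): draining one per-CPU relay
 * file from userspace with read(2).  relay_file_read() consumes what it
 * returns, so repeatedly reading until it returns 0 empties the buffer.
 * The path below is hypothetical.
 */
#if 0	/* userspace sketch, shown for illustration only */
#include <fcntl.h>
#include <unistd.h>

static void drain_cpu0(void)
{
	char buf[65536];
	ssize_t n;
	int fd = open("/sys/kernel/debug/example/example0", O_RDONLY);

	if (fd < 0)
		return;
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		;	/* process n bytes of trace records here */
	close(fd);
}
#endif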