// SPDX-License-Identifier: GPL-2.0
/*
 * Tty buffer allocation management
 */

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/ratelimit.h>
#define MIN_TTYB_SIZE	256
#define TTYB_ALIGN_MASK	255
/*
 * Byte threshold to limit memory consumption for flip buffers.
 * The actual memory limit is > 2x this amount.
 */
#define TTYB_DEFAULT_MEM_LIMIT	(640 * 1024UL)
/*
 * We default to dicing tty buffer allocations to this many characters
 * in order to avoid multiple page allocations. We know the size of
 * tty_buffer itself but it must also be taken into account that the
 * buffer is 256 byte aligned. See tty_buffer_alloc() for the allocation
 * logic this must match.
 */

#define TTY_BUFFER_PAGE	(((PAGE_SIZE - sizeof(struct tty_buffer)) / 2) & ~0xFF)
/**
 * tty_buffer_lock_exclusive	-	gain exclusive access to buffer
 * tty_buffer_unlock_exclusive	-	release exclusive access
 * @port: tty_port owning the flip buffer
 *
 * Guarantees safe use of the line discipline's receive_buf() method by
 * excluding the buffer work and any pending flush from using the flip
 * buffer. Data can continue to be added concurrently to the flip buffer
 * from the driver side.
 *
 * On release, the buffer work is restarted if there is data in the
 * flip buffer.
 */
void tty_buffer_lock_exclusive(struct tty_port *port)
{
	struct tty_bufhead *buf = &port->buf;

	atomic_inc(&buf->priority);
	mutex_lock(&buf->lock);
}
EXPORT_SYMBOL_GPL(tty_buffer_lock_exclusive);
void tty_buffer_unlock_exclusive(struct tty_port *port)
{
	struct tty_bufhead *buf = &port->buf;
	bool restart;

	restart = buf->head->commit != buf->head->read;

	atomic_dec(&buf->priority);
	mutex_unlock(&buf->lock);
	if (restart)
		queue_work(system_unbound_wq, &buf->work);
}
EXPORT_SYMBOL_GPL(tty_buffer_unlock_exclusive);
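
/*
 * Illustrative sketch (not part of the original file): a caller outside the
 * buffer work, e.g. a selection-paste style path, brackets its use of the
 * line discipline's receive path with the exclusive lock.  The names "ld",
 * "data", "count" and "sent" below are hypothetical.
 *
 *	tty_buffer_lock_exclusive(tty->port);
 *	sent = tty_ldisc_receive_buf(ld, data, NULL, count);
 *	tty_buffer_unlock_exclusive(tty->port);
 */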
/**
 * tty_buffer_space_avail	-	return unused buffer space
 * @port: tty_port owning the flip buffer
 *
 * Returns the # of bytes which can be written by the driver without
 * reaching the buffer limit.
 *
 * Note: this does not guarantee that memory is available to write
 * the returned # of bytes (use tty_prepare_flip_string_xxx() to
 * pre-allocate if memory guarantee is required).
 */
int tty_buffer_space_avail(struct tty_port *port)
{
	int space = port->buf.mem_limit - atomic_read(&port->buf.mem_used);

	return max(space, 0);
}
EXPORT_SYMBOL_GPL(tty_buffer_space_avail);
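
/*
 * Illustrative sketch (not part of the original file): a driver can use the
 * returned value to throttle how much it pulls from its hardware FIFO in one
 * pass.  "my_read_fifo" and "rx_buf" are hypothetical names.
 *
 *	int space = tty_buffer_space_avail(port);
 *	int len = my_read_fifo(rx_buf, min(space, (int)sizeof(rx_buf)));
 *
 *	tty_insert_flip_string(port, rx_buf, len);
 *	tty_flip_buffer_push(port);
 */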
static void tty_buffer_reset(struct tty_buffer *p, size_t size)
{
	p->used = 0;
	p->size = size;
	p->next = NULL;
	p->commit = 0;
	p->read = 0;
	p->flags = 0;
}
/**
 * tty_buffer_free_all		-	free buffers used by a tty
 * @port: tty port to free from
 *
 * Remove all the buffers pending on a tty whether queued with data
 * or in the free ring. Must be called when the tty is no longer in use.
 */
void tty_buffer_free_all(struct tty_port *port)
{
	struct tty_bufhead *buf = &port->buf;
	struct tty_buffer *p, *next;
	struct llist_node *llist;
	unsigned int freed = 0;
	int still_used;

	while ((p = buf->head) != NULL) {
		buf->head = p->next;
		freed += p->size;
		if (p->size > 0)
			kfree(p);
	}
	llist = llist_del_all(&buf->free);
	llist_for_each_entry_safe(p, next, llist, free)
		kfree(p);

	tty_buffer_reset(&buf->sentinel, 0);
	buf->head = &buf->sentinel;
	buf->tail = &buf->sentinel;

	still_used = atomic_xchg(&buf->mem_used, 0);
	WARN(still_used != freed, "we still have not freed %d bytes!",
	     still_used);
}
/**
 * tty_buffer_alloc	-	allocate a tty buffer
 * @port: tty port
 * @size: desired size (characters)
 *
 * Allocate a new tty buffer to hold the desired number of characters.
 * We round our buffers off in 256 character chunks to get better
 * allocation behaviour.
 *
 * Return NULL if out of memory or the allocation would exceed the
 * per device queue.
 */
static struct tty_buffer *tty_buffer_alloc(struct tty_port *port, size_t size)
{
	struct llist_node *free;
	struct tty_buffer *p;

	/* Round the buffer size out */
	size = __ALIGN_MASK(size, TTYB_ALIGN_MASK);

	if (size <= MIN_TTYB_SIZE) {
		free = llist_del_first(&port->buf.free);
		if (free) {
			p = llist_entry(free, struct tty_buffer, free);
			goto found;
		}
	}

	/* Should possibly check if this fails for the largest buffer we
	   have queued and recycle that ? */
	if (atomic_read(&port->buf.mem_used) > port->buf.mem_limit)
		return NULL;
	p = kmalloc(sizeof(struct tty_buffer) + 2 * size, GFP_ATOMIC);
	if (p == NULL)
		return NULL;

found:
	tty_buffer_reset(p, size);
	atomic_add(size, &port->buf.mem_used);
	return p;
}
/**
 * tty_buffer_free		-	free a tty buffer
 * @port: tty port owning the buffer
 * @b: the buffer to free
 *
 * Free a tty buffer, or add it to the free list according to our
 * internal strategy.
 */
static void tty_buffer_free(struct tty_port *port, struct tty_buffer *b)
{
	struct tty_bufhead *buf = &port->buf;

	/* Dumb strategy for now - should keep some stats */
	WARN_ON(atomic_sub_return(b->size, &buf->mem_used) < 0);

	if (b->size > MIN_TTYB_SIZE)
		kfree(b);
	else if (b->size > 0)
		llist_add(&b->free, &buf->free);
}
/**
 * tty_buffer_flush		-	flush full tty buffers
 * @tty: tty to flush
 * @ld: optional ldisc ptr (must be referenced)
 *
 * Flush all the buffers containing receive data. If ld != NULL,
 * flush the ldisc input buffer.
 *
 * Locking: takes buffer lock to ensure single-threaded flip buffer
 *	    'consumer'
 */
void tty_buffer_flush(struct tty_struct *tty, struct tty_ldisc *ld)
{
	struct tty_port *port = tty->port;
	struct tty_bufhead *buf = &port->buf;
	struct tty_buffer *next;

	atomic_inc(&buf->priority);

	mutex_lock(&buf->lock);
	/* paired w/ release in __tty_buffer_request_room; ensures there are
	 * no pending memory accesses to the freed buffer
	 */
	while ((next = smp_load_acquire(&buf->head->next)) != NULL) {
		tty_buffer_free(port, buf->head);
		buf->head = next;
	}
	buf->head->read = buf->head->commit;

	if (ld && ld->ops->flush_buffer)
		ld->ops->flush_buffer(tty);

	atomic_dec(&buf->priority);
	mutex_unlock(&buf->lock);
}
/**
 * tty_buffer_request_room		-	grow tty buffer if needed
 * @port: tty port
 * @size: size desired
 * @flags: buffer flags if new buffer allocated (default = 0)
 *
 * Make at least size bytes of linear space available for the tty
 * buffer. If we fail return the size we managed to find.
 *
 * Will change over to a new buffer if the current buffer is encoded as
 * TTY_NORMAL (so has no flags buffer) and the new buffer requires
 * a flags buffer.
 */
static int __tty_buffer_request_room(struct tty_port *port, size_t size,
				     int flags)
{
	struct tty_bufhead *buf = &port->buf;
	struct tty_buffer *b, *n;
	int left, change;

	b = buf->tail;
	if (b->flags & TTYB_NORMAL)
		left = 2 * b->size - b->used;
	else
		left = b->size - b->used;

	change = (b->flags & TTYB_NORMAL) && (~flags & TTYB_NORMAL);
	if (change || left < size) {
		/* This is the slow path - looking for new buffers to use */
		n = tty_buffer_alloc(port, size);
		if (n != NULL) {
			n->flags = flags;
			buf->tail = n;
			/* paired w/ acquire in flush_to_ldisc(); ensures
			 * flush_to_ldisc() sees buffer data.
			 */
			smp_store_release(&b->commit, b->used);
			/* paired w/ acquire in flush_to_ldisc(); ensures the
			 * latest commit value can be read before the head is
			 * advanced to the next buffer
			 */
			smp_store_release(&b->next, n);
		} else if (change)
			size = 0;
		else
			size = left;
	}
	return size;
}
int tty_buffer_request_room(struct tty_port *port, size_t size)
{
	return __tty_buffer_request_room(port, size, 0);
}
EXPORT_SYMBOL_GPL(tty_buffer_request_room);
/**
 * tty_insert_flip_string_fixed_flag - Add characters to the tty buffer
 * @port: tty port
 * @chars: characters
 * @flag: flag value for each character
 * @size: size
 *
 * Queue a series of bytes to the tty buffering. All the characters
 * passed are marked with the supplied flag. Returns the number added.
 */
int tty_insert_flip_string_fixed_flag(struct tty_port *port,
		const unsigned char *chars, char flag, size_t size)
{
	int copied = 0;

	do {
		int goal = min_t(size_t, size - copied, TTY_BUFFER_PAGE);
		int flags = (flag == TTY_NORMAL) ? TTYB_NORMAL : 0;
		int space = __tty_buffer_request_room(port, goal, flags);
		struct tty_buffer *tb = port->buf.tail;

		if (unlikely(space == 0))
			break;
		memcpy(char_buf_ptr(tb, tb->used), chars, space);
		if (~tb->flags & TTYB_NORMAL)
			memset(flag_buf_ptr(tb, tb->used), flag, space);
		tb->used += space;
		copied += space;
		chars += space;
		/* There is a small chance that we need to split the data over
		   several buffers. If this is the case we must loop */
	} while (unlikely(size > copied));
	return copied;
}
EXPORT_SYMBOL(tty_insert_flip_string_fixed_flag);
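
/*
 * Illustrative sketch (not part of the original file): a UART driver's RX
 * interrupt handler might push a block of received bytes, all flagged
 * TTY_NORMAL, and then schedule the flip.  "my_uart_rx_irq" and
 * "my_read_hw_fifo" are hypothetical names used only for this example.
 *
 *	static irqreturn_t my_uart_rx_irq(int irq, void *dev_id)
 *	{
 *		struct tty_port *port = dev_id;
 *		unsigned char rx[64];
 *		int len = my_read_hw_fifo(rx, sizeof(rx));
 *
 *		tty_insert_flip_string_fixed_flag(port, rx, TTY_NORMAL, len);
 *		tty_flip_buffer_push(port);
 *		return IRQ_HANDLED;
 *	}
 */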
/**
 * tty_insert_flip_string_flags	-	Add characters to the tty buffer
 * @port: tty port
 * @chars: characters
 * @flags: flag bytes
 * @size: size
 *
 * Queue a series of bytes to the tty buffering. For each character
 * the flags array indicates the status of the character. Returns the
 * number added.
 */
int tty_insert_flip_string_flags(struct tty_port *port,
		const unsigned char *chars, const char *flags, size_t size)
{
	int copied = 0;

	do {
		int goal = min_t(size_t, size - copied, TTY_BUFFER_PAGE);
		int space = tty_buffer_request_room(port, goal);
		struct tty_buffer *tb = port->buf.tail;

		if (unlikely(space == 0))
			break;
		memcpy(char_buf_ptr(tb, tb->used), chars, space);
		memcpy(flag_buf_ptr(tb, tb->used), flags, space);
		tb->used += space;
		copied += space;
		chars += space;
		flags += space;
		/* There is a small chance that we need to split the data over
		   several buffers. If this is the case we must loop */
	} while (unlikely(size > copied));
	return copied;
}
EXPORT_SYMBOL(tty_insert_flip_string_flags);
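
/*
 * Illustrative sketch (not part of the original file): when individual
 * characters carry different status, the driver builds a parallel flag
 * array.  Here the third byte is marked as a framing error; "data" and
 * "flags" are hypothetical names.
 *
 *	unsigned char data[3] = { 'a', 'b', 'c' };
 *	char flags[3] = { TTY_NORMAL, TTY_NORMAL, TTY_FRAME };
 *
 *	tty_insert_flip_string_flags(port, data, flags, 3);
 *	tty_flip_buffer_push(port);
 */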
/**
 * __tty_insert_flip_char	-	Add one character to the tty buffer
 * @port: tty port
 * @ch: character
 * @flag: flag byte
 *
 * Queue a single byte to the tty buffering, with an optional flag.
 * This is the slow path of tty_insert_flip_char.
 */
int __tty_insert_flip_char(struct tty_port *port, unsigned char ch, char flag)
{
	struct tty_buffer *tb;
	int flags = (flag == TTY_NORMAL) ? TTYB_NORMAL : 0;

	if (!__tty_buffer_request_room(port, 1, flags))
		return 0;

	tb = port->buf.tail;
	if (~tb->flags & TTYB_NORMAL)
		*flag_buf_ptr(tb, tb->used) = flag;
	*char_buf_ptr(tb, tb->used++) = ch;

	return 1;
}
EXPORT_SYMBOL(__tty_insert_flip_char);
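
/*
 * Illustrative sketch (not part of the original file): drivers normally call
 * the tty_insert_flip_char() fast-path helper from <linux/tty_flip.h>, which
 * falls back to __tty_insert_flip_char() when the tail buffer is full or the
 * flag does not fit.  "status", "ch" and "MY_HW_PARITY_ERR" are hypothetical.
 *
 *	if (status & MY_HW_PARITY_ERR)
 *		tty_insert_flip_char(port, ch, TTY_PARITY);
 *	else
 *		tty_insert_flip_char(port, ch, TTY_NORMAL);
 */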
/**
 * tty_schedule_flip	-	push characters to ldisc
 * @port: tty port to push from
 *
 * Takes any pending buffers and transfers their ownership to the
 * ldisc side of the queue. It then schedules those characters for
 * processing by the line discipline.
 */
void tty_schedule_flip(struct tty_port *port)
{
	struct tty_bufhead *buf = &port->buf;

	/* paired w/ acquire in flush_to_ldisc(); ensures
	 * flush_to_ldisc() sees buffer data.
	 */
	smp_store_release(&buf->tail->commit, buf->tail->used);
	queue_work(system_unbound_wq, &buf->work);
}
EXPORT_SYMBOL(tty_schedule_flip);
/**
 * tty_prepare_flip_string	-	make room for characters
 * @port: tty port
 * @chars: return pointer for character write area
 * @size: desired size
 *
 * Prepare a block of space in the buffer for data. Returns the length
 * available and buffer pointer to the space which is now allocated and
 * accounted for as ready for normal characters. This is used for drivers
 * that need their own block copy routines into the buffer. There is no
 * guarantee the buffer is a DMA target!
 */
int tty_prepare_flip_string(struct tty_port *port, unsigned char **chars,
		size_t size)
{
	int space = __tty_buffer_request_room(port, size, TTYB_NORMAL);

	if (likely(space)) {
		struct tty_buffer *tb = port->buf.tail;

		*chars = char_buf_ptr(tb, tb->used);
		if (~tb->flags & TTYB_NORMAL)
			memset(flag_buf_ptr(tb, tb->used), TTY_NORMAL, space);
		tb->used += space;
	}
	return space;
}
EXPORT_SYMBOL_GPL(tty_prepare_flip_string);
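
/*
 * Illustrative sketch (not part of the original file): a driver with its own
 * block-copy routine asks for space, copies directly into the flip buffer,
 * then pushes.  The copied length must not exceed the returned space;
 * "my_copy_from_hw" is a hypothetical helper.
 *
 *	unsigned char *dst;
 *	int space = tty_prepare_flip_string(port, &dst, len);
 *
 *	if (space) {
 *		my_copy_from_hw(dst, space);
 *		tty_flip_buffer_push(port);
 *	}
 */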
/**
 * tty_ldisc_receive_buf	-	forward data to line discipline
 * @ld: line discipline to process input
 * @p: char buffer
 * @f: TTY_* flags buffer
 * @count: number of bytes to process
 *
 * Callers other than flush_to_ldisc() need to exclude the kworker
 * from concurrent use of the line discipline, see paste_selection().
 *
 * Returns the number of bytes processed.
 */
int tty_ldisc_receive_buf(struct tty_ldisc *ld, const unsigned char *p,
			  const char *f, int count)
{
	if (ld->ops->receive_buf2)
		count = ld->ops->receive_buf2(ld->tty, p, f, count);
	else {
		count = min_t(int, count, ld->tty->receive_room);
		if (count && ld->ops->receive_buf)
			ld->ops->receive_buf(ld->tty, p, f, count);
	}
	return count;
}
EXPORT_SYMBOL_GPL(tty_ldisc_receive_buf);
static int
receive_buf(struct tty_port *port, struct tty_buffer *head, int count)
{
	unsigned char *p = char_buf_ptr(head, head->read);
	const char *f = NULL;
	int n;

	if (~head->flags & TTYB_NORMAL)
		f = flag_buf_ptr(head, head->read);

	n = port->client_ops->receive_buf(port, p, f, count);
	if (n > 0)
		memset(p, 0, n);
	return n;
}
/**
 * flush_to_ldisc
 * @work: buffer work item of the tty port being flushed
 *
 * This routine is called out of the software interrupt to flush data
 * from the buffer chain to the line discipline.
 *
 * The receive_buf method is single threaded for each tty instance.
 *
 * Locking: takes buffer lock to ensure single-threaded flip buffer
 *	    'consumer'
 */
static void flush_to_ldisc(struct work_struct *work)
{
	struct tty_port *port = container_of(work, struct tty_port, buf.work);
	struct tty_bufhead *buf = &port->buf;

	mutex_lock(&buf->lock);

	while (1) {
		struct tty_buffer *head = buf->head;
		struct tty_buffer *next;
		int count;

		/* Ldisc or user is trying to gain exclusive access */
		if (atomic_read(&buf->priority))
			break;

		/* paired w/ release in __tty_buffer_request_room();
		 * ensures commit value read is not stale if the head
		 * is advancing to the next buffer
		 */
		next = smp_load_acquire(&head->next);
		/* paired w/ release in __tty_buffer_request_room() or in
		 * tty_buffer_flush(); ensures we see the committed buffer data
		 */
		count = smp_load_acquire(&head->commit) - head->read;
		if (!count) {
			if (next == NULL)
				break;
			buf->head = next;
			tty_buffer_free(port, head);
			continue;
		}

		count = receive_buf(port, head, count);
		if (!count)
			break;
		head->read += count;
	}

	mutex_unlock(&buf->lock);
}
/**
 * tty_flip_buffer_push	-	push terminal flip buffers
 * @port: tty port to push
 *
 * Queue a push of the terminal flip buffers to the line discipline.
 * Can be called from IRQ/atomic context.
 *
 * In the event of the queue being busy for flipping the work will be
 * held off and retried later.
 */
void tty_flip_buffer_push(struct tty_port *port)
{
	tty_schedule_flip(port);
}
EXPORT_SYMBOL(tty_flip_buffer_push);
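
/*
 * Illustrative sketch (not part of the original file): the push only needs to
 * happen once per received batch, not once per character.  The hypothetical
 * helpers "my_hw_has_data" and "my_hw_read_char" drain the hardware first and
 * a single flip is scheduled at the end.
 *
 *	while (my_hw_has_data())
 *		tty_insert_flip_char(port, my_hw_read_char(), TTY_NORMAL);
 *	tty_flip_buffer_push(port);
 */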
/**
 * tty_buffer_init	-	prepare a tty buffer structure
 * @port: tty port to initialise
 *
 * Set up the initial state of the buffer management for a tty device.
 * Must be called before the other tty buffer functions are used.
 */
void tty_buffer_init(struct tty_port *port)
{
	struct tty_bufhead *buf = &port->buf;

	mutex_init(&buf->lock);
	tty_buffer_reset(&buf->sentinel, 0);
	buf->head = &buf->sentinel;
	buf->tail = &buf->sentinel;
	init_llist_head(&buf->free);
	atomic_set(&buf->mem_used, 0);
	atomic_set(&buf->priority, 0);
	INIT_WORK(&buf->work, flush_to_ldisc);
	buf->mem_limit = TTYB_DEFAULT_MEM_LIMIT;
}
/**
 * tty_buffer_set_limit	-	change the tty buffer memory limit
 * @port: tty port to change
 * @limit: memory limit to set
 *
 * Change the tty buffer memory limit.
 * Must be called before the other tty buffer functions are used.
 */
int tty_buffer_set_limit(struct tty_port *port, int limit)
{
	if (limit < MIN_TTYB_SIZE)
		return -EINVAL;
	port->buf.mem_limit = limit;
	return 0;
}
EXPORT_SYMBOL_GPL(tty_buffer_set_limit);
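
/*
 * Illustrative sketch (not part of the original file): a driver for a
 * high-throughput device can raise the default limit while setting up its
 * port; the 8MB figure and "my_port" name below are arbitrary for the
 * example.
 *
 *	tty_port_init(&my_port);
 *	tty_buffer_set_limit(&my_port, 8 * 1024 * 1024);
 */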
/* slave ptys can claim nested buffer lock when handling BRK and INTR */
void tty_buffer_set_lock_subclass(struct tty_port *port)
{
	lockdep_set_subclass(&port->buf.lock, TTY_LOCK_SLAVE);
}
bool tty_buffer_restart_work(struct tty_port *port)
{
	return queue_work(system_unbound_wq, &port->buf.work);
}
bool tty_buffer_cancel_work(struct tty_port *port)
{
	return cancel_work_sync(&port->buf.work);
}
void tty_buffer_flush_work(struct tty_port *port)
{
	flush_work(&port->buf.work);
}
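
/*
 * Illustrative sketch (not part of the original file): the work helpers above
 * are typically used around teardown and reactivation of a port.  A
 * hangup-style path might cancel the buffer work, do its cleanup, and restart
 * it once the port is usable again.
 *
 *	tty_buffer_cancel_work(port);
 *	...reconfigure or tear down driver state...
 *	tty_buffer_restart_work(port);
 */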