1 /* SPDX-License-Identifier: GPL-2.0 */
6 #include <linux/compiler.h>
7 #include <linux/ktime.h>
8 #include <linux/wait.h>
9 #include <linux/string.h>
11 #include <linux/uaccess.h>
12 #include <uapi/linux/poll.h>
13 #include <uapi/linux/eventpoll.h>
/* ~832 bytes of stack space used max in sys_select/sys_poll before allocating
   additional memory. */
17 #define MAX_STACK_ALLOC 832
18 #define FRONTEND_STACK_ALLOC 256
19 #define SELECT_STACK_ALLOC FRONTEND_STACK_ALLOC
20 #define POLL_STACK_ALLOC FRONTEND_STACK_ALLOC
21 #define WQUEUES_STACK_ALLOC (MAX_STACK_ALLOC - FRONTEND_STACK_ALLOC)
22 #define N_INLINE_POLL_ENTRIES (WQUEUES_STACK_ALLOC / sizeof(struct poll_table_entry))
24 #define DEFAULT_POLLMASK (EPOLLIN | EPOLLOUT | EPOLLRDNORM | EPOLLWRNORM)
26 struct poll_table_struct
;
/*
 * structures and helpers for f_op->poll implementations
 */
/*
 * Callback invoked by poll_wait() to register a waiter on the given
 * wait queue; select/poll install their own implementations.
 */
typedef void (*poll_queue_proc)(struct file *, wait_queue_head_t *, struct poll_table_struct *);
/*
 * Do not touch the structure directly, use the access functions
 * poll_does_not_wait() and poll_requested_events() instead.
 */
37 typedef struct poll_table_struct
{
38 poll_queue_proc _qproc
;
42 static inline void poll_wait(struct file
* filp
, wait_queue_head_t
* wait_address
, poll_table
*p
)
44 if (p
&& p
->_qproc
&& wait_address
)
45 p
->_qproc(filp
, wait_address
, p
);
49 * Return true if it is guaranteed that poll will not wait. This is the case
50 * if the poll() of another file descriptor in the set got an event, so there
51 * is no need for waiting.
53 static inline bool poll_does_not_wait(const poll_table
*p
)
55 return p
== NULL
|| p
->_qproc
== NULL
;
59 * Return the set of events that the application wants to poll for.
60 * This is useful for drivers that need to know whether a DMA transfer has
61 * to be started implicitly on poll(). You typically only want to do that
62 * if the application is actually polling for POLLIN and/or POLLOUT.
64 static inline __poll_t
poll_requested_events(const poll_table
*p
)
66 return p
? p
->_key
: ~(__poll_t
)0;
69 static inline void init_poll_funcptr(poll_table
*pt
, poll_queue_proc qproc
)
72 pt
->_key
= ~(__poll_t
)0; /* all events enabled */
75 static inline bool file_can_poll(struct file
*file
)
77 return file
->f_op
->poll
;
80 static inline __poll_t
vfs_poll(struct file
*file
, struct poll_table_struct
*pt
)
82 if (unlikely(!file
->f_op
->poll
))
83 return DEFAULT_POLLMASK
;
84 return file
->f_op
->poll(file
, pt
);
87 struct poll_table_entry
{
90 wait_queue_entry_t wait
;
91 wait_queue_head_t
*wait_address
;
/*
 * Structures and helpers for select/poll syscall
 */
99 struct poll_table_page
*table
;
100 struct task_struct
*polling_task
;
104 struct poll_table_entry inline_entries
[N_INLINE_POLL_ENTRIES
];
/* Set up @pwq before a poll/select cycle; torn down by poll_freewait(). */
extern void poll_initwait(struct poll_wqueues *pwq);
/* Undo poll_initwait(): remove wait-queue registrations made via @pwq. */
extern void poll_freewait(struct poll_wqueues *pwq);
/* Estimate the timer accuracy/slack for the timeout in @tv
 * (NOTE(review): units presumably ns — confirm in the implementation). */
extern u64 select_estimate_accuracy(struct timespec64 *tv);
111 #define MAX_INT64_SECONDS (((s64)(~((u64)0)>>1)/HZ)-1)
/* Backend shared by the select(2) syscall family: waits on the three
 * __user fd sets until ready or @end_time (presumably NULL means wait
 * indefinitely — verify against callers). */
extern int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
			   fd_set __user *exp, struct timespec64 *end_time);
116 extern int poll_select_set_timeout(struct timespec64
*to
, time64_t sec
,
119 #define __MAP(v, from, to) \
120 (from < to ? (v & from) * (to/from) : (v & from) / (from/to))
122 static inline __u16
mangle_poll(__poll_t val
)
124 __u16 v
= (__force __u16
)val
;
125 #define M(X) __MAP(v, (__force __u16)EPOLL##X, POLL##X)
126 return M(IN
) | M(OUT
) | M(PRI
) | M(ERR
) | M(NVAL
) |
127 M(RDNORM
) | M(RDBAND
) | M(WRNORM
) | M(WRBAND
) |
128 M(HUP
) | M(RDHUP
) | M(MSG
);
132 static inline __poll_t
demangle_poll(u16 val
)
134 #define M(X) (__force __poll_t)__MAP(val, POLL##X, (__force __u16)EPOLL##X)
135 return M(IN
) | M(OUT
) | M(PRI
) | M(ERR
) | M(NVAL
) |
136 M(RDNORM
) | M(RDBAND
) | M(WRNORM
) | M(WRBAND
) |
137 M(HUP
) | M(RDHUP
) | M(MSG
);
143 #endif /* _LINUX_POLL_H */