cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

poll.h (4085B)


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_POLL_H
#define _LINUX_POLL_H


#include <linux/compiler.h>
#include <linux/ktime.h>
#include <linux/wait.h>
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <uapi/linux/poll.h>
#include <uapi/linux/eventpoll.h>

/* ~832 bytes of stack space used max in sys_select/sys_poll before allocating
   additional memory. */
#ifdef __clang__
#define MAX_STACK_ALLOC 768
#else
#define MAX_STACK_ALLOC 832
#endif
#define FRONTEND_STACK_ALLOC	256
#define SELECT_STACK_ALLOC	FRONTEND_STACK_ALLOC
#define POLL_STACK_ALLOC	FRONTEND_STACK_ALLOC
#define WQUEUES_STACK_ALLOC	(MAX_STACK_ALLOC - FRONTEND_STACK_ALLOC)
#define N_INLINE_POLL_ENTRIES	(WQUEUES_STACK_ALLOC / sizeof(struct poll_table_entry))

#define DEFAULT_POLLMASK (EPOLLIN | EPOLLOUT | EPOLLRDNORM | EPOLLWRNORM)

struct poll_table_struct;

/*
 * structures and helpers for f_op->poll implementations
 */
typedef void (*poll_queue_proc)(struct file *, wait_queue_head_t *, struct poll_table_struct *);

/*
 * Do not touch the structure directly, use the access functions
 * poll_does_not_wait() and poll_requested_events() instead.
 */
typedef struct poll_table_struct {
	poll_queue_proc _qproc;
	__poll_t _key;
} poll_table;

static inline void poll_wait(struct file * filp, wait_queue_head_t * wait_address, poll_table *p)
{
	if (p && p->_qproc && wait_address)
		p->_qproc(filp, wait_address, p);
}
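
/*
 * Illustrative sketch (not part of the upstream header): a typical
 * f_op->poll implementation registers its wait queue via poll_wait() and
 * then reports the events that are currently ready. The foo_dev structure,
 * its read_wq wait queue and foo_data_ready() below are hypothetical.
 *
 *	static __poll_t foo_poll(struct file *file, poll_table *wait)
 *	{
 *		struct foo_dev *dev = file->private_data;
 *		__poll_t mask = 0;
 *
 *		poll_wait(file, &dev->read_wq, wait);
 *		if (foo_data_ready(dev))
 *			mask |= EPOLLIN | EPOLLRDNORM;
 *		return mask;
 *	}
 */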

/*
 * Return true if it is guaranteed that poll will not wait. This is the case
 * if the poll() of another file descriptor in the set got an event, so there
 * is no need for waiting.
 */
static inline bool poll_does_not_wait(const poll_table *p)
{
	return p == NULL || p->_qproc == NULL;
}
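
/*
 * Illustrative sketch (not part of the upstream header): inside a ->poll
 * method, poll_does_not_wait() lets a driver skip work that only matters
 * when the caller may actually sleep on the wait queue, for example arming
 * a wakeup interrupt. foo_arm_wakeup_irq() and dev are hypothetical.
 *
 *	poll_wait(file, &dev->read_wq, wait);
 *	if (!poll_does_not_wait(wait))
 *		foo_arm_wakeup_irq(dev);
 */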

/*
 * Return the set of events that the application wants to poll for.
 * This is useful for drivers that need to know whether a DMA transfer has
 * to be started implicitly on poll(). You typically only want to do that
 * if the application is actually polling for POLLIN and/or POLLOUT.
 */
static inline __poll_t poll_requested_events(const poll_table *p)
{
	return p ? p->_key : ~(__poll_t)0;
}
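
/*
 * Illustrative sketch (not part of the upstream header): following the
 * comment above, a driver might kick off a receive DMA transfer only when
 * the caller is actually interested in input events. foo_start_rx_dma()
 * and dev are hypothetical.
 *
 *	poll_wait(file, &dev->read_wq, wait);
 *	if (poll_requested_events(wait) & (EPOLLIN | EPOLLRDNORM))
 *		foo_start_rx_dma(dev);
 */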

/* Initialise a poll_table with the wait-queue registration callback to use. */
static inline void init_poll_funcptr(poll_table *pt, poll_queue_proc qproc)
{
	pt->_qproc = qproc;
	pt->_key   = ~(__poll_t)0; /* all events enabled */
}

/* Return true if @file's file_operations provide a ->poll method. */
static inline bool file_can_poll(struct file *file)
{
	return file->f_op->poll;
}

/*
 * Call @file's ->poll method; files without one are considered always
 * readable and writable (DEFAULT_POLLMASK).
 */
static inline __poll_t vfs_poll(struct file *file, struct poll_table_struct *pt)
{
	if (unlikely(!file->f_op->poll))
		return DEFAULT_POLLMASK;
	return file->f_op->poll(file, pt);
}
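
/*
 * Illustrative sketch (not part of the upstream header): in-kernel callers
 * can query current readiness without registering on any wait queue by
 * passing a NULL poll_table, since poll_wait() ignores a NULL table.
 *
 *	__poll_t mask = vfs_poll(file, NULL);
 *
 *	if (mask & (EPOLLIN | EPOLLRDNORM))
 *		... data can be read without blocking ...
 */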

struct poll_table_entry {
	struct file *filp;
	__poll_t key;
	wait_queue_entry_t wait;
	wait_queue_head_t *wait_address;
};

/*
 * Structures and helpers for select/poll syscall
 */
struct poll_wqueues {
	poll_table pt;
	struct poll_table_page *table;
	struct task_struct *polling_task;
	int triggered;
	int error;
	int inline_index;
	struct poll_table_entry inline_entries[N_INLINE_POLL_ENTRIES];
};

extern void poll_initwait(struct poll_wqueues *pwq);
extern void poll_freewait(struct poll_wqueues *pwq);
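
/*
 * Illustrative sketch (not part of the upstream header): the select/poll
 * core (fs/select.c) wraps its poll_table in a poll_wqueues so that ->poll
 * methods add wait-queue entries to it, then sleeps and finally tears
 * everything down with poll_freewait(). A minimal single-file version of
 * that pattern might look like this; the sleep step is omitted.
 *
 *	struct poll_wqueues table;
 *	__poll_t mask;
 *
 *	poll_initwait(&table);
 *	mask = vfs_poll(file, &table.pt);
 *	... sleep until woken, then re-check readiness as needed ...
 *	poll_freewait(&table);
 */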
extern u64 select_estimate_accuracy(struct timespec64 *tv);

#define MAX_INT64_SECONDS (((s64)(~((u64)0)>>1)/HZ)-1)

extern int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
			   fd_set __user *exp, struct timespec64 *end_time);

extern int poll_select_set_timeout(struct timespec64 *to, time64_t sec,
				   long nsec);

/*
 * Move the single event bit selected by @from to the bit position given by
 * @to; both masks are assumed to be powers of two, so the shift can be done
 * with a multiply or a divide.
 */
#define __MAP(v, from, to) \
	(from < to ? (v & from) * (to/from) : (v & from) / (from/to))

/*
 * Convert a mask of EPOLL* event bits (which have fixed values on all
 * architectures) into the architecture's POLL* encoding for userspace.
 */
static inline __u16 mangle_poll(__poll_t val)
{
	__u16 v = (__force __u16)val;
#define M(X) __MAP(v, (__force __u16)EPOLL##X, POLL##X)
	return M(IN) | M(OUT) | M(PRI) | M(ERR) | M(NVAL) |
		M(RDNORM) | M(RDBAND) | M(WRNORM) | M(WRBAND) |
		M(HUP) | M(RDHUP) | M(MSG);
#undef M
}

/* Inverse of mangle_poll(): map the architecture's POLL* bits to EPOLL*. */
static inline __poll_t demangle_poll(u16 val)
{
#define M(X) (__force __poll_t)__MAP(val, POLL##X, (__force __u16)EPOLL##X)
	return M(IN) | M(OUT) | M(PRI) | M(ERR) | M(NVAL) |
		M(RDNORM) | M(RDBAND) | M(WRNORM) | M(WRBAND) |
		M(HUP) | M(RDHUP) | M(MSG);
#undef M
}
#undef __MAP
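
/*
 * Illustrative note (not part of the upstream header): on architectures
 * whose POLL* values match the generic ones, mangle_poll() is effectively
 * the identity, e.g. mangle_poll(EPOLLIN | EPOLLOUT) yields POLLIN | POLLOUT.
 * The remapping only does real work on architectures such as sparc or mips
 * that define some POLL* constants with different numeric values.
 */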


#endif /* _LINUX_POLL_H */