fdmon-epoll.c (4102B)
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * epoll(7) file descriptor monitoring
 */

#include "qemu/osdep.h"
#include <sys/epoll.h>
#include "qemu/rcu_queue.h"
#include "aio-posix.h"

/* The fd number threshold to switch to epoll */
#define EPOLL_ENABLE_THRESHOLD 64

void fdmon_epoll_disable(AioContext *ctx)
{
    if (ctx->epollfd >= 0) {
        close(ctx->epollfd);
        ctx->epollfd = -1;
    }

    /* Switch back */
    ctx->fdmon_ops = &fdmon_poll_ops;
}

static inline int epoll_events_from_pfd(int pfd_events)
{
    return (pfd_events & G_IO_IN ? EPOLLIN : 0) |
           (pfd_events & G_IO_OUT ? EPOLLOUT : 0) |
           (pfd_events & G_IO_HUP ? EPOLLHUP : 0) |
           (pfd_events & G_IO_ERR ? EPOLLERR : 0);
}

static void fdmon_epoll_update(AioContext *ctx,
                               AioHandler *old_node,
                               AioHandler *new_node)
{
    struct epoll_event event = {
        .data.ptr = new_node,
        .events = new_node ? epoll_events_from_pfd(new_node->pfd.events) : 0,
    };
    int r;

    if (!new_node) {
        r = epoll_ctl(ctx->epollfd, EPOLL_CTL_DEL, old_node->pfd.fd, &event);
    } else if (!old_node) {
        r = epoll_ctl(ctx->epollfd, EPOLL_CTL_ADD, new_node->pfd.fd, &event);
    } else {
        r = epoll_ctl(ctx->epollfd, EPOLL_CTL_MOD, new_node->pfd.fd, &event);
    }

    if (r) {
        fdmon_epoll_disable(ctx);
    }
}

static int fdmon_epoll_wait(AioContext *ctx, AioHandlerList *ready_list,
                            int64_t timeout)
{
    GPollFD pfd = {
        .fd = ctx->epollfd,
        .events = G_IO_IN | G_IO_OUT | G_IO_HUP | G_IO_ERR,
    };
    AioHandler *node;
    int i, ret = 0;
    struct epoll_event events[128];

    /* Fall back while external clients are disabled */
    if (qatomic_read(&ctx->external_disable_cnt)) {
        return fdmon_poll_ops.wait(ctx, ready_list, timeout);
    }

    if (timeout > 0) {
        ret = qemu_poll_ns(&pfd, 1, timeout);
        if (ret > 0) {
            timeout = 0;
        }
    }
    if (timeout <= 0 || ret > 0) {
        ret = epoll_wait(ctx->epollfd, events,
                         ARRAY_SIZE(events),
                         timeout);
        if (ret <= 0) {
            goto out;
        }
        for (i = 0; i < ret; i++) {
            int ev = events[i].events;
            int revents = (ev & EPOLLIN ? G_IO_IN : 0) |
                          (ev & EPOLLOUT ? G_IO_OUT : 0) |
                          (ev & EPOLLHUP ? G_IO_HUP : 0) |
                          (ev & EPOLLERR ? G_IO_ERR : 0);

            node = events[i].data.ptr;
            aio_add_ready_handler(ready_list, node, revents);
        }
    }
out:
    return ret;
}

static const FDMonOps fdmon_epoll_ops = {
    .update = fdmon_epoll_update,
    .wait = fdmon_epoll_wait,
    .need_wait = aio_poll_disabled,
};

static bool fdmon_epoll_try_enable(AioContext *ctx)
{
    AioHandler *node;
    struct epoll_event event;

    QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
        int r;
        if (QLIST_IS_INSERTED(node, node_deleted) || !node->pfd.events) {
            continue;
        }
        event.events = epoll_events_from_pfd(node->pfd.events);
        event.data.ptr = node;
        r = epoll_ctl(ctx->epollfd, EPOLL_CTL_ADD, node->pfd.fd, &event);
        if (r) {
            return false;
        }
    }

    ctx->fdmon_ops = &fdmon_epoll_ops;
    return true;
}

bool fdmon_epoll_try_upgrade(AioContext *ctx, unsigned npfd)
{
    if (ctx->epollfd < 0) {
        return false;
    }

    /* Do not upgrade while external clients are disabled */
    if (qatomic_read(&ctx->external_disable_cnt)) {
        return false;
    }

    if (npfd >= EPOLL_ENABLE_THRESHOLD) {
        if (fdmon_epoll_try_enable(ctx)) {
            return true;
        } else {
            fdmon_epoll_disable(ctx);
        }
    }
    return false;
}

void fdmon_epoll_setup(AioContext *ctx)
{
    ctx->epollfd = epoll_create1(EPOLL_CLOEXEC);
    if (ctx->epollfd == -1) {
        fprintf(stderr, "Failed to create epoll instance: %s", strerror(errno));
    }
}
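For readers unfamiliar with the pattern above, here is a minimal, self-contained sketch (not QEMU code) of the same idea: file descriptors are registered with epoll_ctl(), epoll_wait() reports readiness, and the epoll event bits are translated back into poll-style bits. The struct fd_watch type and the watch_add()/watch_wait() helpers are illustrative names invented for this sketch; they are not part of this file or of QEMU.

/*
 * Standalone illustration of the epoll registration/wait pattern.
 * Register an fd, wait for it, and map EPOLL* bits back to POLL* bits.
 */
#include <poll.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/epoll.h>

struct fd_watch {
    int fd;
    short events;   /* POLLIN/POLLOUT requested */
    short revents;  /* filled in after waiting */
};

static int watch_add(int epollfd, struct fd_watch *w)
{
    struct epoll_event event = {
        .data.ptr = w,   /* stash the handler pointer, as fdmon_epoll_update() does */
        .events = (w->events & POLLIN ? EPOLLIN : 0) |
                  (w->events & POLLOUT ? EPOLLOUT : 0),
    };

    return epoll_ctl(epollfd, EPOLL_CTL_ADD, w->fd, &event);
}

static int watch_wait(int epollfd, int timeout_ms)
{
    struct epoll_event events[8];
    int i, ret;

    ret = epoll_wait(epollfd, events, 8, timeout_ms);
    for (i = 0; i < ret; i++) {
        struct fd_watch *w = events[i].data.ptr;
        int ev = events[i].events;

        /* Translate epoll event bits back into poll-style bits */
        w->revents = (ev & EPOLLIN ? POLLIN : 0) |
                     (ev & EPOLLOUT ? POLLOUT : 0) |
                     (ev & EPOLLHUP ? POLLHUP : 0) |
                     (ev & EPOLLERR ? POLLERR : 0);
    }
    return ret;
}

int main(void)
{
    struct fd_watch stdin_watch = { .fd = STDIN_FILENO, .events = POLLIN };
    int epollfd = epoll_create1(EPOLL_CLOEXEC);

    if (epollfd < 0 || watch_add(epollfd, &stdin_watch) < 0) {
        perror("epoll");
        return 1;
    }
    if (watch_wait(epollfd, 1000) > 0 && (stdin_watch.revents & POLLIN)) {
        printf("stdin is readable\n");
    }
    close(epollfd);
    return 0;
}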