/* poll.c — poll(2)-based event backend. */
  1. /* Licensed under LGPLv2.1+ - see LICENSE file for details */
#include "io.h"
#include "backend.h"
#include <assert.h>
#include <errno.h>
#include <limits.h>
#include <poll.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <unistd.h>
/* Parallel arrays for poll(2): pollfds[] is handed to poll(), fds[] maps
 * each slot back to its owning struct fd (fd->backend_info is the index).
 * num_waiting counts slots with nonzero events; num_closing counts conns
 * whose plan has ended and which await teardown. */
static size_t num_fds = 0, max_fds = 0, num_closing = 0, num_waiting = 0;
static struct pollfd *pollfds = NULL;
static struct fd **fds = NULL;
/* Timer wheel for io timeouts; lazily initialized on first use. */
static struct timers timeouts;
  15. #ifdef DEBUG
  16. static unsigned int io_loop_level;
  17. static struct io_conn *free_later;
  18. static void io_loop_enter(void)
  19. {
  20. io_loop_level++;
  21. }
  22. static void io_loop_exit(void)
  23. {
  24. io_loop_level--;
  25. if (io_loop_level == 0) {
  26. /* Delayed free. */
  27. while (free_later) {
  28. struct io_conn *c = free_later;
  29. free_later = c->finish_arg;
  30. io_alloc.free(c);
  31. }
  32. }
  33. }
  34. static void free_conn(struct io_conn *conn)
  35. {
  36. /* Only free on final exit: chain via finish. */
  37. if (io_loop_level > 1) {
  38. struct io_conn *c;
  39. for (c = free_later; c; c = c->finish_arg)
  40. assert(c != conn);
  41. conn->finish_arg = free_later;
  42. free_later = conn;
  43. } else
  44. io_alloc.free(conn);
  45. }
  46. #else
/* Non-DEBUG build: no nesting bookkeeping needed. */
static void io_loop_enter(void)
{
}
/* Non-DEBUG build: nothing to clean up on loop exit. */
static void io_loop_exit(void)
{
}
/* Non-DEBUG build: connections can always be freed immediately. */
static void free_conn(struct io_conn *conn)
{
	io_alloc.free(conn);
}
  57. #endif
  58. static bool add_fd(struct fd *fd, short events)
  59. {
  60. if (num_fds + 1 > max_fds) {
  61. struct pollfd *newpollfds;
  62. struct fd **newfds;
  63. size_t num = max_fds ? max_fds * 2 : 8;
  64. newpollfds = io_alloc.realloc(pollfds, sizeof(*newpollfds)*num);
  65. if (!newpollfds)
  66. return false;
  67. pollfds = newpollfds;
  68. newfds = io_alloc.realloc(fds, sizeof(*newfds) * num);
  69. if (!newfds)
  70. return false;
  71. fds = newfds;
  72. max_fds = num;
  73. }
  74. pollfds[num_fds].events = events;
  75. /* In case it's idle. */
  76. if (!events)
  77. pollfds[num_fds].fd = -fd->fd;
  78. else
  79. pollfds[num_fds].fd = fd->fd;
  80. pollfds[num_fds].revents = 0; /* In case we're iterating now */
  81. fds[num_fds] = fd;
  82. fd->backend_info = num_fds;
  83. num_fds++;
  84. if (events)
  85. num_waiting++;
  86. return true;
  87. }
/* Remove fd from the poll set: swap the last slot into its place so the
 * arrays stay dense, release the arrays entirely when the set empties,
 * and close the underlying file descriptor. */
static void del_fd(struct fd *fd)
{
	size_t n = fd->backend_info;

	assert(n != -1);
	assert(n < num_fds);
	if (pollfds[n].events)
		num_waiting--;
	if (n != num_fds - 1) {
		/* Move last one over us. */
		pollfds[n] = pollfds[num_fds-1];
		fds[n] = fds[num_fds-1];
		assert(fds[n]->backend_info == num_fds-1);
		fds[n]->backend_info = n;
	} else if (num_fds == 1) {
		/* Free everything when no more fds. */
		io_alloc.free(pollfds);
		io_alloc.free(fds);
		pollfds = NULL;
		fds = NULL;
		max_fds = 0;
	}
	num_fds--;
	fd->backend_info = -1; /* Mark as no longer in the table. */
	close(fd->fd);
}
  113. bool add_listener(struct io_listener *l)
  114. {
  115. if (!add_fd(&l->fd, POLLIN))
  116. return false;
  117. return true;
  118. }
  119. void backend_plan_changed(struct io_conn *conn)
  120. {
  121. struct pollfd *pfd;
  122. /* This can happen with debugging and delayed free... */
  123. if (conn->fd.backend_info == -1)
  124. return;
  125. pfd = &pollfds[conn->fd.backend_info];
  126. if (pfd->events)
  127. num_waiting--;
  128. pfd->events = conn->plan.pollflag;
  129. if (conn->duplex) {
  130. int mask = conn->duplex->plan.pollflag;
  131. /* You can't *both* read/write. */
  132. assert(!mask || pfd->events != mask);
  133. pfd->events |= mask;
  134. }
  135. if (pfd->events) {
  136. num_waiting++;
  137. pfd->fd = conn->fd.fd;
  138. } else
  139. pfd->fd = -conn->fd.fd;
  140. if (!conn->plan.next)
  141. num_closing++;
  142. }
  143. bool add_conn(struct io_conn *c)
  144. {
  145. if (!add_fd(&c->fd, c->plan.pollflag))
  146. return false;
  147. /* Immediate close is allowed. */
  148. if (!c->plan.next)
  149. num_closing++;
  150. return true;
  151. }
/* Register the second half of a duplex pair: it shares the pollfd slot
 * of its partner, so copy the slot index and refresh the event mask. */
bool add_duplex(struct io_conn *c)
{
	c->fd.backend_info = c->duplex->fd.backend_info;
	backend_plan_changed(c);
	return true;
}
/* Tear down a connection: run its finish callback (with the errno value
 * saved by io_close), drop any pending timeout, detach from the poll
 * table (or hand the shared slot back to the duplex partner), and free. */
void backend_del_conn(struct io_conn *conn)
{
	if (conn->finish) {
		/* Saved by io_close */
		errno = conn->plan.u1.s;
		conn->finish(conn, conn->finish_arg);
	}
	if (timeout_active(conn))
		backend_del_timeout(conn);
	io_alloc.free(conn->timeout);
	if (conn->duplex) {
		/* In case fds[] pointed to the other one. */
		fds[conn->fd.backend_info] = &conn->duplex->fd;
		conn->duplex->duplex = NULL;
		conn->fd.backend_info = -1;
	} else
		del_fd(&conn->fd);
	num_closing--;
	free_conn(conn);
}
/* Remove a listener's fd from the poll set (del_fd also closes it). */
void del_listener(struct io_listener *l)
{
	del_fd(&l->fd);
}
/* Install a new plan on conn and update its pollfd entry to match. */
static void set_plan(struct io_conn *conn, struct io_plan plan)
{
	conn->plan = plan;
	backend_plan_changed(conn);
}
  187. static void accept_conn(struct io_listener *l)
  188. {
  189. int fd = accept(l->fd.fd, NULL, NULL);
  190. /* FIXME: What to do here? */
  191. if (fd < 0)
  192. return;
  193. l->init(fd, l->arg);
  194. }
/* It's OK to miss some, as long as we make progress. */
/* Walk the fd table tearing down connections whose plan has ended.
 * Returns true (with *ready set) if a debug-hooked conn is found, so the
 * caller can hand it back instead of finishing it here. */
static bool finish_conns(struct io_conn **ready)
{
	unsigned int i;

	for (i = 0; !io_loop_return && i < num_fds; i++) {
		struct io_conn *c, *duplex;

		if (!num_closing)
			break;
		if (fds[i]->listener)
			continue;
		c = (void *)fds[i];
		/* Visit c, then its duplex partner (if any), exactly once. */
		for (duplex = c->duplex; c; c = duplex, duplex = NULL) {
			if (!c->plan.next) {
				if (doing_debug_on(c) && ready) {
					*ready = c;
					return true;
				}
				backend_del_conn(c);
				/* Slot i was refilled by the last fd:
				 * step back so the for's i++ revisits it
				 * (unsigned wrap at i==0 is intentional). */
				i--;
			}
		}
	}
	return false;
}
/* Arm conn's timeout to fire `duration` from now, lazily initializing
 * the global timer list on first use. */
void backend_add_timeout(struct io_conn *conn, struct timespec duration)
{
	if (!timeouts.base)
		timers_init(&timeouts, time_now());
	timer_add(&timeouts, &conn->timeout->timer,
		  time_add(time_now(), duration));
	conn->timeout->conn = conn;	/* Non-NULL marks the timeout active. */
}
/* Disarm conn's pending timeout; conn must currently own it. */
void backend_del_timeout(struct io_conn *conn)
{
	assert(conn->timeout->conn == conn);
	timer_del(&timeouts, &conn->timeout->timer);
	conn->timeout->conn = NULL;	/* Inactive again. */
}
/* This is the main loop. */
/* Run until io_break() sets io_loop_return or there is nothing left to
 * do.  Each iteration: fire expired timers, tear down closing conns,
 * then poll() and dispatch readiness to io_ready().  If `ready` is
 * non-NULL (debug mode), return NULL early with *ready set to the conn
 * that needs debug attention.  Otherwise returns the io_break() value. */
void *do_io_loop(struct io_conn **ready)
{
	void *ret;

	io_loop_enter();

	while (!io_loop_return) {
		/* INT_MAX ms is the effective "no timer pending" wait. */
		int i, r, timeout = INT_MAX;
		struct timespec now;
		bool some_timeouts = false;

		if (timeouts.base) {
			struct timespec first;
			struct list_head expired;
			struct io_timeout *t;

			now = time_now();

			/* Call functions for expired timers. */
			timers_expire(&timeouts, now, &expired);
			while ((t = list_pop(&expired, struct io_timeout, timer.list))) {
				struct io_conn *conn = t->conn;
				/* Clear, in case timer re-adds */
				t->conn = NULL;
				set_current(conn);
				set_plan(conn, t->next(conn, t->next_arg));
				some_timeouts = true;
			}

			/* Now figure out how long to wait for the next one. */
			if (timer_earliest(&timeouts, &first)) {
				uint64_t f = time_to_msec(time_sub(first, now));
				if (f < INT_MAX)
					timeout = f;
			}
		}

		if (num_closing) {
			/* If this finishes a debugging con, return now. */
			if (finish_conns(ready))
				return NULL;
			/* Could have started/finished more. */
			continue;
		}

		/* debug can recurse on io_loop; anything can change. */
		if (doing_debug() && some_timeouts)
			continue;

		if (num_fds == 0)
			break;

		/* You can't tell them all to go to sleep! */
		assert(num_waiting);

		r = poll(pollfds, num_fds, timeout);
		if (r < 0)
			break;

		/* r counts remaining fds with events; stop early at 0. */
		for (i = 0; i < num_fds && !io_loop_return; i++) {
			struct io_conn *c = (void *)fds[i];
			int events = pollfds[i].revents;

			if (r == 0)
				break;

			if (fds[i]->listener) {
				if (events & POLLIN) {
					accept_conn((void *)c);
					r--;
				}
			} else if (events & (POLLIN|POLLOUT)) {
				r--;
				/* Duplex pair shares this slot: route the
				 * partner's mask to the partner first. */
				if (c->duplex) {
					int mask = c->duplex->plan.pollflag;
					if (events & mask) {
						if (doing_debug_on(c->duplex)
						    && ready) {
							*ready = c->duplex;
							return NULL;
						}
						io_ready(c->duplex);
						events &= ~mask;
						/* debug can recurse;
						 * anything can change. */
						if (doing_debug())
							break;
						if (!(events&(POLLIN|POLLOUT)))
							continue;
					}
				}
				if (doing_debug_on(c) && ready) {
					*ready = c;
					return NULL;
				}
				io_ready(c);
				/* debug can recurse; anything can change. */
				if (doing_debug())
					break;
			} else if (events & (POLLHUP|POLLNVAL|POLLERR)) {
				/* Error/hangup: force both halves closed. */
				r--;
				set_current(c);
				errno = EBADF;
				set_plan(c, io_close());
				if (c->duplex) {
					set_current(c->duplex);
					set_plan(c->duplex, io_close());
				}
			}
		}
	}

	/* Drain any connections still closing before we return. */
	while (num_closing && !io_loop_return) {
		if (finish_conns(ready))
			return NULL;
	}

	ret = io_loop_return;
	io_loop_return = NULL;

	io_loop_exit();
	return ret;
}
/* Public entry point: run the event loop with no debug-ready hook. */
void *io_loop(void)
{
	return do_io_loop(NULL);
}