/* io.c */
  1. /* Licensed under LGPLv2.1+ - see LICENSE file for details */
  2. #include "io.h"
  3. #include "backend.h"
  4. #include <sys/types.h>
  5. #include <sys/socket.h>
  6. #include <netdb.h>
  7. #include <string.h>
  8. #include <errno.h>
  9. #include <stdlib.h>
  10. #include <assert.h>
  11. #include <poll.h>
/* Non-NULL once io_break() has run; io_loop() returns (and clears) it. */
void *io_loop_return;

#ifdef DEBUG
/* Set to skip the next plan. */
bool io_plan_nodebug;
/* The current connection to apply plan to. */
struct io_conn *current;
/* User-defined function to select which connection(s) to debug. */
bool (*io_debug_conn)(struct io_conn *conn);
/* Set when we wake up a connection we are debugging. */
bool io_debug_wakeup;

/* Debug hook applied to every new plan (via macros in io.h).
 * If the current connection is selected by io_debug_conn (or was just
 * woken while debugged), install the plan now and re-enter io_loop()
 * so its execution proceeds linearly under the debugger. */
struct io_plan io_debug(struct io_plan plan)
{
	if (io_plan_nodebug) {
		io_plan_nodebug = false;
		return plan;
	}

	if (!io_debug_conn || !current)
		return plan;
	if (!io_debug_conn(current) && !io_debug_wakeup)
		return plan;

	io_debug_wakeup = false;
	current->plan = plan;
	backend_plan_changed(current);

	/* Call back into the loop immediately. */
	io_loop_return = io_loop();
	return plan;
}

/* Record that a debugged connection was woken, so io_debug() runs it. */
static void debug_io_wake(struct io_conn *conn)
{
	/* We want linear if we wake a debugged connection, too. */
	if (io_debug_conn && io_debug_conn(conn))
		io_debug_wakeup = true;
}

/* Counterpart to io_plan_no_debug(), called in macros in io.h */
static void io_plan_debug_again(void)
{
	io_plan_nodebug = false;
}
#else
/* Non-DEBUG builds: both hooks are no-ops. */
static void debug_io_wake(struct io_conn *conn)
{
}
static void io_plan_debug_again(void)
{
}
#endif
  58. struct io_listener *io_new_listener_(int fd,
  59. void (*init)(int fd, void *arg),
  60. void *arg)
  61. {
  62. struct io_listener *l = malloc(sizeof(*l));
  63. if (!l)
  64. return NULL;
  65. l->fd.listener = true;
  66. l->fd.fd = fd;
  67. l->init = init;
  68. l->arg = arg;
  69. if (!add_listener(l)) {
  70. free(l);
  71. return NULL;
  72. }
  73. return l;
  74. }
/* Stop listening: close the underlying fd, deregister from the backend,
 * and free the listener.  l must not be used afterwards. */
void io_close_listener(struct io_listener *l)
{
	close(l->fd.fd);
	del_listener(l);
	free(l);
}
  81. struct io_conn *io_new_conn_(int fd, struct io_plan plan)
  82. {
  83. struct io_conn *conn = malloc(sizeof(*conn));
  84. io_plan_debug_again();
  85. if (!conn)
  86. return NULL;
  87. conn->fd.listener = false;
  88. conn->fd.fd = fd;
  89. conn->plan = plan;
  90. conn->finish = NULL;
  91. conn->finish_arg = NULL;
  92. conn->duplex = NULL;
  93. conn->timeout = NULL;
  94. if (!add_conn(conn)) {
  95. free(conn);
  96. return NULL;
  97. }
  98. return conn;
  99. }
/* Register finish(conn, arg) to run when conn is finally closed.
 * Replaces any previously-set finish callback. */
void io_set_finish_(struct io_conn *conn,
		    void (*finish)(struct io_conn *, void *),
		    void *arg)
{
	conn->finish = finish;
	conn->finish_arg = arg;
}
  107. struct io_conn *io_duplex_(struct io_conn *old, struct io_plan plan)
  108. {
  109. struct io_conn *conn;
  110. io_plan_debug_again();
  111. assert(!old->duplex);
  112. conn = malloc(sizeof(*conn));
  113. if (!conn)
  114. return NULL;
  115. conn->fd.listener = false;
  116. conn->fd.fd = old->fd.fd;
  117. conn->plan = plan;
  118. conn->duplex = old;
  119. conn->finish = NULL;
  120. conn->finish_arg = NULL;
  121. conn->timeout = NULL;
  122. if (!add_duplex(conn)) {
  123. free(conn);
  124. return NULL;
  125. }
  126. old->duplex = conn;
  127. return conn;
  128. }
  129. bool io_timeout_(struct io_conn *conn, struct timespec ts,
  130. struct io_plan (*cb)(struct io_conn *, void *), void *arg)
  131. {
  132. assert(cb);
  133. if (!conn->timeout) {
  134. conn->timeout = malloc(sizeof(*conn->timeout));
  135. if (!conn->timeout)
  136. return false;
  137. } else
  138. assert(!timeout_active(conn));
  139. conn->timeout->next = cb;
  140. conn->timeout->next_arg = arg;
  141. backend_add_timeout(conn, ts);
  142. return true;
  143. }
  144. /* Returns true if we're finished. */
  145. static int do_write(int fd, struct io_plan *plan)
  146. {
  147. ssize_t ret = write(fd, plan->u.write.buf, plan->u.write.len);
  148. if (ret < 0)
  149. return -1;
  150. plan->u.write.buf += ret;
  151. plan->u.write.len -= ret;
  152. return (plan->u.write.len == 0);
  153. }
  154. /* Queue some data to be written. */
  155. struct io_plan io_write_(const void *data, size_t len,
  156. struct io_plan (*cb)(struct io_conn *, void *),
  157. void *arg)
  158. {
  159. struct io_plan plan;
  160. assert(cb);
  161. plan.u.write.buf = data;
  162. plan.u.write.len = len;
  163. plan.io = do_write;
  164. plan.next = cb;
  165. plan.next_arg = arg;
  166. plan.pollflag = POLLOUT;
  167. return plan;
  168. }
  169. static int do_read(int fd, struct io_plan *plan)
  170. {
  171. ssize_t ret = read(fd, plan->u.read.buf, plan->u.read.len);
  172. if (ret <= 0)
  173. return -1;
  174. plan->u.read.buf += ret;
  175. plan->u.read.len -= ret;
  176. return (plan->u.read.len == 0);
  177. }
  178. /* Queue a request to read into a buffer. */
  179. struct io_plan io_read_(void *data, size_t len,
  180. struct io_plan (*cb)(struct io_conn *, void *),
  181. void *arg)
  182. {
  183. struct io_plan plan;
  184. assert(cb);
  185. plan.u.read.buf = data;
  186. plan.u.read.len = len;
  187. plan.io = do_read;
  188. plan.next = cb;
  189. plan.next_arg = arg;
  190. plan.pollflag = POLLIN;
  191. return plan;
  192. }
  193. static int do_read_partial(int fd, struct io_plan *plan)
  194. {
  195. ssize_t ret = read(fd, plan->u.readpart.buf, *plan->u.readpart.lenp);
  196. if (ret <= 0)
  197. return -1;
  198. *plan->u.readpart.lenp = ret;
  199. return 1;
  200. }
  201. /* Queue a partial request to read into a buffer. */
  202. struct io_plan io_read_partial_(void *data, size_t *len,
  203. struct io_plan (*cb)(struct io_conn *, void *),
  204. void *arg)
  205. {
  206. struct io_plan plan;
  207. assert(cb);
  208. plan.u.readpart.buf = data;
  209. plan.u.readpart.lenp = len;
  210. plan.io = do_read_partial;
  211. plan.next = cb;
  212. plan.next_arg = arg;
  213. plan.pollflag = POLLIN;
  214. return plan;
  215. }
  216. static int do_write_partial(int fd, struct io_plan *plan)
  217. {
  218. ssize_t ret = write(fd, plan->u.writepart.buf, *plan->u.writepart.lenp);
  219. if (ret < 0)
  220. return -1;
  221. *plan->u.writepart.lenp = ret;
  222. return 1;
  223. }
  224. /* Queue a partial write request. */
  225. struct io_plan io_write_partial_(const void *data, size_t *len,
  226. struct io_plan (*cb)(struct io_conn*, void *),
  227. void *arg)
  228. {
  229. struct io_plan plan;
  230. assert(cb);
  231. plan.u.writepart.buf = data;
  232. plan.u.writepart.lenp = len;
  233. plan.io = do_write_partial;
  234. plan.next = cb;
  235. plan.next_arg = arg;
  236. plan.pollflag = POLLOUT;
  237. return plan;
  238. }
  239. struct io_plan io_idle_(void)
  240. {
  241. struct io_plan plan;
  242. plan.pollflag = 0;
  243. plan.io = NULL;
  244. /* Never called (overridden by io_wake), but NULL means closing */
  245. plan.next = (void *)io_idle_;
  246. return plan;
  247. }
  248. void io_wake_(struct io_conn *conn, struct io_plan plan)
  249. {
  250. io_plan_debug_again();
  251. /* It might be closing, but we haven't called its finish() yet. */
  252. if (!conn->plan.next)
  253. return;
  254. /* It was idle, right? */
  255. assert(!conn->plan.io);
  256. conn->plan = plan;
  257. backend_plan_changed(conn);
  258. debug_io_wake(conn);
  259. }
/* Backend entry point: conn's fd is ready, so run one step of its plan's
 * io callback and react to the result (-1 error, 0 continue, 1 done). */
void io_ready(struct io_conn *conn)
{
	switch (conn->plan.io(conn->fd.fd, &conn->plan)) {
	case -1: /* Failure means a new plan: close up. */
		set_current(conn);
		conn->plan = io_close();
		backend_plan_changed(conn);
		set_current(NULL);
		break;
	case 0: /* Keep going with plan. */
		break;
	case 1: /* Done: get next plan. */
		set_current(conn);
		if (timeout_active(conn))
			backend_del_timeout(conn);
		/* The callback supplies the next plan for this connection. */
		conn->plan = conn->plan.next(conn, conn->plan.next_arg);
		backend_plan_changed(conn);
		set_current(NULL);
	}
}
  280. /* Close the connection, we're done. */
  281. struct io_plan io_close_(void)
  282. {
  283. struct io_plan plan;
  284. plan.pollflag = 0;
  285. /* This means we're closing. */
  286. plan.next = NULL;
  287. plan.u.close.saved_errno = errno;
  288. return plan;
  289. }
/* Convenience callback: ignores conn and arg, always closes. */
struct io_plan io_close_cb(struct io_conn *conn, void *arg)
{
	return io_close();
}
  294. /* Exit the loop, returning this (non-NULL) arg. */
  295. struct io_plan io_break_(void *ret, struct io_plan plan)
  296. {
  297. io_plan_debug_again();
  298. assert(ret);
  299. io_loop_return = ret;
  300. return plan;
  301. }