/* io.c - simple I/O plan implementation (listeners, connections, read/write plans). */
/* Licensed under LGPLv2.1+ - see LICENSE file for details */
#include "io.h"
#include "backend.h"
#include <assert.h>
#include <errno.h>
#include <netdb.h>
#include <poll.h>
#include <stdlib.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <unistd.h>
  12. void *io_loop_return;
  13. struct io_listener *io_new_listener_(int fd,
  14. void (*init)(int fd, void *arg),
  15. void *arg)
  16. {
  17. struct io_listener *l = malloc(sizeof(*l));
  18. if (!l)
  19. return NULL;
  20. l->fd.listener = true;
  21. l->fd.fd = fd;
  22. l->init = init;
  23. l->arg = arg;
  24. if (!add_listener(l)) {
  25. free(l);
  26. return NULL;
  27. }
  28. return l;
  29. }
  30. void io_close_listener(struct io_listener *l)
  31. {
  32. close(l->fd.fd);
  33. del_listener(l);
  34. free(l);
  35. }
  36. struct io_conn *io_new_conn_(int fd,
  37. struct io_plan plan,
  38. void (*finish)(struct io_conn *, void *),
  39. void *arg)
  40. {
  41. struct io_conn *conn = malloc(sizeof(*conn));
  42. if (!conn)
  43. return NULL;
  44. conn->fd.listener = false;
  45. conn->fd.fd = fd;
  46. conn->plan = plan;
  47. conn->finish = finish;
  48. conn->finish_arg = arg;
  49. conn->duplex = NULL;
  50. conn->timeout = NULL;
  51. if (!add_conn(conn)) {
  52. free(conn);
  53. return NULL;
  54. }
  55. return conn;
  56. }
  57. struct io_conn *io_duplex_(struct io_conn *old,
  58. struct io_plan plan,
  59. void (*finish)(struct io_conn *, void *),
  60. void *arg)
  61. {
  62. struct io_conn *conn;
  63. assert(!old->duplex);
  64. conn = malloc(sizeof(*conn));
  65. if (!conn)
  66. return NULL;
  67. conn->fd.listener = false;
  68. conn->fd.fd = old->fd.fd;
  69. conn->plan = plan;
  70. conn->duplex = old;
  71. conn->finish = finish;
  72. conn->finish_arg = arg;
  73. conn->timeout = NULL;
  74. if (!add_duplex(conn)) {
  75. free(conn);
  76. return NULL;
  77. }
  78. old->duplex = conn;
  79. return conn;
  80. }
  81. bool io_timeout_(struct io_conn *conn, struct timespec ts,
  82. struct io_plan (*cb)(struct io_conn *, void *), void *arg)
  83. {
  84. assert(cb);
  85. if (!conn->timeout) {
  86. conn->timeout = malloc(sizeof(*conn->timeout));
  87. if (!conn->timeout)
  88. return false;
  89. } else
  90. assert(!timeout_active(conn));
  91. conn->timeout->next = cb;
  92. conn->timeout->next_arg = arg;
  93. backend_add_timeout(conn, ts);
  94. return true;
  95. }
  96. static enum io_result do_write(struct io_conn *conn)
  97. {
  98. ssize_t ret = write(conn->fd.fd, conn->plan.u.write.buf, conn->plan.u.write.len);
  99. if (ret < 0)
  100. return RESULT_CLOSE;
  101. conn->plan.u.write.buf += ret;
  102. conn->plan.u.write.len -= ret;
  103. if (conn->plan.u.write.len == 0)
  104. return RESULT_FINISHED;
  105. else
  106. return RESULT_AGAIN;
  107. }
  108. /* Queue some data to be written. */
  109. struct io_plan io_write_(const void *data, size_t len,
  110. struct io_plan (*cb)(struct io_conn *, void *),
  111. void *arg)
  112. {
  113. struct io_plan plan;
  114. assert(cb);
  115. plan.u.write.buf = data;
  116. plan.u.write.len = len;
  117. plan.io = do_write;
  118. plan.next = cb;
  119. plan.next_arg = arg;
  120. plan.pollflag = POLLOUT;
  121. return plan;
  122. }
  123. static enum io_result do_read(struct io_conn *conn)
  124. {
  125. ssize_t ret = read(conn->fd.fd, conn->plan.u.read.buf,
  126. conn->plan.u.read.len);
  127. if (ret <= 0)
  128. return RESULT_CLOSE;
  129. conn->plan.u.read.buf += ret;
  130. conn->plan.u.read.len -= ret;
  131. if (conn->plan.u.read.len == 0)
  132. return RESULT_FINISHED;
  133. else
  134. return RESULT_AGAIN;
  135. }
  136. /* Queue a request to read into a buffer. */
  137. struct io_plan io_read_(void *data, size_t len,
  138. struct io_plan (*cb)(struct io_conn *, void *),
  139. void *arg)
  140. {
  141. struct io_plan plan;
  142. assert(cb);
  143. plan.u.read.buf = data;
  144. plan.u.read.len = len;
  145. plan.io = do_read;
  146. plan.next = cb;
  147. plan.next_arg = arg;
  148. plan.pollflag = POLLIN;
  149. return plan;
  150. }
  151. static enum io_result do_read_partial(struct io_conn *conn)
  152. {
  153. ssize_t ret = read(conn->fd.fd, conn->plan.u.readpart.buf,
  154. *conn->plan.u.readpart.lenp);
  155. if (ret <= 0)
  156. return RESULT_CLOSE;
  157. *conn->plan.u.readpart.lenp = ret;
  158. return RESULT_FINISHED;
  159. }
  160. /* Queue a partial request to read into a buffer. */
  161. struct io_plan io_read_partial_(void *data, size_t *len,
  162. struct io_plan (*cb)(struct io_conn *, void *),
  163. void *arg)
  164. {
  165. struct io_plan plan;
  166. assert(cb);
  167. plan.u.readpart.buf = data;
  168. plan.u.readpart.lenp = len;
  169. plan.io = do_read_partial;
  170. plan.next = cb;
  171. plan.next_arg = arg;
  172. plan.pollflag = POLLIN;
  173. return plan;
  174. }
  175. static enum io_result do_write_partial(struct io_conn *conn)
  176. {
  177. ssize_t ret = write(conn->fd.fd, conn->plan.u.writepart.buf,
  178. *conn->plan.u.writepart.lenp);
  179. if (ret < 0)
  180. return RESULT_CLOSE;
  181. *conn->plan.u.writepart.lenp = ret;
  182. return RESULT_FINISHED;
  183. }
  184. /* Queue a partial write request. */
  185. struct io_plan io_write_partial_(const void *data, size_t *len,
  186. struct io_plan (*cb)(struct io_conn*, void *),
  187. void *arg)
  188. {
  189. struct io_plan plan;
  190. assert(cb);
  191. plan.u.writepart.buf = data;
  192. plan.u.writepart.lenp = len;
  193. plan.io = do_write_partial;
  194. plan.next = cb;
  195. plan.next_arg = arg;
  196. plan.pollflag = POLLOUT;
  197. return plan;
  198. }
  199. struct io_plan io_idle(void)
  200. {
  201. struct io_plan plan;
  202. plan.pollflag = 0;
  203. plan.io = NULL;
  204. /* Never called (overridded by io_wake), but NULL means closing */
  205. plan.next = io_close;
  206. return plan;
  207. }
  208. void io_wake(struct io_conn *conn, struct io_plan plan)
  209. {
  210. /* It might be closing, but we haven't called its finish() yet. */
  211. if (!conn->plan.next)
  212. return;
  213. /* It was idle, right? */
  214. assert(!conn->plan.io);
  215. conn->plan = plan;
  216. backend_wakeup(conn);
  217. }
  218. static struct io_plan do_next(struct io_conn *conn)
  219. {
  220. if (timeout_active(conn))
  221. backend_del_timeout(conn);
  222. return conn->plan.next(conn, conn->plan.next_arg);
  223. }
  224. struct io_plan do_ready(struct io_conn *conn)
  225. {
  226. switch (conn->plan.io(conn)) {
  227. case RESULT_CLOSE:
  228. return io_close(conn, NULL);
  229. case RESULT_FINISHED:
  230. return do_next(conn);
  231. case RESULT_AGAIN:
  232. return conn->plan;
  233. default:
  234. abort();
  235. }
  236. }
  237. /* Useful next functions. */
  238. /* Close the connection, we're done. */
  239. struct io_plan io_close(struct io_conn *conn, void *arg)
  240. {
  241. struct io_plan plan;
  242. plan.pollflag = 0;
  243. /* This means we're closing. */
  244. plan.next = NULL;
  245. return plan;
  246. }
  247. /* Exit the loop, returning this (non-NULL) arg. */
  248. struct io_plan io_break(void *ret, struct io_plan plan)
  249. {
  250. assert(ret);
  251. io_loop_return = ret;
  252. return plan;
  253. }