io.c

/* Licensed under LGPLv2.1+ - see LICENSE file for details */
#include "io.h"
#include "backend.h"
#include <sys/types.h>
#include <sys/socket.h>
#include <netdb.h>
#include <string.h>
#include <errno.h>
#include <stdlib.h>
#include <assert.h>
#include <unistd.h>     /* read(), write(), close(): assumed not already pulled in by io.h/backend.h */
/* Set by io_break(); presumably handed back to whoever called the
 * backend's event loop. */
void *io_loop_return;

struct io_listener *io_new_listener_(int fd,
                                     struct io_op *(*start)(struct io_conn *,
                                                            void *arg),
                                     void (*finish)(struct io_conn *, void *),
                                     void *arg)
{
        struct io_listener *l = malloc(sizeof(*l));

        if (!l)
                return NULL;

        l->fd.listener = true;
        l->fd.fd = fd;
        l->fd.next = start;
        l->fd.finish = finish;
        l->fd.finish_arg = l->fd.next_arg = arg;
        if (!add_listener(l)) {
                free(l);
                return NULL;
        }
        return l;
}
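
/*
 * Usage sketch (hypothetical, not part of this file): the fd must already be
 * bound and listening; "io_new_listener" is assumed to be the usual io.h
 * macro wrapper around io_new_listener_(), and "accepted" / "disconnected"
 * are caller-supplied callbacks with the start/finish signatures above.
 *
 *      int fd = socket(AF_INET6, SOCK_STREAM, 0);
 *      // ... bind(fd, ...); listen(fd, 5); ...
 *      struct io_listener *l = io_new_listener(fd, accepted, disconnected, NULL);
 *      if (!l)
 *              exit(1);        // malloc or add_listener() failed.
 */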

void io_close_listener(struct io_listener *l)
{
        close(l->fd.fd);
        del_listener(l);
        free(l);
}

struct io_conn *io_new_conn_(int fd,
                             struct io_op *(*start)(struct io_conn *, void *),
                             void (*finish)(struct io_conn *, void *),
                             void *arg)
{
        struct io_conn *conn = malloc(sizeof(*conn));

        if (!conn)
                return NULL;

        conn->fd.listener = false;
        conn->fd.fd = fd;
        conn->fd.next = start;
        conn->fd.finish = finish;
        conn->fd.finish_arg = conn->fd.next_arg = arg;
        conn->state = NEXT;
        conn->duplex = NULL;
        conn->timeout = NULL;
        if (!add_conn(conn)) {
                free(conn);
                return NULL;
        }
        return conn;
}
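
/*
 * Usage sketch (hypothetical): a start callback that writes a greeting and
 * then closes.  "io_new_conn" and "io_next" are assumed to be the io.h macro
 * wrappers around io_new_conn_() and io_next_().
 *
 *      static const char greeting[] = "hello\n";
 *
 *      static struct io_op *start(struct io_conn *conn, void *unused)
 *      {
 *              return io_write(greeting, sizeof(greeting) - 1,
 *                              io_next(conn, io_close, NULL));
 *      }
 *
 *      struct io_conn *conn = io_new_conn(connected_fd, start, NULL, NULL);
 */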

struct io_conn *io_duplex_(struct io_conn *old,
                           struct io_op *(*start)(struct io_conn *, void *),
                           void (*finish)(struct io_conn *, void *),
                           void *arg)
{
        struct io_conn *conn;

        assert(!old->duplex);

        conn = malloc(sizeof(*conn));
        if (!conn)
                return NULL;

        conn->fd.listener = false;
        conn->fd.fd = old->fd.fd;
        conn->fd.next = start;
        conn->fd.finish = finish;
        conn->fd.finish_arg = conn->fd.next_arg = arg;
        conn->state = NEXT;
        conn->duplex = old;
        conn->timeout = NULL;
        if (!add_duplex(conn)) {
                free(conn);
                return NULL;
        }
        old->duplex = conn;
        return conn;
}
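
/*
 * Note: the duplex pair above share one file descriptor but keep separate
 * state, next callbacks and timeouts, so one io_conn can, say, sit in a READ
 * op while its partner WRITEs on the same fd.  A hypothetical sketch,
 * assuming an io_duplex() macro wrapper in io.h:
 *
 *      struct io_conn *writer = io_duplex(reader, start_writing, NULL, NULL);
 */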

/* Convenient token which only we can produce. */
static inline struct io_next *to_ionext(struct io_conn *conn)
{
        return (struct io_next *)conn;
}

static inline struct io_op *to_ioop(enum io_state state)
{
        return (struct io_op *)(long)state;
}

static inline struct io_conn *from_ionext(struct io_next *next)
{
        return (struct io_conn *)next;
}
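
/*
 * Note on the casts above: a struct io_next token is just the io_conn
 * pointer in disguise, and a struct io_op is the enum io_state value
 * smuggled through a pointer.  An io_op is never dereferenced; the backend
 * presumably converts it straight back to a state.  This keeps the public
 * types opaque at zero runtime cost.
 */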

struct io_next *io_next_(struct io_conn *conn,
                         struct io_op *(*next)(struct io_conn *, void *),
                         void *arg)
{
        conn->fd.next = next;
        conn->fd.next_arg = arg;

        return to_ionext(conn);
}

bool io_timeout_(struct io_conn *conn, struct timespec ts,
                 struct io_op *(*next)(struct io_conn *, void *), void *arg)
{
        if (!conn->timeout) {
                conn->timeout = malloc(sizeof(*conn->timeout));
                if (!conn->timeout)
                        return false;
        } else
                assert(!timeout_active(conn));

        conn->timeout->next = next;
        conn->timeout->next_arg = arg;
        backend_add_timeout(conn, ts);
        return true;
}
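
/*
 * Usage sketch (hypothetical; assumes an io_timeout() macro wrapper in io.h):
 * arm a 30-second timeout whose callback simply drops the connection.
 *
 *      static struct io_op *timed_out(struct io_conn *conn, void *arg)
 *      {
 *              return io_close(conn, arg);
 *      }
 *
 *      struct timespec ts = { .tv_sec = 30, .tv_nsec = 0 };
 *      if (!io_timeout(conn, ts, timed_out, NULL))
 *              return io_close(conn, NULL);    // malloc failed, no timer armed.
 */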

/* Queue some data to be written. */
struct io_op *io_write(const void *data, size_t len, struct io_next *next)
{
        struct io_conn *conn = from_ionext(next);
        conn->u.write.buf = data;
        conn->u.write.len = len;
        return to_ioop(WRITE);
}

/* Queue a request to read into a buffer. */
struct io_op *io_read(void *data, size_t len, struct io_next *next)
{
        struct io_conn *conn = from_ionext(next);
        conn->u.read.buf = data;
        conn->u.read.len = len;
        return to_ioop(READ);
}

/* Queue a partial request to read into a buffer. */
struct io_op *io_read_partial(void *data, size_t *len, struct io_next *next)
{
        struct io_conn *conn = from_ionext(next);
        conn->u.readpart.buf = data;
        conn->u.readpart.lenp = len;
        return to_ioop(READPART);
}

/* Queue a partial write request. */
struct io_op *io_write_partial(const void *data, size_t *len, struct io_next *next)
{
        struct io_conn *conn = from_ionext(next);
        conn->u.writepart.buf = data;
        conn->u.writepart.lenp = len;
        return to_ioop(WRITEPART);
}
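
/*
 * Note: as do_ready() below shows, io_read()/io_write() stay in the same op
 * until the whole buffer has been transferred, whereas the _partial variants
 * complete after a single read()/write() and store the byte count through
 * *lenp (on entry, *lenp is the maximum to transfer).
 */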

struct io_op *io_idle(struct io_conn *conn)
{
        return to_ioop(IDLE);
}

void io_wake_(struct io_conn *conn,
              struct io_op *(*next)(struct io_conn *, void *), void *arg)
{
        /* It might have finished, but we haven't called its finish() yet. */
        if (conn->state == FINISHED)
                return;
        assert(conn->state == IDLE);
        conn->fd.next = next;
        conn->fd.next_arg = arg;
        backend_set_state(conn, to_ioop(NEXT));
}
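
/*
 * Usage sketch (hypothetical; assumes an io_wake() macro wrapper in io.h):
 * a connection parks itself with io_idle() and is restarted later, e.g. from
 * another connection's callback once there is something for it to send.
 *
 *      static struct io_op *wait_for_work(struct io_conn *conn, void *queue)
 *      {
 *              return io_idle(conn);
 *      }
 *
 *      // Elsewhere, when work arrives for that connection:
 *      io_wake(waiting_conn, send_reply, queue);
 */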

static struct io_op *do_next(struct io_conn *conn)
{
        if (timeout_active(conn))
                backend_del_timeout(conn);
        return conn->fd.next(conn, conn->fd.next_arg);
}
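
/*
 * Called (presumably by the backend's poll loop) when conn->fd.fd is ready:
 * performs one read() or write(), closes the connection on error or EOF,
 * and either keeps the current op or, once it has finished, asks the next
 * callback for the following one.
 */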
struct io_op *do_ready(struct io_conn *conn)
{
        ssize_t ret;
        bool finished;

        switch (conn->state) {
        case WRITE:
                ret = write(conn->fd.fd, conn->u.write.buf, conn->u.write.len);
                if (ret < 0)
                        return io_close(conn, NULL);
                conn->u.write.buf += ret;
                conn->u.write.len -= ret;
                finished = (conn->u.write.len == 0);
                break;
        case WRITEPART:
                ret = write(conn->fd.fd, conn->u.writepart.buf,
                            *conn->u.writepart.lenp);
                if (ret < 0)
                        return io_close(conn, NULL);
                *conn->u.writepart.lenp = ret;
                finished = true;
                break;
        case READ:
                ret = read(conn->fd.fd, conn->u.read.buf, conn->u.read.len);
                if (ret <= 0)
                        return io_close(conn, NULL);
                conn->u.read.buf += ret;
                conn->u.read.len -= ret;
                finished = (conn->u.read.len == 0);
                break;
        case READPART:
                ret = read(conn->fd.fd, conn->u.readpart.buf,
                           *conn->u.readpart.lenp);
                if (ret <= 0)
                        return io_close(conn, NULL);
                *conn->u.readpart.lenp = ret;
                finished = true;
                break;
        default:
                /* Shouldn't happen. */
                abort();
        }

        if (finished)
                return do_next(conn);
        return to_ioop(conn->state);
}

/* Useful next functions. */

/* Close the connection, we're done. */
struct io_op *io_close(struct io_conn *conn, void *arg)
{
        return to_ioop(FINISHED);
}

/* Exit the loop, returning this (non-NULL) arg. */
struct io_op *io_break(void *arg, struct io_next *next)
{
        io_loop_return = arg;

        return to_ioop(NEXT);
}
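
/*
 * Usage sketch (hypothetical): break out of the event loop with a result.
 * This assumes the backend's loop entry point (called io_loop() here) returns
 * whatever io_break() stored in io_loop_return, and the usual io_next()
 * macro wrapper.
 *
 *      static struct io_op *got_answer(struct io_conn *conn, void *answer)
 *      {
 *              return io_break(answer, io_next(conn, io_close, NULL));
 *      }
 *
 *      // ... set up connections ...
 *      void *answer = io_loop();
 */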