/* io.c */
  1. /* Licensed under LGPLv2.1+ - see LICENSE file for details */
  2. #include "io.h"
  3. #include "backend.h"
  4. #include <sys/types.h>
  5. #include <sys/socket.h>
  6. #include <netdb.h>
  7. #include <string.h>
  8. #include <errno.h>
  9. #include <stdlib.h>
  10. #include <assert.h>
  11. #include <unistd.h>
  12. #include <fcntl.h>
  13. void *io_loop_return;
  14. struct io_listener *io_new_listener_(const tal_t *ctx, int fd,
  15. struct io_plan *(*init)(struct io_conn *,
  16. void *),
  17. void *arg)
  18. {
  19. struct io_listener *l = tal(ctx, struct io_listener);
  20. if (!l)
  21. return NULL;
  22. l->fd.listener = true;
  23. l->fd.fd = fd;
  24. l->init = init;
  25. l->arg = arg;
  26. l->ctx = ctx;
  27. if (!add_listener(l))
  28. return tal_free(l);
  29. return l;
  30. }
/* Stop listening: close the fd, deregister from the backend, free the
 * listener.  (Order matters: the fd must not be polled after close.) */
void io_close_listener(struct io_listener *l)
{
	close(l->fd.fd);
	del_listener(l);
	tal_free(l);
}
/* Sentinel installed as a plan's "next" while the real callback is unset:
 * reaching this means a plan was used without being set — a caller bug. */
static struct io_plan *io_never_called(struct io_conn *conn, void *arg)
{
	abort();
}
/* Invoke the plan's "next" callback and hand the resulting plan to the
 * backend.  The slot is reset to IO_UNSET *before* the callback runs, so
 * the callback can (and must) install a fresh plan on this connection. */
static void next_plan(struct io_conn *conn, struct io_plan *plan)
{
	struct io_plan *(*next)(struct io_conn *, void *arg);

	next = plan->next;

	/* Reset this slot first: the callback must set a new plan. */
	plan->status = IO_UNSET;
	plan->io = NULL;
	/* Poison "next" so accidental reuse aborts loudly. */
	plan->next = io_never_called;

	plan = next(conn, plan->next_arg);

	/* It should have set a plan inside this conn. */
	assert(plan == &conn->plan[IO_IN]
	       || plan == &conn->plan[IO_OUT]);
	assert(conn->plan[IO_IN].status != IO_UNSET
	       || conn->plan[IO_OUT].status != IO_UNSET);

	backend_new_plan(conn);
}
/* Create a new connection around fd, register it with the backend, and run
 * init() immediately (via next_plan) to establish the first plan.
 * Returns NULL on allocation or registration failure.
 * NOTE(review): init() runs before this returns; it may install any plan,
 * including closing the connection. */
struct io_conn *io_new_conn_(const tal_t *ctx, int fd,
			     struct io_plan *(*init)(struct io_conn *, void *),
			     void *arg)
{
	struct io_conn *conn = tal(ctx, struct io_conn);

	if (!conn)
		return NULL;

	conn->fd.listener = false;
	conn->fd.fd = fd;
	conn->finish = NULL;
	conn->finish_arg = NULL;
	conn->list = NULL;

	if (!add_conn(conn))
		return tal_free(conn);

	/* We start with out doing nothing, and in doing our init. */
	conn->plan[IO_OUT].status = IO_UNSET;
	conn->plan[IO_IN].next = init;
	conn->plan[IO_IN].next_arg = arg;
	next_plan(conn, &conn->plan[IO_IN]);

	return conn;
}
/* Set (or clear, with finish == NULL) the callback invoked when the
 * connection is closed; arg is passed through to it. */
void io_set_finish_(struct io_conn *conn,
		    void (*finish)(struct io_conn *, void *),
		    void *arg)
{
	conn->finish = finish;
	conn->finish_arg = arg;
}
/* Claim the plan slot for one direction.  The slot must currently be
 * IO_UNSET; it is marked IO_POLLING and returned for the caller to fill. */
struct io_plan *io_get_plan(struct io_conn *conn, enum io_direction dir)
{
	assert(conn->plan[dir].status == IO_UNSET);

	conn->plan[dir].status = IO_POLLING;
	return &conn->plan[dir];
}
/* Arrange for next(conn, arg) to run on the next loop pass without any
 * I/O: mark the plan IO_ALWAYS and notify the backend. */
static struct io_plan *set_always(struct io_conn *conn,
				  struct io_plan *plan,
				  struct io_plan *(*next)(struct io_conn *,
							  void *),
				  void *arg)
{
	plan->next = next;
	plan->next_arg = arg;
	plan->status = IO_ALWAYS;

	backend_new_always(conn);
	return plan;
}
  102. struct io_plan *io_always_(struct io_conn *conn,
  103. enum io_direction dir,
  104. struct io_plan *(*next)(struct io_conn *, void *),
  105. void *arg)
  106. {
  107. struct io_plan *plan = io_get_plan(conn, dir);
  108. assert(next);
  109. set_always(conn, plan, next, arg);
  110. return plan;
  111. }
  112. static int do_write(int fd, struct io_plan *plan)
  113. {
  114. ssize_t ret = write(fd, plan->u1.cp, plan->u2.s);
  115. if (ret < 0)
  116. return -1;
  117. plan->u1.cp += ret;
  118. plan->u2.s -= ret;
  119. return plan->u2.s == 0;
  120. }
  121. /* Queue some data to be written. */
  122. struct io_plan *io_write_(struct io_conn *conn, const void *data, size_t len,
  123. struct io_plan *(*next)(struct io_conn *, void *),
  124. void *arg)
  125. {
  126. struct io_plan *plan = io_get_plan(conn, IO_OUT);
  127. assert(next);
  128. if (len == 0)
  129. return set_always(conn, plan, next, arg);
  130. plan->u1.const_vp = data;
  131. plan->u2.s = len;
  132. plan->io = do_write;
  133. plan->next = next;
  134. plan->next_arg = arg;
  135. return plan;
  136. }
  137. static int do_read(int fd, struct io_plan *plan)
  138. {
  139. ssize_t ret = read(fd, plan->u1.cp, plan->u2.s);
  140. if (ret <= 0)
  141. return -1;
  142. plan->u1.cp += ret;
  143. plan->u2.s -= ret;
  144. return plan->u2.s == 0;
  145. }
  146. /* Queue a request to read into a buffer. */
  147. struct io_plan *io_read_(struct io_conn *conn,
  148. void *data, size_t len,
  149. struct io_plan *(*next)(struct io_conn *, void *),
  150. void *arg)
  151. {
  152. struct io_plan *plan = io_get_plan(conn, IO_IN);
  153. assert(next);
  154. if (len == 0)
  155. return set_always(conn, plan, next, arg);
  156. plan->u1.cp = data;
  157. plan->u2.s = len;
  158. plan->io = do_read;
  159. plan->next = next;
  160. plan->next_arg = arg;
  161. return plan;
  162. }
  163. static int do_read_partial(int fd, struct io_plan *plan)
  164. {
  165. ssize_t ret = read(fd, plan->u1.cp, *(size_t *)plan->u2.vp);
  166. if (ret <= 0)
  167. return -1;
  168. *(size_t *)plan->u2.vp = ret;
  169. return 1;
  170. }
  171. /* Queue a partial request to read into a buffer. */
  172. struct io_plan *io_read_partial_(struct io_conn *conn,
  173. void *data, size_t maxlen, size_t *len,
  174. struct io_plan *(*next)(struct io_conn *,
  175. void *),
  176. void *arg)
  177. {
  178. struct io_plan *plan = io_get_plan(conn, IO_IN);
  179. assert(next);
  180. if (maxlen == 0)
  181. return set_always(conn, plan, next, arg);
  182. plan->u1.cp = data;
  183. /* We store the max len in here temporarily. */
  184. *len = maxlen;
  185. plan->u2.vp = len;
  186. plan->io = do_read_partial;
  187. plan->next = next;
  188. plan->next_arg = arg;
  189. return plan;
  190. }
  191. static int do_write_partial(int fd, struct io_plan *plan)
  192. {
  193. ssize_t ret = write(fd, plan->u1.cp, *(size_t *)plan->u2.vp);
  194. if (ret < 0)
  195. return -1;
  196. *(size_t *)plan->u2.vp = ret;
  197. return 1;
  198. }
  199. /* Queue a partial write request. */
  200. struct io_plan *io_write_partial_(struct io_conn *conn,
  201. const void *data, size_t maxlen, size_t *len,
  202. struct io_plan *(*next)(struct io_conn *,
  203. void*),
  204. void *arg)
  205. {
  206. struct io_plan *plan = io_get_plan(conn, IO_OUT);
  207. assert(next);
  208. if (maxlen == 0)
  209. return set_always(conn, plan, next, arg);
  210. plan->u1.const_vp = data;
  211. /* We store the max len in here temporarily. */
  212. *len = maxlen;
  213. plan->u2.vp = len;
  214. plan->io = do_write_partial;
  215. plan->next = next;
  216. plan->next_arg = arg;
  217. return plan;
  218. }
/* Check whether an async connect() has completed, via SO_ERROR.
 * Returns 1 on success (restoring the fd's original flags from u1.s),
 * 0 if still in progress, -1 on failure (errno set to the socket error). */
static int do_connect(int fd, struct io_plan *plan)
{
	int err, ret;
	socklen_t len = sizeof(err);

	/* Has async connect finished? */
	ret = getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &len);
	if (ret < 0)
		return -1;

	if (err == 0) {
		/* Restore blocking if it was initially.
		 * (u1.s holds the pre-connect F_GETFL flags.) */
		fcntl(fd, F_SETFL, plan->u1.s);
		return 1;
	} else if (err == EINPROGRESS)
		return 0;

	errno = err;
	return -1;
}
  236. struct io_plan *io_connect_(struct io_conn *conn, const struct addrinfo *addr,
  237. struct io_plan *(*next)(struct io_conn *, void *),
  238. void *arg)
  239. {
  240. struct io_plan *plan = io_get_plan(conn, IO_IN);
  241. int fd = io_conn_fd(conn);
  242. assert(next);
  243. /* Save old flags, set nonblock if not already. */
  244. plan->u1.s = fcntl(fd, F_GETFL);
  245. fcntl(fd, F_SETFL, plan->u1.s | O_NONBLOCK);
  246. /* Immediate connect can happen. */
  247. if (connect(fd, addr->ai_addr, addr->ai_addrlen) == 0)
  248. return set_always(conn, plan, next, arg);
  249. if (errno != EINPROGRESS)
  250. return io_close(conn);
  251. plan->next = next;
  252. plan->next_arg = arg;
  253. plan->io = do_connect;
  254. return plan;
  255. }
  256. struct io_plan *io_wait_(struct io_conn *conn,
  257. const void *wait, enum io_direction dir,
  258. struct io_plan *(*next)(struct io_conn *, void *),
  259. void *arg)
  260. {
  261. struct io_plan *plan = io_get_plan(conn, dir);
  262. assert(next);
  263. plan->next = next;
  264. plan->next_arg = arg;
  265. plan->u1.const_vp = wait;
  266. plan->status = IO_WAITING;
  267. return plan;
  268. }
/* Wake every connection sleeping (via io_wait) on this pointer. */
void io_wake(const void *wait)
{
	backend_wake(wait);
}
/* Run one ready plan's io callback and act on its result:
 * -1 closes the connection, 0 keeps polling, 1 advances to the next plan.
 * Any other return value is a bug in the io callback. */
static void do_plan(struct io_conn *conn, struct io_plan *plan)
{
	/* Someone else might have called io_close() on us. */
	if (plan->status == IO_CLOSING)
		return;

	/* We shouldn't have polled for this event if this wasn't true! */
	assert(plan->status == IO_POLLING);

	switch (plan->io(conn->fd.fd, plan)) {
	case -1:
		io_close(conn);
		break;
	case 0:
		break;
	case 1:
		next_plan(conn, plan);
		break;
	default:
		/* IO should only return -1, 0 or 1 */
		abort();
	}
}
/* Called by the backend when poll() reports activity on conn's fd:
 * dispatch to the plan for each ready direction. */
void io_ready(struct io_conn *conn, int pollflags)
{
	if (pollflags & POLLIN)
		do_plan(conn, &conn->plan[IO_IN]);
	if (pollflags & POLLOUT)
		do_plan(conn, &conn->plan[IO_OUT]);
}
/* Called by the backend each loop pass: advance any plan marked
 * IO_ALWAYS (set via io_always / set_always) without doing I/O. */
void io_do_always(struct io_conn *conn)
{
	if (conn->plan[IO_IN].status == IO_ALWAYS)
		next_plan(conn, &conn->plan[IO_IN]);
	if (conn->plan[IO_OUT].status == IO_ALWAYS)
		next_plan(conn, &conn->plan[IO_OUT]);
}
/* Called by the backend when io_wake() hit this plan's wait pointer:
 * the plan must be IO_WAITING; advance it. */
void io_do_wakeup(struct io_conn *conn, struct io_plan *plan)
{
	assert(plan->status == IO_WAITING);

	next_plan(conn, plan);
}
/* Close the connection, we're done.  Marks both directions IO_CLOSING and
 * hands the conn to the backend; the fd itself is not closed here.
 * Idempotent: a second call is a no-op. */
struct io_plan *io_close(struct io_conn *conn)
{
	/* Already closing? Don't close twice. */
	if (conn->plan[IO_IN].status == IO_CLOSING)
		return &conn->plan[IO_IN];

	conn->plan[IO_IN].status = conn->plan[IO_OUT].status = IO_CLOSING;
	/* Stash errno in the IN plan — presumably for the finish callback
	 * to see the cause of the close; confirm against the backend. */
	conn->plan[IO_IN].u1.s = errno;

	backend_new_closing(conn);

	return &conn->plan[IO_IN];
}
/* Adapter so io_close can be used directly as a plan callback
 * (e.g. as the "next" of a read/write plan); arg is ignored. */
struct io_plan *io_close_cb(struct io_conn *conn, void *arg)
{
	return io_close(conn);
}
/* Exit the loop, returning this (non-NULL) arg.  Just records the value
 * in the global io_loop_return for the loop to notice and return. */
void io_break(const void *ret)
{
	assert(ret);

	io_loop_return = (void *)ret;
}
/* Plan that must never complete: installs io_never_called so that if it
 * is ever advanced, the process aborts. */
struct io_plan *io_never(struct io_conn *conn)
{
	return io_always(conn, IO_IN, io_never_called, NULL);
}
/* Accessor: the file descriptor underlying this connection. */
int io_conn_fd(const struct io_conn *conn)
{
	return conn->fd.fd;
}