io.c 12 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546
  1. /* Licensed under LGPLv2.1+ - see LICENSE file for details */
  2. #include "io.h"
  3. #include "backend.h"
  4. #include <sys/types.h>
  5. #include <sys/socket.h>
  6. #include <netdb.h>
  7. #include <string.h>
  8. #include <errno.h>
  9. #include <stdlib.h>
  10. #include <assert.h>
  11. #include <unistd.h>
  12. #include <fcntl.h>
  13. #include <ccan/container_of/container_of.h>
  14. void *io_loop_return;
  15. struct io_listener *io_new_listener_(const tal_t *ctx, int fd,
  16. struct io_plan *(*init)(struct io_conn *,
  17. void *),
  18. void *arg)
  19. {
  20. struct io_listener *l = tal(ctx, struct io_listener);
  21. if (!l)
  22. return NULL;
  23. l->fd.listener = true;
  24. l->fd.fd = fd;
  25. l->init = init;
  26. l->arg = arg;
  27. l->ctx = ctx;
  28. if (!add_listener(l))
  29. return tal_free(l);
  30. return l;
  31. }
/* Stop listening: close the fd, remove it from the backend's poll set,
 * then free the listener. */
void io_close_listener(struct io_listener *l)
{
	close(l->fd.fd);
	del_listener(l);
	tal_free(l);
}
/* Poisoned callback installed by next_plan(): reaching it means a plan's
 * next function ran without a fresh plan being set, so abort loudly. */
static struct io_plan *io_never_called(struct io_conn *conn, void *arg)
{
	abort();
}
/* Invoke the current plan's "next" callback and hand the resulting plan
 * to the backend.  The slot is reset to IO_UNSET first, so the callback
 * must establish a new plan (io_read_, io_write_, io_wait_, ...). */
static void next_plan(struct io_conn *conn, struct io_plan *plan)
{
	struct io_plan *(*next)(struct io_conn *, void *arg);

	next = plan->next;

	/* Reset status so the callback's io_plan_arg() assert passes, and
	 * poison ->next so a forgotten plan is caught (io_never_called). */
	plan->status = IO_UNSET;
	plan->io = NULL;
	plan->next = io_never_called;

	plan = next(conn, plan->next_arg);

	/* It should have set a plan inside this conn (or duplex).
	 * &conn->plan[2] is the sentinel io_duplex_() returns. */
	assert(plan == &conn->plan[IO_IN]
	       || plan == &conn->plan[IO_OUT]
	       || plan == &conn->plan[2]);
	assert(conn->plan[IO_IN].status != IO_UNSET
	       || conn->plan[IO_OUT].status != IO_UNSET);

	backend_new_plan(conn);
}
  58. static void set_blocking(int fd, bool block)
  59. {
  60. int flags = fcntl(fd, F_GETFL);
  61. if (block)
  62. flags &= ~O_NONBLOCK;
  63. else
  64. flags |= O_NONBLOCK;
  65. fcntl(fd, F_SETFL, flags);
  66. }
/* Create a connection around fd: register with the backend, make the fd
 * non-blocking, then immediately run @init to set the first plan.
 * Returns NULL on allocation/registration failure (fd is not closed). */
struct io_conn *io_new_conn_(const tal_t *ctx, int fd,
			     struct io_plan *(*init)(struct io_conn *, void *),
			     void *arg)
{
	struct io_conn *conn = tal(ctx, struct io_conn);

	if (!conn)
		return NULL;

	conn->fd.listener = false;
	conn->fd.fd = fd;
	conn->finish = NULL;
	conn->finish_arg = NULL;
	list_node_init(&conn->always);
	list_node_init(&conn->closing);
	conn->debug = false;

	if (!add_conn(conn))
		return tal_free(conn);

	/* Keep our I/O async. */
	set_blocking(fd, false);

	/* We start with out doing nothing, and in doing our init. */
	conn->plan[IO_OUT].status = IO_UNSET;

	conn->plan[IO_IN].next = init;
	conn->plan[IO_IN].next_arg = arg;
	/* next_plan() resets IO_IN to IO_UNSET itself before calling init. */
	next_plan(conn, &conn->plan[IO_IN]);

	return conn;
}
  92. void io_set_finish_(struct io_conn *conn,
  93. void (*finish)(struct io_conn *, void *),
  94. void *arg)
  95. {
  96. conn->finish = finish;
  97. conn->finish_arg = arg;
  98. }
  99. struct io_plan_arg *io_plan_arg(struct io_conn *conn, enum io_direction dir)
  100. {
  101. assert(conn->plan[dir].status == IO_UNSET);
  102. conn->plan[dir].status = IO_POLLING;
  103. return &conn->plan[dir].arg;
  104. }
  105. static struct io_plan *set_always(struct io_conn *conn,
  106. enum io_direction dir,
  107. struct io_plan *(*next)(struct io_conn *,
  108. void *),
  109. void *arg)
  110. {
  111. struct io_plan *plan = &conn->plan[dir];
  112. plan->status = IO_ALWAYS;
  113. backend_new_always(conn);
  114. return io_set_plan(conn, dir, NULL, next, arg);
  115. }
/* Arrange for @next to run without waiting on the fd; @dir only chooses
 * which plan slot carries the callback. */
static struct io_plan *io_always_dir(struct io_conn *conn,
				     enum io_direction dir,
				     struct io_plan *(*next)(struct io_conn *,
							     void *),
				     void *arg)
{
	return set_always(conn, dir, next, arg);
}
/* Public "run @next immediately" plan, carried in the IO_IN slot. */
struct io_plan *io_always_(struct io_conn *conn,
			   struct io_plan *(*next)(struct io_conn *, void *),
			   void *arg)
{
	return io_always_dir(conn, IO_IN, next, arg);
}
/* As io_always_, but carried in the IO_OUT slot (for duplex use). */
struct io_plan *io_out_always_(struct io_conn *conn,
			       struct io_plan *(*next)(struct io_conn *,
						       void *),
			       void *arg)
{
	return io_always_dir(conn, IO_OUT, next, arg);
}
  137. static int do_write(int fd, struct io_plan_arg *arg)
  138. {
  139. ssize_t ret = write(fd, arg->u1.cp, arg->u2.s);
  140. if (ret < 0)
  141. return -1;
  142. arg->u1.cp += ret;
  143. arg->u2.s -= ret;
  144. return arg->u2.s == 0;
  145. }
/* Queue some data to be written. */
/* Sets the IO_OUT plan: write all @len bytes of @data, then call @next.
 * @data must stay valid until the plan completes (only the pointer is
 * stored).  A zero-length write completes via an always plan. */
struct io_plan *io_write_(struct io_conn *conn, const void *data, size_t len,
			  struct io_plan *(*next)(struct io_conn *, void *),
			  void *next_arg)
{
	struct io_plan_arg *arg = io_plan_arg(conn, IO_OUT);

	/* set_always overrides the IO_POLLING status just set above. */
	if (len == 0)
		return set_always(conn, IO_OUT, next, next_arg);

	arg->u1.const_vp = data;
	arg->u2.s = len;

	return io_set_plan(conn, IO_OUT, do_write, next, next_arg);
}
  158. static int do_read(int fd, struct io_plan_arg *arg)
  159. {
  160. ssize_t ret = read(fd, arg->u1.cp, arg->u2.s);
  161. if (ret <= 0)
  162. return -1;
  163. arg->u1.cp += ret;
  164. arg->u2.s -= ret;
  165. return arg->u2.s == 0;
  166. }
/* Queue a request to read into a buffer. */
/* Sets the IO_IN plan: fill exactly @len bytes of @data, then call @next.
 * EOF before @len bytes closes the connection (do_read treats it as
 * error).  A zero-length read completes via an always plan. */
struct io_plan *io_read_(struct io_conn *conn,
			 void *data, size_t len,
			 struct io_plan *(*next)(struct io_conn *, void *),
			 void *next_arg)
{
	struct io_plan_arg *arg = io_plan_arg(conn, IO_IN);

	if (len == 0)
		return set_always(conn, IO_IN, next, next_arg);

	arg->u1.cp = data;
	arg->u2.s = len;

	return io_set_plan(conn, IO_IN, do_read, next, next_arg);
}
  180. static int do_read_partial(int fd, struct io_plan_arg *arg)
  181. {
  182. ssize_t ret = read(fd, arg->u1.cp, *(size_t *)arg->u2.vp);
  183. if (ret <= 0)
  184. return -1;
  185. *(size_t *)arg->u2.vp = ret;
  186. return 1;
  187. }
/* Queue a partial request to read into a buffer. */
/* Sets the IO_IN plan: read up to @maxlen bytes into @data; on completion
 * *@len holds the number actually read and @next runs.  @len must stay
 * valid until then (its address is stored in the plan). */
struct io_plan *io_read_partial_(struct io_conn *conn,
				 void *data, size_t maxlen, size_t *len,
				 struct io_plan *(*next)(struct io_conn *,
							 void *),
				 void *next_arg)
{
	struct io_plan_arg *arg = io_plan_arg(conn, IO_IN);

	if (maxlen == 0)
		return set_always(conn, IO_IN, next, next_arg);

	arg->u1.cp = data;
	/* We store the max len in here temporarily. */
	*len = maxlen;
	arg->u2.vp = len;

	return io_set_plan(conn, IO_IN, do_read_partial, next, next_arg);
}
  204. static int do_write_partial(int fd, struct io_plan_arg *arg)
  205. {
  206. ssize_t ret = write(fd, arg->u1.cp, *(size_t *)arg->u2.vp);
  207. if (ret < 0)
  208. return -1;
  209. *(size_t *)arg->u2.vp = ret;
  210. return 1;
  211. }
/* Queue a partial write request. */
/* Sets the IO_OUT plan: write up to @maxlen bytes of @data; on completion
 * *@len holds the number actually written and @next runs.  @len must stay
 * valid until then (its address is stored in the plan). */
struct io_plan *io_write_partial_(struct io_conn *conn,
				  const void *data, size_t maxlen, size_t *len,
				  struct io_plan *(*next)(struct io_conn *,
							  void*),
				  void *next_arg)
{
	struct io_plan_arg *arg = io_plan_arg(conn, IO_OUT);

	if (maxlen == 0)
		return set_always(conn, IO_OUT, next, next_arg);

	arg->u1.const_vp = data;
	/* We store the max len in here temporarily. */
	*len = maxlen;
	arg->u2.vp = len;

	return io_set_plan(conn, IO_OUT, do_write_partial, next, next_arg);
}
  228. static int do_connect(int fd, struct io_plan_arg *arg)
  229. {
  230. int err, ret;
  231. socklen_t len = sizeof(err);
  232. /* Has async connect finished? */
  233. ret = getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &len);
  234. if (ret < 0)
  235. return -1;
  236. if (err == 0) {
  237. return 1;
  238. } else if (err == EINPROGRESS)
  239. return 0;
  240. errno = err;
  241. return -1;
  242. }
/* Start an asynchronous connect(), polling IO_OUT until it completes,
 * then call @next.  An immediately-successful connect runs @next via an
 * always plan; any error other than EINPROGRESS closes the connection. */
struct io_plan *io_connect_(struct io_conn *conn, const struct addrinfo *addr,
			    struct io_plan *(*next)(struct io_conn *, void *),
			    void *next_arg)
{
	int fd = io_conn_fd(conn);

	/* We don't actually need the arg, but we need it polling. */
	io_plan_arg(conn, IO_OUT);

	/* Note that io_new_conn() will make fd O_NONBLOCK */

	/* Immediate connect can happen. */
	if (connect(fd, addr->ai_addr, addr->ai_addrlen) == 0)
		return set_always(conn, IO_OUT, next, next_arg);
	if (errno != EINPROGRESS)
		return io_close(conn);

	return io_set_plan(conn, IO_OUT, do_connect, next, next_arg);
}
/* Park direction @dir until io_wake(@wait) is called: store the wait key
 * and override the IO_POLLING status io_plan_arg() just set with
 * IO_WAITING so the backend won't poll the fd for it. */
static struct io_plan *io_wait_dir(struct io_conn *conn,
				   const void *wait,
				   enum io_direction dir,
				   struct io_plan *(*next)(struct io_conn *,
							   void *),
				   void *next_arg)
{
	struct io_plan_arg *arg = io_plan_arg(conn, dir);

	/* The key is matched by backend_wake(); only its address matters. */
	arg->u1.const_vp = wait;

	conn->plan[dir].status = IO_WAITING;

	return io_set_plan(conn, dir, NULL, next, next_arg);
}
/* Wait (in the IO_IN slot) until io_wake(@wait), then call @next. */
struct io_plan *io_wait_(struct io_conn *conn,
			 const void *wait,
			 struct io_plan *(*next)(struct io_conn *, void *),
			 void *next_arg)
{
	return io_wait_dir(conn, wait, IO_IN, next, next_arg);
}
/* As io_wait_, but in the IO_OUT slot (for duplex use). */
struct io_plan *io_out_wait_(struct io_conn *conn,
			     const void *wait,
			     struct io_plan *(*next)(struct io_conn *, void *),
			     void *next_arg)
{
	return io_wait_dir(conn, wait, IO_OUT, next, next_arg);
}
/* Wake every plan parked via io_wait_/io_out_wait_ on this key. */
void io_wake(const void *wait)
{
	backend_wake(wait);
}
/* Run one direction's I/O callback.  Returns -1 if the conn is closing
 * (or the callback failed, closing it), 0 if the plan still needs
 * polling, 1 if it completed and the next plan has been set up. */
static int do_plan(struct io_conn *conn, struct io_plan *plan)
{
	/* Someone else might have called io_close() on us. */
	if (plan->status == IO_CLOSING)
		return -1;

	/* We shouldn't have polled for this event if this wasn't true! */
	assert(plan->status == IO_POLLING);

	switch (plan->io(conn->fd.fd, &plan->arg)) {
	case -1:
		io_close(conn);
		return -1;
	case 0:
		return 0;
	case 1:
		next_plan(conn, plan);
		return 1;
	default:
		/* IO should only return -1, 0 or 1 */
		abort();
	}
}
/* Backend entry point: poll reported @pollflags for this connection.
 * If the IN plan closes the conn, both statuses become IO_CLOSING
 * (see io_close), so the OUT do_plan() below bails out safely. */
void io_ready(struct io_conn *conn, int pollflags)
{
	if (pollflags & POLLIN)
		do_plan(conn, &conn->plan[IO_IN]);

	if (pollflags & POLLOUT)
		do_plan(conn, &conn->plan[IO_OUT]);
}
/* Backend entry point: run any IO_ALWAYS plans (set via set_always).
 * If the first next_plan() closes the conn, both slots become
 * IO_CLOSING, so the second check fails harmlessly. */
void io_do_always(struct io_conn *conn)
{
	if (conn->plan[IO_IN].status == IO_ALWAYS)
		next_plan(conn, &conn->plan[IO_IN]);

	if (conn->plan[IO_OUT].status == IO_ALWAYS)
		next_plan(conn, &conn->plan[IO_OUT]);
}
/* Backend entry point: io_wake() matched this plan's wait key.
 * Convert the IO_WAITING plan into an always plan so @next runs. */
void io_do_wakeup(struct io_conn *conn, enum io_direction dir)
{
	struct io_plan *plan = &conn->plan[dir];

	assert(plan->status == IO_WAITING);

	set_always(conn, dir, plan->next, plan->next_arg);
}
/* Close the connection, we're done. */
/* Idempotent: both directions go to IO_CLOSING and the backend is told;
 * the actual teardown happens later in the backend, not here. */
struct io_plan *io_close(struct io_conn *conn)
{
	/* Already closing?  Don't close twice. */
	if (conn->plan[IO_IN].status == IO_CLOSING)
		return &conn->plan[IO_IN];

	conn->plan[IO_IN].status = conn->plan[IO_OUT].status = IO_CLOSING;
	/* Stash errno at close time in the IO_IN arg; presumably read back
	 * by the backend/finish path — not visible in this file. */
	conn->plan[IO_IN].arg.u1.s = errno;
	backend_new_closing(conn);

	return io_set_plan(conn, IO_IN, NULL, NULL, NULL);
}
/* Callback-shaped wrapper for io_close(); @next_arg is ignored. */
struct io_plan *io_close_cb(struct io_conn *conn, void *next_arg)
{
	return io_close(conn);
}
/* Exit the loop, returning this (non-NULL) arg. */
/* Only records the value; the loop (elsewhere) checks io_loop_return. */
void io_break(const void *ret)
{
	assert(ret);
	io_loop_return = (void *)ret;
}
/* A plan which should never proceed: its next callback is
 * io_never_called, which aborts.  NOTE(review): io_do_always() runs
 * IO_ALWAYS plans, which would trigger the abort — confirm the backend
 * treats this case as intended. */
struct io_plan *io_never(struct io_conn *conn, void *unused)
{
	return io_always(conn, io_never_called, NULL);
}
/* Accessor: the file descriptor underlying this connection. */
int io_conn_fd(const struct io_conn *conn)
{
	return conn->fd.fd;
}
/* Called before setting both directions of a duplex plan: both slots
 * must be unset, and debug is suspended until io_duplex_() restores it. */
void io_duplex_prepare(struct io_conn *conn)
{
	assert(conn->plan[IO_IN].status == IO_UNSET);
	assert(conn->plan[IO_OUT].status == IO_UNSET);

	/* We can't sync debug until we've set both: io_wait() and io_always
	 * can't handle it. */
	conn->debug_saved = conn->debug;
	io_set_debug(conn, false);
}
/* Combine an IO_IN and an IO_OUT plan into one duplex return value. */
struct io_plan *io_duplex_(struct io_plan *in_plan, struct io_plan *out_plan)
{
	struct io_conn *conn;

	/* in_plan must be conn->plan[IO_IN], out_plan must be [IO_OUT] */
	assert(out_plan == in_plan + 1);

	/* Restore debug (suspended by io_duplex_prepare). */
	conn = container_of(in_plan, struct io_conn, plan[IO_IN]);
	io_set_debug(conn, conn->debug_saved);

	/* Now set the plans again, to invoke sync debug. */
	io_set_plan(conn, IO_OUT,
		    out_plan->io, out_plan->next, out_plan->next_arg);
	io_set_plan(conn, IO_IN,
		    in_plan->io, in_plan->next, in_plan->next_arg);

	/* &conn->plan[2]: the duplex sentinel next_plan()'s assert accepts. */
	return out_plan + 1;
}
/* Close one direction: if the other direction has no plan either, this
 * becomes a full io_close(); otherwise return the still-unset slot so
 * the remaining direction keeps running. */
struct io_plan *io_halfclose(struct io_conn *conn)
{
	/* Already closing?  Don't close twice. */
	if (conn->plan[IO_IN].status == IO_CLOSING)
		return &conn->plan[IO_IN];

	/* Both unset?  OK. */
	if (conn->plan[IO_IN].status == IO_UNSET
	    && conn->plan[IO_OUT].status == IO_UNSET)
		return io_close(conn);

	/* We leave this unset then. */
	if (conn->plan[IO_IN].status == IO_UNSET)
		return &conn->plan[IO_IN];
	else
		return &conn->plan[IO_OUT];
}
/* Install @io/@next/@next_arg into @dir's plan slot and return it.
 * In normal mode this is just bookkeeping; in debug mode (conn->debug)
 * the plan is driven synchronously right here, on a blocking fd. */
struct io_plan *io_set_plan(struct io_conn *conn, enum io_direction dir,
			    int (*io)(int fd, struct io_plan_arg *arg),
			    struct io_plan *(*next)(struct io_conn *, void *),
			    void *next_arg)
{
	struct io_plan *plan = &conn->plan[dir];

	plan->io = io;
	plan->next = next;
	plan->next_arg = next_arg;
	/* Only a closing plan may have no next callback. */
	assert(plan->status == IO_CLOSING || next != NULL);

	if (!conn->debug)
		return plan;

	/* io_break() was hit: stop running synchronously. */
	if (io_loop_return) {
		io_debug_complete(conn);
		return plan;
	}

	switch (plan->status) {
	case IO_POLLING:
		/* Blocking fd (see io_set_debug), so this finishes. */
		while (do_plan(conn, plan) == 0);
		break;
	/* Shouldn't happen, since you said you did plan! */
	case IO_UNSET:
		abort();
	case IO_ALWAYS:
		/* If other one is ALWAYS, leave in list! */
		if (conn->plan[!dir].status != IO_ALWAYS)
			remove_from_always(conn);
		next_plan(conn, plan);
		break;
	case IO_WAITING:
	case IO_CLOSING:
		/* Can't be run synchronously: hand off to the hook. */
		io_debug_complete(conn);
	}

	return plan;
}
/* Toggle synchronous debug mode for this connection.  Debug mode runs
 * plans inline in io_set_plan(), which requires a blocking fd. */
void io_set_debug(struct io_conn *conn, bool debug)
{
	conn->debug = debug;

	/* Debugging means fds must block. */
	set_blocking(io_conn_fd(conn), debug);
}
/* Hook invoked from io_set_plan() in debug mode when a plan can't be
 * driven synchronously (waiting, closing, or the loop is breaking).
 * Deliberately empty here; NOTE(review): presumably a breakpoint/override
 * point for debugging builds — confirm. */
void io_debug_complete(struct io_conn *conn)
{
}