/* antithread.c */
/* Licensed under GPLv3+ - see LICENSE file for details */
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <fcntl.h>
#include <stdbool.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <signal.h>
#include <errno.h>
#include <assert.h>
#include "antithread.h"
#include <ccan/err/err.h>
#include <ccan/noerr/noerr.h>
#include <ccan/talloc/talloc.h>
#include <ccan/read_write_all/read_write_all.h>
#include <ccan/antithread/alloc/alloc.h>
#include <ccan/list/list.h>
/* FIXME: Valgrind support should be possible for some cases. Tricky
 * case is where another process allocates for you, but at worst we
 * could reset what is valid and what isn't on every entry into the
 * library or something. */

/* Every pool this process has mapped, so find_pool() can locate the
 * pool containing an arbitrary pointer. */
static LIST_HEAD(pools);
/* Talloc destroys parents before children (damn Tridge's failing destructors!)
 * so we need the first child (ie. last-destroyed) to actually clean up. */

/* Per-process state for one mapped pool.  Linked into the global `pools`
 * list so find_pool() can map an arbitrary pointer back to its pool. */
struct at_pool_contents {
	struct list_node list;		/* Entry in the global `pools` list. */
	void *pool;			/* Start of the shared mmap'd region. */
	unsigned long poolsize;		/* Size of the mapped region, bytes. */
	int fd;				/* Backing file; also used for fcntl locks. */
	int parent_rfd, parent_wfd;	/* Pipes to parent; -1 if we created the pool. */
	struct at_pool *atp;		/* Back-pointer to the owning at_pool. */
};
/* Public handle for a pool: the shared contents plus the talloc context
 * (from talloc_add_external) that allocations hang off. */
struct at_pool {
	struct at_pool_contents *p;	/* Shared mapping state. */
	const void *ctx;		/* talloc context for in-pool allocation. */
};
/* Parent-side handle for one antithread (a forked child process). */
struct athread {
	pid_t pid;		/* Child process id. */
	int rfd, wfd;		/* Pipes: read-from-child, write-to-child. */
};
  43. /* FIXME: Better locking through futexes. */
  44. static void lock(int fd, unsigned long off)
  45. {
  46. struct flock fl;
  47. fl.l_type = F_WRLCK;
  48. fl.l_whence = SEEK_SET;
  49. fl.l_start = off;
  50. fl.l_len = 1;
  51. while (fcntl(fd, F_SETLKW, &fl) < 0) {
  52. if (errno != EINTR)
  53. err(1, "Failure locking antithread file");
  54. }
  55. }
  56. static void unlock(int fd, unsigned long off)
  57. {
  58. struct flock fl;
  59. int serrno = errno;
  60. fl.l_type = F_UNLCK;
  61. fl.l_whence = SEEK_SET;
  62. fl.l_start = off;
  63. fl.l_len = 1;
  64. fcntl(fd, F_SETLK, &fl);
  65. errno = serrno;
  66. }
  67. /* This pointer is in a pool. Find which one. */
  68. static struct at_pool_contents *find_pool(const void *ptr)
  69. {
  70. struct at_pool_contents *p;
  71. list_for_each(&pools, p, list) {
  72. /* Special case for initial allocation: ptr *is* pool */
  73. if (ptr == p->atp)
  74. return p;
  75. if ((char *)ptr >= (char *)p->pool
  76. && (char *)ptr < (char *)p->pool + p->poolsize)
  77. return p;
  78. }
  79. abort();
  80. }
  81. static int destroy_pool(struct at_pool_contents *p)
  82. {
  83. list_del(&p->list);
  84. munmap(p->pool, p->poolsize);
  85. close(p->fd);
  86. close(p->parent_rfd);
  87. close(p->parent_wfd);
  88. return 0;
  89. }
  90. static void *at_realloc(const void *parent, void *ptr, size_t size)
  91. {
  92. struct at_pool_contents *p = find_pool(parent);
  93. /* FIXME: realloc in ccan/alloc? */
  94. void *new;
  95. if (size == 0) {
  96. alloc_free(p->pool, p->poolsize, ptr);
  97. new = NULL;
  98. } else if (ptr == NULL) {
  99. /* FIXME: Alignment */
  100. new = alloc_get(p->pool, p->poolsize, size, 16);
  101. } else {
  102. if (size <= alloc_size(p->pool, p->poolsize, ptr))
  103. new = ptr;
  104. else {
  105. new = alloc_get(p->pool, p->poolsize, size, 16);
  106. if (new) {
  107. memcpy(new, ptr,
  108. alloc_size(p->pool, p->poolsize, ptr));
  109. alloc_free(p->pool, p->poolsize, ptr);
  110. }
  111. }
  112. }
  113. return new;
  114. }
/* The pool whose whole-pool lock we currently hold; NULL when unlocked.
 * Only one pool lock may be held at a time (see assert below). */
static struct at_pool_contents *locked;

/* Callback for talloc: take the whole-pool lock (byte 0 of the backing
 * file) before talloc touches anything in ptr's pool. */
static void talloc_lock(const void *ptr)
{
	struct at_pool_contents *p = find_pool(ptr);
	lock(p->fd, 0);
	assert(!locked);
	locked = p;
}

/* Callback for talloc: release the lock taken by talloc_lock(). */
static void talloc_unlock(void)
{
	struct at_pool_contents *p = locked;
	/* Clear before unlocking so another taker sees consistent state. */
	locked = NULL;
	unlock(p->fd, 0);
}
  129. /* We add 16MB to size. This compensates for address randomization. */
  130. #define PADDING (16 * 1024 * 1024)
  131. /* Create a new sharable pool. */
  132. struct at_pool *at_pool(unsigned long size)
  133. {
  134. int fd;
  135. struct at_pool *atp;
  136. struct at_pool_contents *p;
  137. FILE *f;
  138. /* FIXME: How much should we actually add for overhead?. */
  139. size += 32 * getpagesize();
  140. /* Round up to whole pages. */
  141. size = (size + getpagesize()-1) & ~(getpagesize()-1);
  142. f = tmpfile();
  143. if (!f)
  144. return NULL;
  145. fd = dup(fileno(f));
  146. fclose_noerr(f);
  147. if (fd < 0)
  148. return NULL;
  149. if (ftruncate(fd, size + PADDING) != 0)
  150. goto fail_close;
  151. atp = talloc(NULL, struct at_pool);
  152. if (!atp)
  153. goto fail_close;
  154. atp->p = p = talloc(NULL, struct at_pool_contents);
  155. if (!p)
  156. goto fail_free;
  157. /* First map gets a nice big area. */
  158. p->pool = mmap(NULL, size+PADDING, PROT_READ|PROT_WRITE, MAP_SHARED, fd,
  159. 0);
  160. if (p->pool == MAP_FAILED)
  161. goto fail_free;
  162. /* Then we remap into the middle of it. */
  163. munmap(p->pool, size+PADDING);
  164. p->pool = mmap((char *)p->pool + PADDING/2, size, PROT_READ|PROT_WRITE,
  165. MAP_SHARED, fd, 0);
  166. if (p->pool == MAP_FAILED)
  167. goto fail_free;
  168. p->fd = fd;
  169. p->poolsize = size;
  170. p->parent_rfd = p->parent_wfd = -1;
  171. p->atp = atp;
  172. alloc_init(p->pool, p->poolsize);
  173. list_add(&pools, &p->list);
  174. talloc_set_destructor(p, destroy_pool);
  175. atp->ctx = talloc_add_external(atp,
  176. at_realloc, talloc_lock, talloc_unlock);
  177. if (!atp->ctx)
  178. goto fail_free;
  179. return atp;
  180. fail_free:
  181. talloc_free(atp);
  182. fail_close:
  183. close_noerr(fd);
  184. return NULL;
  185. }
/* Talloc off this to allocate from within the pool. */
const void *at_pool_ctx(struct at_pool *atp)
{
	/* ctx is the external talloc context set up in at_pool() /
	 * at_get_pool(); allocations off it go through at_realloc(). */
	return atp->ctx;
}
/* talloc destructor installed on the child's copy of its own athread
 * handle: freeing it from within the child is a programming error. */
static int cant_destroy_self(struct athread *at)
{
	/* Perhaps this means we want to detach, but it doesn't really
	 * make sense. */
	abort();
	/* Not reached; talloc destructors must return int. */
	return 0;
}
  198. static int destroy_at(struct athread *at)
  199. {
  200. /* If it is already a zombie, this is harmless. */
  201. kill(at->pid, SIGTERM);
  202. close(at->rfd);
  203. close(at->wfd);
  204. /* FIXME: Should we do SIGKILL if process doesn't exit soon? */
  205. if (waitpid(at->pid, NULL, 0) != at->pid)
  206. err(1, "Waiting for athread %p (pid %u)", at, at->pid);
  207. return 0;
  208. }
  209. /* Sets up thread and forks it. NULL on error. */
  210. static struct athread *fork_thread(struct at_pool *atp)
  211. {
  212. int p2c[2], c2p[2];
  213. struct athread *at;
  214. struct at_pool_contents *pool = atp->p;
  215. /* You can't already be a child of this pool. */
  216. if (pool->parent_rfd != -1)
  217. errx(1, "Can't create antithread on this pool: we're one");
  218. /* We don't want this allocated *in* the pool. */
  219. at = talloc_steal(atp, talloc(NULL, struct athread));
  220. if (pipe(p2c) != 0)
  221. goto free;
  222. if (pipe(c2p) != 0)
  223. goto close_p2c;
  224. at->pid = fork();
  225. if (at->pid == -1)
  226. goto close_c2p;
  227. if (at->pid == 0) {
  228. /* Child */
  229. close(c2p[0]);
  230. close(p2c[1]);
  231. pool->parent_rfd = p2c[0];
  232. pool->parent_wfd = c2p[1];
  233. talloc_set_destructor(at, cant_destroy_self);
  234. } else {
  235. /* Parent */
  236. close(c2p[1]);
  237. close(p2c[0]);
  238. at->rfd = c2p[0];
  239. at->wfd = p2c[1];
  240. talloc_set_destructor(at, destroy_at);
  241. }
  242. return at;
  243. close_c2p:
  244. close_noerr(c2p[0]);
  245. close_noerr(c2p[1]);
  246. close_p2c:
  247. close_noerr(p2c[0]);
  248. close_noerr(p2c[1]);
  249. free:
  250. talloc_free(at);
  251. return NULL;
  252. }
  253. /* Creating an antithread via fork() */
  254. struct athread *_at_run(struct at_pool *atp,
  255. void *(*fn)(struct at_pool *, void *),
  256. void *obj)
  257. {
  258. struct athread *at;
  259. at = fork_thread(atp);
  260. if (!at)
  261. return NULL;
  262. if (at->pid == 0) {
  263. /* Child */
  264. at_tell_parent(atp, fn(atp, obj));
  265. exit(0);
  266. }
  267. /* Parent */
  268. return at;
  269. }
  270. static unsigned int num_args(char *const argv[])
  271. {
  272. unsigned int i;
  273. for (i = 0; argv[i]; i++);
  274. return i;
  275. }
/* Fork and execvp, with added arguments for child to grab. */
/* The new argv[1] encodes the pool/pipe state ("AT:..."), which the
 * exec'd program recovers with at_get_pool().  The child (via
 * at_get_pool) or the failed exec writes an int status back through the
 * pipe; we block reading it, so a child that never calls at_get_pool()
 * would hang us here. */
struct athread *at_spawn(struct at_pool *atp, void *arg, char *cmdline[])
{
	struct athread *at;
	int err;

	at = fork_thread(atp);
	if (!at)
		return NULL;

	if (at->pid == 0) {
		/* child: build cmdline with "AT:..." inserted as argv[1]. */
		char *argv[num_args(cmdline) + 2];
		argv[0] = cmdline[0];
		/* NOTE(review): talloc_asprintf result is not checked for
		 * NULL before exec — confirm acceptable (execvp would then
		 * see a NULL argv[1]). */
		argv[1] = talloc_asprintf(NULL, "AT:%p/%lu/%i/%i/%i/%p",
					  atp->p->pool, atp->p->poolsize,
					  atp->p->fd, atp->p->parent_rfd,
					  atp->p->parent_wfd, arg);
		/* Copy including NULL terminator. */
		memcpy(&argv[2], &cmdline[1], num_args(cmdline)*sizeof(char *));
		execvp(argv[0], argv);
		/* Only reached if exec failed: report errno to parent. */
		err = errno;
		write_all(atp->p->parent_wfd, &err, sizeof(err));
		exit(1);
	}

	/* Child should always write an error code (or 0). */
	if (read(at->rfd, &err, sizeof(err)) != sizeof(err)) {
		errno = ECHILD;
		talloc_free(at);
		return NULL;
	}
	if (err != 0) {
		errno = err;
		talloc_free(at);
		return NULL;
	}
	return at;
}
/* The fd to poll on */
int at_fd(struct athread *at)
{
	/* Becomes readable when the child sends us something. */
	return at->rfd;
}
  317. /* What's the antithread saying? Blocks if fd not ready. */
  318. void *at_read(struct athread *at)
  319. {
  320. void *ret;
  321. switch (read(at->rfd, &ret, sizeof(ret))) {
  322. case -1:
  323. err(1, "Reading from athread %p (pid %u)", at, at->pid);
  324. case 0:
  325. /* Thread died. */
  326. return NULL;
  327. case sizeof(ret):
  328. return ret;
  329. default:
  330. /* Should never happen. */
  331. err(1, "Short read from athread %p (pid %u)", at, at->pid);
  332. }
  333. }
  334. /* Say something to a child. */
  335. void at_tell(struct athread *at, const void *status)
  336. {
  337. if (write(at->wfd, &status, sizeof(status)) != sizeof(status))
  338. err(1, "Failure writing to athread %p (pid %u)", at, at->pid);
  339. }
  340. /* For child to grab arguments from command line (removes them) */
  341. struct at_pool *at_get_pool(int *argc, char *argv[], void **arg)
  342. {
  343. struct at_pool *atp = talloc(NULL, struct at_pool);
  344. struct at_pool_contents *p;
  345. void *map;
  346. int err;
  347. if (!argv[1]) {
  348. errno = EINVAL;
  349. goto fail;
  350. }
  351. /* If they don't care, use dummy value. */
  352. if (arg == NULL)
  353. arg = &map;
  354. p = atp->p = talloc(atp, struct at_pool_contents);
  355. if (sscanf(argv[1], "AT:%p/%lu/%i/%i/%i/%p",
  356. &p->pool, &p->poolsize, &p->fd,
  357. &p->parent_rfd, &p->parent_wfd, arg) != 6) {
  358. errno = EINVAL;
  359. goto fail;
  360. }
  361. /* FIXME: To try to adjust for address space randomization, we
  362. * could re-exec a few times. */
  363. map = mmap(p->pool, p->poolsize, PROT_READ|PROT_WRITE, MAP_SHARED,
  364. p->fd, 0);
  365. if (map != p->pool) {
  366. fprintf(stderr, "Mapping %lu bytes @%p gave %p\n",
  367. p->poolsize, p->pool, map);
  368. errno = ENOMEM;
  369. goto fail;
  370. }
  371. list_add(&pools, &p->list);
  372. talloc_set_destructor(p, destroy_pool);
  373. p->atp = atp;
  374. atp->ctx = talloc_add_external(atp,
  375. at_realloc, talloc_lock, talloc_unlock);
  376. if (!atp->ctx)
  377. goto fail;
  378. /* Tell parent we're good. */
  379. err = 0;
  380. if (write(p->parent_wfd, &err, sizeof(err)) != sizeof(err)) {
  381. errno = EBADF;
  382. goto fail;
  383. }
  384. /* Delete AT arg. */
  385. memmove(&argv[1], &argv[2], --(*argc));
  386. return atp;
  387. fail:
  388. talloc_free(atp);
  389. return NULL;
  390. }
  391. /* Say something to our parent (async). */
  392. void at_tell_parent(struct at_pool *atp, const void *status)
  393. {
  394. if (atp->p->parent_wfd == -1)
  395. errx(1, "This process is not an antithread of this pool");
  396. if (write(atp->p->parent_wfd, &status, sizeof(status))!=sizeof(status))
  397. err(1, "Failure writing to parent");
  398. }
  399. /* What's the parent saying? Blocks if fd not ready. */
  400. void *at_read_parent(struct at_pool *atp)
  401. {
  402. void *ret;
  403. if (atp->p->parent_rfd == -1)
  404. errx(1, "This process is not an antithread of this pool");
  405. switch (read(atp->p->parent_rfd, &ret, sizeof(ret))) {
  406. case -1:
  407. err(1, "Reading from parent");
  408. case 0:
  409. /* Parent died. */
  410. return NULL;
  411. case sizeof(ret):
  412. return ret;
  413. default:
  414. /* Should never happen. */
  415. err(1, "Short read from parent");
  416. }
  417. }
/* The fd to poll on */
int at_parent_fd(struct at_pool *atp)
{
	/* Only a child created by fork_thread()/at_spawn() has this pipe. */
	if (atp->p->parent_rfd == -1)
		errx(1, "This process is not an antithread of this pool");
	return atp->p->parent_rfd;
}
/* FIXME: Futexme. */
/* Lock one object in the pool: a byte-range fcntl lock on the backing
 * file at the object's offset within the pool.
 * NOTE(review): talloc_find_parent_bytype returning NULL (obj not under
 * an at_pool) would crash on atp->p below — confirm callers guarantee
 * obj is a pool pointer. */
void at_lock(void *obj)
{
	struct at_pool *atp = talloc_find_parent_bytype(obj, struct at_pool);
#if 0
	unsigned int *l;

	/* This isn't required yet, but ensures it's a talloc ptr */
	l = talloc_lock_ptr(obj);
#endif

	/* Lock the byte at obj's offset in the backing file. */
	lock(atp->p->fd, (char *)obj - (char *)atp->p->pool);

#if 0
	if (*l)
		errx(1, "Object %p was already locked (something died?)", obj);
	*l = 1;
#endif
}
/* Release the per-object lock taken by at_lock(): same byte offset in
 * the backing file.  (Disabled code mirrors at_lock's sanity checks.) */
void at_unlock(void *obj)
{
	struct at_pool *atp = talloc_find_parent_bytype(obj, struct at_pool);
#if 0
	unsigned int *l;

	l = talloc_lock_ptr(obj);
	if (!*l)
		errx(1, "Object %p was already unlocked", obj);
	*l = 0;
#endif
	unlock(atp->p->fd, (char *)obj - (char *)atp->p->pool);
}
/* Take the whole-pool lock (byte 0 — the same byte talloc_lock() uses). */
void at_lock_all(struct at_pool *atp)
{
	lock(atp->p->fd, 0);
}
/* Release the whole-pool lock taken by at_lock_all(). */
void at_unlock_all(struct at_pool *atp)
{
	unlock(atp->p->fd, 0);
}