/* antithread.c */

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>
#include <stdbool.h>
#include <string.h>
#include <signal.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <errno.h>
#include <err.h>
#include "antithread.h"
#include <ccan/noerr/noerr.h>
#include <ccan/talloc/talloc.h>
#include <ccan/alloc/alloc.h>

/* FIXME: Valgrind support should be possible for some cases.  Tricky
 * case is where another process allocates for you, but at worst we
 * could reset what is valid and what isn't on every entry into the
 * library or something. */
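
/*
 * Rough usage sketch (not part of this file; assumes at_run() is the
 * wrapper around _at_run() declared in antithread.h, and worker() is a
 * user-supplied void *(*)(struct at_pool *, void *)):
 *
 *	struct at_pool *pool = at_pool(1024 * 1024);
 *	void *job = talloc_strdup(at_pool_ctx(pool), "hello");  // lives in the pool
 *	struct athread *at = at_run(pool, worker, job);
 *	void *answer = at_read(at);   // blocks until the child at_tell_parent()s
 *	talloc_free(at);              // SIGTERMs and reaps the child
 */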

struct at_pool
{
        const void *ctx;                /* Talloc context callers allocate from. */
        void *pool;                     /* Shared mmap of the backing file. */
        unsigned long poolsize;         /* Size of the mapping, in bytes. */
        int fd;                         /* Backing file; also used for locking. */
        int parent_rfd, parent_wfd;     /* Pipes to parent, or -1 if we are the parent. */
};

struct athread
{
        pid_t pid;                      /* Child process implementing the "thread". */
        int rfd, wfd;                   /* Parent's ends of the pipes to that child. */
};
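
/* Locking is done with fcntl() byte-range locks on the pool's backing
 * file: byte 0 guards the allocator itself (see at_realloc), and at_lock()
 * below locks the byte at an object's offset within the pool. */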

/* FIXME: Better locking through futexes. */
static void lock(int fd, unsigned long off)
{
        struct flock fl;

        fl.l_type = F_WRLCK;
        fl.l_whence = SEEK_SET;
        fl.l_start = off;
        fl.l_len = 1;

        while (fcntl(fd, F_SETLKW, &fl) < 0) {
                if (errno != EINTR)
                        err(1, "Failure locking antithread file");
        }
}

static void unlock(int fd, unsigned long off)
{
        struct flock fl;
        int serrno = errno;

        fl.l_type = F_UNLCK;
        fl.l_whence = SEEK_SET;
        fl.l_start = off;
        fl.l_len = 1;

        fcntl(fd, F_SETLK, &fl);
        errno = serrno;
}
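
/* This is the allocator hook passed to talloc_add_external() below:
 * talloc routes allocations, resizes and frees on the pool context
 * through here, so the whole-pool lock (offset 0) is held around each
 * operation. */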

static void *at_realloc(const void *parent, void *ptr, size_t size)
{
        struct at_pool *p = talloc_find_parent_bytype(parent, struct at_pool);
        /* FIXME: realloc in ccan/alloc? */
        void *new;

        lock(p->fd, 0);
        if (size == 0) {
                alloc_free(p->pool, p->poolsize, ptr);
                new = NULL;
        } else if (ptr == NULL) {
                /* FIXME: Alignment */
                new = alloc_get(p->pool, p->poolsize, size, 16);
        } else {
                if (size <= alloc_size(p->pool, p->poolsize, ptr))
                        new = ptr;
                else {
                        new = alloc_get(p->pool, p->poolsize, size, 16);
                        if (new) {
                                memcpy(new, ptr,
                                       alloc_size(p->pool, p->poolsize, ptr));
                                alloc_free(p->pool, p->poolsize, ptr);
                        }
                }
        }
        unlock(p->fd, 0);

        return new;
}

/* We add 16MB to size.  This compensates for address randomization. */
#define PADDING (16 * 1024 * 1024)
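/* The slack presumably matters because a child created via at_spawn() must
 * re-mmap the pool at the same address (see at_get_pool); mapping into the
 * middle of a previously free 16MB window makes a collision after exec()
 * less likely. */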

/* Create a new sharable pool. */
struct at_pool *at_pool(unsigned long size)
{
        int fd;
        struct at_pool *p;
        FILE *f;

        /* FIXME: How much should we actually add for overhead? */
        size += 32 * getpagesize();

        /* Round up to whole pages. */
        size = (size + getpagesize()-1) & ~(getpagesize()-1);

        f = tmpfile();
        if (!f)
                return NULL;
        fd = dup(fileno(f));
        fclose_noerr(f);

        if (fd < 0)
                return NULL;

        if (ftruncate(fd, size + PADDING) != 0)
                goto fail_close;

        p = talloc(NULL, struct at_pool);
        if (!p)
                goto fail_close;

        /* First map gets a nice big area. */
        p->pool = mmap(NULL, size+PADDING, PROT_READ|PROT_WRITE, MAP_SHARED,
                       fd, 0);
        if (p->pool == MAP_FAILED)
                goto fail_free;

        /* Then we remap into the middle of it. */
        munmap(p->pool, size+PADDING);
        p->pool = mmap(p->pool + PADDING/2, size, PROT_READ|PROT_WRITE,
                       MAP_SHARED, fd, 0);
        if (p->pool == MAP_FAILED)
                goto fail_free;

        /* FIXME: Destructor? */
        p->fd = fd;
        p->poolsize = size;
        p->parent_rfd = p->parent_wfd = -1;

        alloc_init(p->pool, p->poolsize);

        p->ctx = talloc_add_external(p, at_realloc);
        if (!p->ctx)
                goto fail_unmap;

        return p;

fail_unmap:
        munmap(p->pool, size);
fail_free:
        talloc_free(p);
fail_close:
        close_noerr(fd);
        return NULL;
}

/* Talloc off this to allocate from within the pool. */
const void *at_pool_ctx(struct at_pool *atp)
{
        return atp->ctx;
}

static int cant_destroy_self(struct athread *at)
{
        /* Perhaps this means we want to detach, but it doesn't really
         * make sense. */
        abort();
        return 0;
}

static int destroy_at(struct athread *at)
{
        /* If it is already a zombie, this is harmless. */
        kill(at->pid, SIGTERM);

        close(at->rfd);
        close(at->wfd);

        /* FIXME: Should we do SIGKILL if process doesn't exit soon? */
        if (waitpid(at->pid, NULL, 0) != at->pid)
                err(1, "Waiting for athread %p (pid %u)", at, at->pid);

        return 0;
}
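
/* p2c is the parent-to-child pipe, c2p the child-to-parent one.  The child
 * keeps p2c's read end and c2p's write end (stored in the pool as
 * parent_rfd/parent_wfd); the parent keeps the other two ends in at->rfd
 * and at->wfd. */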
/* Sets up thread and forks it.  NULL on error. */
static struct athread *fork_thread(struct at_pool *pool)
{
        int p2c[2], c2p[2];
        struct athread *at;

        /* You can't already be a child of this pool. */
        if (pool->parent_rfd != -1)
                errx(1, "Can't create antithread on this pool: we're one");

        /* We don't want this allocated *in* the pool. */
        at = talloc_steal(pool, talloc(NULL, struct athread));
        if (!at)
                return NULL;

        if (pipe(p2c) != 0)
                goto free;
        if (pipe(c2p) != 0)
                goto close_p2c;

        at->pid = fork();
        if (at->pid == -1)
                goto close_c2p;

        if (at->pid == 0) {
                /* Child */
                close(c2p[0]);
                close(p2c[1]);
                pool->parent_rfd = p2c[0];
                pool->parent_wfd = c2p[1];
                talloc_set_destructor(at, cant_destroy_self);
        } else {
                /* Parent */
                close(c2p[1]);
                close(p2c[0]);
                at->rfd = c2p[0];
                at->wfd = p2c[1];
                talloc_set_destructor(at, destroy_at);
        }

        return at;

close_c2p:
        close_noerr(c2p[0]);
        close_noerr(c2p[1]);
close_p2c:
        close_noerr(p2c[0]);
        close_noerr(p2c[1]);
free:
        talloc_free(at);
        return NULL;
}

/* Creating an antithread via fork() */
struct athread *_at_run(struct at_pool *pool,
                        void *(*fn)(struct at_pool *, void *),
                        void *obj)
{
        struct athread *at;

        at = fork_thread(pool);
        if (!at)
                return NULL;

        if (at->pid == 0) {
                /* Child */
                at_tell_parent(pool, fn(pool, obj));
                exit(0);
        }
        /* Parent */
        return at;
}
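
/* Count the entries in a NULL-terminated argv-style array. */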
static unsigned int num_args(char *const argv[])
{
        unsigned int i;

        for (i = 0; argv[i]; i++);
        return i;
}

/* Fork and execvp, with added arguments for child to grab. */
struct athread *at_spawn(struct at_pool *pool, void *arg, char *cmdline[])
{
        struct athread *at;
        int err;

        at = fork_thread(pool);
        if (!at)
                return NULL;

        if (at->pid == 0) {
                /* child */
                char *argv[num_args(cmdline) + 2];
                argv[0] = cmdline[0];
                argv[1] = talloc_asprintf(NULL, "AT:%p/%lu/%i/%i/%i/%p",
                                          pool->pool, pool->poolsize,
                                          pool->fd, pool->parent_rfd,
                                          pool->parent_wfd, arg);
                /* Copy including NULL terminator. */
                memcpy(&argv[2], &cmdline[1], num_args(cmdline)*sizeof(char *));
                execvp(argv[0], argv);

                err = errno;
                write(pool->parent_wfd, &err, sizeof(err));
                exit(1);
        }

        /* Child should always write an error code (or 0). */
        if (read(at->rfd, &err, sizeof(err)) != sizeof(err)) {
                errno = ECHILD;
                talloc_free(at);
                return NULL;
        }
        if (err != 0) {
                errno = err;
                talloc_free(at);
                return NULL;
        }
        return at;
}
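
/* The values exchanged by at_tell()/at_read() (and the *_parent variants
 * below) are raw pointers sent over a pipe; they are only meaningful
 * because both sides map the pool at the same address, so they should
 * point into the pool (or be sentinels such as NULL). */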

/* The fd to poll on */
int at_fd(struct athread *at)
{
        return at->rfd;
}

/* What's the antithread saying?  Blocks if fd not ready. */
void *at_read(struct athread *at)
{
        void *ret;

        switch (read(at->rfd, &ret, sizeof(ret))) {
        case -1:
                err(1, "Reading from athread %p (pid %u)", at, at->pid);
        case 0:
                /* Thread died. */
                return NULL;
        case sizeof(ret):
                return ret;
        default:
                /* Should never happen. */
                err(1, "Short read from athread %p (pid %u)", at, at->pid);
        }
}

/* Say something to a child. */
void at_tell(struct athread *at, const void *status)
{
        if (write(at->wfd, &status, sizeof(status)) != sizeof(status))
                err(1, "Failure writing to athread %p (pid %u)", at, at->pid);
}
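
/*
 * Sketch of a spawned child's main(), for illustration only (do_work() is
 * hypothetical; the antithread calls are the ones defined here):
 *
 *	int main(int argc, char *argv[])
 *	{
 *		void *arg;
 *		struct at_pool *pool = at_get_pool(&argc, argv, &arg);
 *		if (!pool)
 *			errx(1, "%s: not spawned by at_spawn?", argv[0]);
 *		at_tell_parent(pool, do_work(pool, arg));
 *		return 0;
 *	}
 */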

/* For child to grab arguments from command line (removes them) */
struct at_pool *at_get_pool(int *argc, char *argv[], void **arg)
{
        struct at_pool *p = talloc(NULL, struct at_pool);
        void *map;
        int err;

        if (!argv[1]) {
                errno = EINVAL;
                goto fail;
        }

        /* If they don't care, use dummy value. */
        if (arg == NULL)
                arg = &map;

        if (sscanf(argv[1], "AT:%p/%lu/%i/%i/%i/%p",
                   &p->pool, &p->poolsize, &p->fd,
                   &p->parent_rfd, &p->parent_wfd, arg) != 6) {
                errno = EINVAL;
                goto fail;
        }

        /* FIXME: To try to adjust for address space randomization, we
         * could re-exec a few times. */
        map = mmap(p->pool, p->poolsize, PROT_READ|PROT_WRITE, MAP_SHARED,
                   p->fd, 0);
        if (map != p->pool) {
                fprintf(stderr, "Mapping %lu bytes @%p gave %p\n",
                        p->poolsize, p->pool, map);
                errno = ENOMEM;
                goto fail;
        }

        p->ctx = talloc_add_external(p, at_realloc);
        if (!p->ctx)
                goto fail;

        /* Tell parent we're good. */
        err = 0;
        if (write(p->parent_wfd, &err, sizeof(err)) != sizeof(err)) {
                errno = EBADF;
                goto fail;
        }

        /* Delete AT arg, moving the NULL terminator down as well. */
        memmove(&argv[1], &argv[2], (*argc - 1) * sizeof(argv[1]));
        (*argc)--;

        return p;

fail:
        /* FIXME: cleanup properly. */
        talloc_free(p);
        return NULL;
}

/* Say something to our parent (async). */
void at_tell_parent(struct at_pool *pool, const void *status)
{
        if (pool->parent_wfd == -1)
                errx(1, "This process is not an antithread of this pool");

        if (write(pool->parent_wfd, &status, sizeof(status)) != sizeof(status))
                err(1, "Failure writing to parent");
}

/* What's the parent saying?  Blocks if fd not ready. */
void *at_read_parent(struct at_pool *pool)
{
        void *ret;

        if (pool->parent_rfd == -1)
                errx(1, "This process is not an antithread of this pool");

        switch (read(pool->parent_rfd, &ret, sizeof(ret))) {
        case -1:
                err(1, "Reading from parent");
        case 0:
                /* Parent died. */
                return NULL;
        case sizeof(ret):
                return ret;
        default:
                /* Should never happen. */
                err(1, "Short read from parent");
        }
}

/* The fd to poll on */
int at_parent_fd(struct at_pool *pool)
{
        if (pool->parent_rfd == -1)
                errx(1, "This process is not an antithread of this pool");

        return pool->parent_rfd;
}
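
/* Per-object locks: the byte locked is the object's offset into the pool,
 * so two processes locking the same pool object contend on the same byte
 * of the backing file. */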
/* FIXME: Futexme. */
void at_lock(void *obj)
{
        struct at_pool *p = talloc_find_parent_bytype(obj, struct at_pool);
#if 0
        unsigned int *l;

        /* This isn't required yet, but ensures it's a talloc ptr */
        l = talloc_lock_ptr(obj);
#endif

        lock(p->fd, (char *)obj - (char *)p->pool);

#if 0
        if (*l)
                errx(1, "Object %p was already locked (something died?)", obj);
        *l = 1;
#endif
}

void at_unlock(void *obj)
{
        struct at_pool *p = talloc_find_parent_bytype(obj, struct at_pool);
#if 0
        unsigned int *l;

        l = talloc_lock_ptr(obj);
        if (!*l)
                errx(1, "Object %p was already unlocked", obj);
        *l = 0;
#endif
        unlock(p->fd, (char *)obj - (char *)p->pool);
}
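
/* Lock/unlock the whole pool: byte 0, the same lock the allocator takes. */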
void at_lock_all(struct at_pool *p)
{
        lock(p->fd, 0);
}

void at_unlock_all(struct at_pool *p)
{
        unlock(p->fd, 0);
}