poll_windows.c 20 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726
  1. /*
  2. * poll_windows: poll compatibility wrapper for Windows
  3. * Copyright © 2012-2013 RealVNC Ltd.
  4. * Copyright © 2009-2010 Pete Batard <pete@akeo.ie>
  5. * With contributions from Michael Plante, Orin Eman et al.
  6. * Parts of poll implementation from libusb-win32, by Stephan Meyer et al.
  7. *
  8. * This library is free software; you can redistribute it and/or
  9. * modify it under the terms of the GNU Lesser General Public
  10. * License as published by the Free Software Foundation; either
  11. * version 2.1 of the License, or (at your option) any later version.
  12. *
  13. * This library is distributed in the hope that it will be useful,
  14. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  16. * Lesser General Public License for more details.
  17. *
  18. * You should have received a copy of the GNU Lesser General Public
  19. * License along with this library; if not, write to the Free Software
  20. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  21. *
  22. */
  23. /*
  24. * poll() and pipe() Windows compatibility layer for libusbx 1.0
  25. *
  26. * The way this layer works is by using OVERLAPPED with async I/O transfers, as
  27. * OVERLAPPED have an associated event which is flagged for I/O completion.
  28. *
  29. * For USB pollable async I/O, you would typically:
  30. * - obtain a Windows HANDLE to a file or device that has been opened in
  31. * OVERLAPPED mode
  32. * - call usbi_create_fd with this handle to obtain a custom fd.
  33. * Note that if you need simultaneous R/W access, you need to call create_fd
  34. * twice, once in RW_READ and once in RW_WRITE mode to obtain 2 separate
  35. * pollable fds
  36. * - leave the core functions call the poll routine and flag POLLIN/POLLOUT
  37. *
  38. * The pipe pollable synchronous I/O works using the overlapped event associated
  39. * with a fake pipe. The read/write functions are only meant to be used in that
  40. * context.
  41. */
  42. #include <errno.h>
  43. #include <stdio.h>
  44. #include <stdlib.h>
  45. #include "libusbi.h"
  46. // Uncomment to debug the polling layer
  47. //#define DEBUG_POLL_WINDOWS
  48. #if defined(DEBUG_POLL_WINDOWS)
  49. #define poll_dbg usbi_dbg
  50. #else
  51. // MSVC++ < 2005 cannot use a variadic argument and non MSVC
  52. // compilers produce warnings if parenthesis are ommitted.
  53. #if defined(_MSC_VER) && (_MSC_VER < 1400)
  54. #define poll_dbg
  55. #else
  56. #define poll_dbg(...)
  57. #endif
  58. #endif
  59. #if defined(_PREFAST_)
  60. #pragma warning(disable:28719)
  61. #endif
// Lazily initialize the polling layer on first use from any public entry point
#define CHECK_INIT_POLLING do {if(!is_polling_set) init_polling();} while(0)

// public fd data
const struct winfd INVALID_WINFD = {-1, INVALID_HANDLE_VALUE, NULL, NULL, NULL, RW_NONE};
struct winfd poll_fd[MAX_FDS];

// internal fd data — parallel array to poll_fd, indexed by the same slot number
struct {
	CRITICAL_SECTION mutex; // lock for fds
	// Additional variables for XP CancelIoEx partial emulation
	HANDLE original_handle;	// handle the caller gave us when we duplicated it (see usbi_create_fd)
	DWORD thread_id;	// thread that created the fd; CancelIo fallback only works from it
} _poll_fd[MAX_FDS];

// globals
BOOLEAN is_polling_set = FALSE;	// TRUE once init_polling() has run
LONG pipe_number = 0;
static volatile LONG compat_spinlock = 0;	// guards init_polling()/exit_polling()
#if !defined(_WIN32_WCE)
// CancelIoEx, available on Vista and later only, provides the ability to cancel
// a single transfer (OVERLAPPED) when used. As it may not be part of any of the
// platform headers, we hook into the Kernel32 system DLL directly to seek it.
static BOOL (__stdcall *pCancelIoEx)(HANDLE, LPOVERLAPPED) = NULL;
// When CancelIoEx is unavailable (pre-Vista), fds duplicate the caller's handle
// so CancelIo can be partially emulated (see usbi_create_fd / _free_index)
#define Use_Duplicate_Handles (pCancelIoEx == NULL)
  83. static inline void setup_cancel_io(void)
  84. {
  85. HMODULE hKernel32 = GetModuleHandleA("KERNEL32");
  86. if (hKernel32 != NULL) {
  87. pCancelIoEx = (BOOL (__stdcall *)(HANDLE,LPOVERLAPPED))
  88. GetProcAddress(hKernel32, "CancelIoEx");
  89. }
  90. usbi_dbg("Will use CancelIo%s for I/O cancellation",
  91. Use_Duplicate_Handles?"":"Ex");
  92. }
// Best-effort cancellation of the I/O pending on fd slot _index.
// Returns FALSE if _index is out of range or the I/O could not be cancelled,
// TRUE if there was nothing to cancel or a cancellation was issued.
static inline BOOL cancel_io(int _index)
{
	if ((_index < 0) || (_index >= MAX_FDS)) {
		return FALSE;
	}

	// An unused or invalid slot has nothing pending => report success
	if ( (poll_fd[_index].fd < 0) || (poll_fd[_index].handle == INVALID_HANDLE_VALUE)
	  || (poll_fd[_index].handle == 0) || (poll_fd[_index].overlapped == NULL) ) {
		return TRUE;
	}
	if (poll_fd[_index].itransfer && poll_fd[_index].cancel_fn) {
		// Cancel outstanding transfer via the specific callback
		(*poll_fd[_index].cancel_fn)(poll_fd[_index].itransfer);
		return TRUE;
	}
	if (pCancelIoEx != NULL) {
		// Vista and later: cancel just this OVERLAPPED, from any thread
		return (*pCancelIoEx)(poll_fd[_index].handle, poll_fd[_index].overlapped);
	}
	if (_poll_fd[_index].thread_id == GetCurrentThreadId()) {
		// XP fallback: CancelIo only cancels I/O issued by the calling thread
		return CancelIo(poll_fd[_index].handle);
	}
	usbi_warn(NULL, "Unable to cancel I/O that was started from another thread");
	return FALSE;
}
  116. #else
// WinCE never duplicates handles (no CancelIoEx to emulate)
#define Use_Duplicate_Handles FALSE

// WinCE: nothing to look up at startup
static __inline void setup_cancel_io()
{
	// No setup needed on WinCE
}

// WinCE variant of cancel_io: cancellation is only possible through the
// transfer-specific callback; otherwise the call succeeds as a no-op.
static __inline BOOL cancel_io(int _index)
{
	if ((_index < 0) || (_index >= MAX_FDS)) {
		return FALSE;
	}
	// Unused or invalid slot => nothing pending, report success
	if ( (poll_fd[_index].fd < 0) || (poll_fd[_index].handle == INVALID_HANDLE_VALUE)
	  || (poll_fd[_index].handle == 0) || (poll_fd[_index].overlapped == NULL) ) {
		return TRUE;
	}
	if (poll_fd[_index].itransfer && poll_fd[_index].cancel_fn) {
		// Cancel outstanding transfer via the specific callback
		(*poll_fd[_index].cancel_fn)(poll_fd[_index].itransfer);
	}
	return TRUE;
}
  137. #endif
  138. // Init
  139. void init_polling(void)
  140. {
  141. int i;
  142. while (InterlockedExchange((LONG *)&compat_spinlock, 1) == 1) {
  143. SleepEx(0, TRUE);
  144. }
  145. if (!is_polling_set) {
  146. setup_cancel_io();
  147. for (i=0; i<MAX_FDS; i++) {
  148. poll_fd[i] = INVALID_WINFD;
  149. _poll_fd[i].original_handle = INVALID_HANDLE_VALUE;
  150. _poll_fd[i].thread_id = 0;
  151. InitializeCriticalSection(&_poll_fd[i].mutex);
  152. }
  153. is_polling_set = TRUE;
  154. }
  155. InterlockedExchange((LONG *)&compat_spinlock, 0);
  156. }
  157. // Internal function to retrieve the table index (and lock the fd mutex)
  158. static int _fd_to_index_and_lock(int fd)
  159. {
  160. int i;
  161. if (fd < 0)
  162. return -1;
  163. for (i=0; i<MAX_FDS; i++) {
  164. if (poll_fd[i].fd == fd) {
  165. EnterCriticalSection(&_poll_fd[i].mutex);
  166. // fd might have changed before we got to critical
  167. if (poll_fd[i].fd != fd) {
  168. LeaveCriticalSection(&_poll_fd[i].mutex);
  169. continue;
  170. }
  171. return i;
  172. }
  173. }
  174. return -1;
  175. }
  176. static OVERLAPPED *create_overlapped(void)
  177. {
  178. OVERLAPPED *overlapped = (OVERLAPPED*) calloc(1, sizeof(OVERLAPPED));
  179. if (overlapped == NULL) {
  180. return NULL;
  181. }
  182. overlapped->hEvent = CreateEvent(NULL, TRUE, FALSE, NULL);
  183. if(overlapped->hEvent == NULL) {
  184. free (overlapped);
  185. return NULL;
  186. }
  187. return overlapped;
  188. }
  189. static void free_overlapped(OVERLAPPED *overlapped)
  190. {
  191. if (overlapped == NULL)
  192. return;
  193. if ( (overlapped->hEvent != 0)
  194. && (overlapped->hEvent != INVALID_HANDLE_VALUE) ) {
  195. CloseHandle(overlapped->hEvent);
  196. }
  197. free(overlapped);
  198. }
// Tear down the polling layer: cancel outstanding I/O, release every fd slot
// and destroy the per-slot locks. Mirror image of init_polling(); guarded by
// the same spinlock so it is safe against a concurrent init.
void exit_polling(void)
{
	int i;

	while (InterlockedExchange((LONG *)&compat_spinlock, 1) == 1) {
		SleepEx(0, TRUE);
	}
	if (is_polling_set) {
		// Flip the flag first so CHECK_INIT_POLLING callers re-init afterwards
		is_polling_set = FALSE;

		for (i=0; i<MAX_FDS; i++) {
			// Cancel any async I/O (handle can be invalid)
			cancel_io(i);
			// If anything was pending on that I/O, it should be
			// terminating, and we should be able to access the fd
			// mutex lock before too long
			EnterCriticalSection(&_poll_fd[i].mutex);
			free_overlapped(poll_fd[i].overlapped);
			if (Use_Duplicate_Handles) {
				// Close duplicate handle
				if (_poll_fd[i].original_handle != INVALID_HANDLE_VALUE) {
					CloseHandle(poll_fd[i].handle);
				}
			}
			poll_fd[i] = INVALID_WINFD;
			LeaveCriticalSection(&_poll_fd[i].mutex);
			DeleteCriticalSection(&_poll_fd[i].mutex);
		}
	}
	InterlockedExchange((LONG *)&compat_spinlock, 0);
}
/*
 * Create a fake pipe.
 * As libusbx only uses pipes for signaling, all we need from a pipe is an
 * event. To that extent, we create a single wfd and overlapped as a means
 * to access that event.
 *
 * Both filedes[0] (read end) and filedes[1] (write end) receive the SAME
 * fd, since one event serves both directions.
 * Returns 0 on success, -1 if the overlapped could not be created or the
 * fd table is full.
 */
int usbi_pipe(int filedes[2])
{
	int i;
	OVERLAPPED* overlapped;

	CHECK_INIT_POLLING;

	overlapped = create_overlapped();
	if (overlapped == NULL) {
		return -1;
	}
	// The overlapped must have status pending for signaling to work in poll
	overlapped->Internal = STATUS_PENDING;
	// InternalHigh doubles as the count of unread signals (see usbi_write/usbi_read)
	overlapped->InternalHigh = 0;

	for (i=0; i<MAX_FDS; i++) {
		if (poll_fd[i].fd < 0) {
			EnterCriticalSection(&_poll_fd[i].mutex);
			// fd might have been allocated before we got to critical
			if (poll_fd[i].fd >= 0) {
				LeaveCriticalSection(&_poll_fd[i].mutex);
				continue;
			}
			// Use index as the unique fd number
			poll_fd[i].fd = i;
			// Read end of the "pipe"
			filedes[0] = poll_fd[i].fd;
			// We can use the same handle for both ends
			filedes[1] = filedes[0];

			poll_fd[i].handle = DUMMY_HANDLE;
			poll_fd[i].overlapped = overlapped;
			// There's no polling on the write end, so we just use READ for our needs
			poll_fd[i].rw = RW_READ;
			_poll_fd[i].original_handle = INVALID_HANDLE_VALUE;
			LeaveCriticalSection(&_poll_fd[i].mutex);
			return 0;
		}
	}
	// No free slot in the fd table
	free_overlapped(overlapped);
	return -1;
}
/*
 * Create both an fd and an OVERLAPPED from an open Windows handle, so that
 * it can be used with our polling function
 * The handle MUST support overlapped transfers (usually requires CreateFile
 * with FILE_FLAG_OVERLAPPED)
 * Return a pollable file descriptor struct, or INVALID_WINFD on error
 *
 * Note that the fd returned by this function is a per-transfer fd, rather
 * than a per-session fd and cannot be used for anything else but our
 * custom functions (the fd itself points to the NUL: device)
 * if you plan to do R/W on the same handle, you MUST create 2 fds: one for
 * read and one for write. Using a single R/W fd is unsupported and will
 * produce unexpected results
 *
 * access_mode must be exactly RW_READ or RW_WRITE. itransfer/cancel_fn are
 * optional; when both are set, cancellation goes through cancel_fn instead
 * of CancelIo/CancelIoEx (see cancel_io).
 */
struct winfd usbi_create_fd(HANDLE handle, int access_mode, struct usbi_transfer *itransfer, cancel_transfer *cancel_fn)
{
	int i;
	struct winfd wfd = INVALID_WINFD;
	OVERLAPPED* overlapped = NULL;

	CHECK_INIT_POLLING;

	if ((handle == 0) || (handle == INVALID_HANDLE_VALUE)) {
		return INVALID_WINFD;
	}

	wfd.itransfer = itransfer;
	wfd.cancel_fn = cancel_fn;

	if ((access_mode != RW_READ) && (access_mode != RW_WRITE)) {
		usbi_warn(NULL, "only one of RW_READ or RW_WRITE are supported.\n"
			"If you want to poll for R/W simultaneously, create multiple fds from the same handle.");
		return INVALID_WINFD;
	}
	if (access_mode == RW_READ) {
		wfd.rw = RW_READ;
	} else {
		wfd.rw = RW_WRITE;
	}

	overlapped = create_overlapped();
	if(overlapped == NULL) {
		return INVALID_WINFD;
	}

	for (i=0; i<MAX_FDS; i++) {
		if (poll_fd[i].fd < 0) {
			EnterCriticalSection(&_poll_fd[i].mutex);
			// fd might have been removed before we got to critical
			if (poll_fd[i].fd >= 0) {
				LeaveCriticalSection(&_poll_fd[i].mutex);
				continue;
			}
			// Use index as the unique fd number
			wfd.fd = i;
			// Attempt to emulate some of the CancelIoEx behaviour on platforms
			// that don't have it
			if (Use_Duplicate_Handles) {
				_poll_fd[i].thread_id = GetCurrentThreadId();
				if (!DuplicateHandle(GetCurrentProcess(), handle, GetCurrentProcess(),
					&wfd.handle, 0, TRUE, DUPLICATE_SAME_ACCESS)) {
					usbi_dbg("could not duplicate handle for CancelIo - using original one");
					wfd.handle = handle;
					// Make sure we won't close the original handle on fd deletion then
					_poll_fd[i].original_handle = INVALID_HANDLE_VALUE;
				} else {
					_poll_fd[i].original_handle = handle;
				}
			} else {
				wfd.handle = handle;
			}
			wfd.overlapped = overlapped;
			// Publish the fully-populated entry under the slot lock
			memcpy(&poll_fd[i], &wfd, sizeof(struct winfd));
			LeaveCriticalSection(&_poll_fd[i].mutex);
			return wfd;
		}
	}
	// fd table full
	free_overlapped(overlapped);
	return INVALID_WINFD;
}
  346. static void _free_index(int _index)
  347. {
  348. // Cancel any async IO (Don't care about the validity of our handles for this)
  349. cancel_io(_index);
  350. // close the duplicate handle (if we have an actual duplicate)
  351. if (Use_Duplicate_Handles) {
  352. if (_poll_fd[_index].original_handle != INVALID_HANDLE_VALUE) {
  353. CloseHandle(poll_fd[_index].handle);
  354. }
  355. _poll_fd[_index].original_handle = INVALID_HANDLE_VALUE;
  356. _poll_fd[_index].thread_id = 0;
  357. }
  358. free_overlapped(poll_fd[_index].overlapped);
  359. poll_fd[_index] = INVALID_WINFD;
  360. }
  361. /*
  362. * Release a pollable file descriptor.
  363. *
  364. * Note that the associated Windows handle is not closed by this call
  365. */
  366. void usbi_free_fd(struct winfd *wfd)
  367. {
  368. int _index;
  369. CHECK_INIT_POLLING;
  370. _index = _fd_to_index_and_lock(wfd->fd);
  371. if (_index < 0) {
  372. return;
  373. }
  374. _free_index(_index);
  375. *wfd = INVALID_WINFD;
  376. LeaveCriticalSection(&_poll_fd[_index].mutex);
  377. }
  378. /*
  379. * The functions below perform various conversions between fd, handle and OVERLAPPED
  380. */
  381. struct winfd fd_to_winfd(int fd)
  382. {
  383. int i;
  384. struct winfd wfd;
  385. CHECK_INIT_POLLING;
  386. if (fd < 0)
  387. return INVALID_WINFD;
  388. for (i=0; i<MAX_FDS; i++) {
  389. if (poll_fd[i].fd == fd) {
  390. EnterCriticalSection(&_poll_fd[i].mutex);
  391. // fd might have been deleted before we got to critical
  392. if (poll_fd[i].fd != fd) {
  393. LeaveCriticalSection(&_poll_fd[i].mutex);
  394. continue;
  395. }
  396. memcpy(&wfd, &poll_fd[i], sizeof(struct winfd));
  397. LeaveCriticalSection(&_poll_fd[i].mutex);
  398. return wfd;
  399. }
  400. }
  401. return INVALID_WINFD;
  402. }
  403. struct winfd handle_to_winfd(HANDLE handle)
  404. {
  405. int i;
  406. struct winfd wfd;
  407. CHECK_INIT_POLLING;
  408. if ((handle == 0) || (handle == INVALID_HANDLE_VALUE))
  409. return INVALID_WINFD;
  410. for (i=0; i<MAX_FDS; i++) {
  411. if (poll_fd[i].handle == handle) {
  412. EnterCriticalSection(&_poll_fd[i].mutex);
  413. // fd might have been deleted before we got to critical
  414. if (poll_fd[i].handle != handle) {
  415. LeaveCriticalSection(&_poll_fd[i].mutex);
  416. continue;
  417. }
  418. memcpy(&wfd, &poll_fd[i], sizeof(struct winfd));
  419. LeaveCriticalSection(&_poll_fd[i].mutex);
  420. return wfd;
  421. }
  422. }
  423. return INVALID_WINFD;
  424. }
  425. struct winfd overlapped_to_winfd(OVERLAPPED* overlapped)
  426. {
  427. int i;
  428. struct winfd wfd;
  429. CHECK_INIT_POLLING;
  430. if (overlapped == NULL)
  431. return INVALID_WINFD;
  432. for (i=0; i<MAX_FDS; i++) {
  433. if (poll_fd[i].overlapped == overlapped) {
  434. EnterCriticalSection(&_poll_fd[i].mutex);
  435. // fd might have been deleted before we got to critical
  436. if (poll_fd[i].overlapped != overlapped) {
  437. LeaveCriticalSection(&_poll_fd[i].mutex);
  438. continue;
  439. }
  440. memcpy(&wfd, &poll_fd[i], sizeof(struct winfd));
  441. LeaveCriticalSection(&_poll_fd[i].mutex);
  442. return wfd;
  443. }
  444. }
  445. return INVALID_WINFD;
  446. }
  447. /*
  448. * POSIX poll equivalent, using Windows OVERLAPPED
  449. * Currently, this function only accepts one of POLLIN or POLLOUT per fd
  450. * (but you can create multiple fds from the same handle for read and write)
  451. */
  452. int usbi_poll(struct pollfd *fds, unsigned int nfds, int timeout)
  453. {
  454. unsigned i;
  455. int _index, object_index, triggered;
  456. HANDLE *handles_to_wait_on;
  457. int *handle_to_index;
  458. DWORD nb_handles_to_wait_on = 0;
  459. DWORD ret;
  460. CHECK_INIT_POLLING;
  461. triggered = 0;
  462. handles_to_wait_on = (HANDLE*) calloc(nfds+1, sizeof(HANDLE)); // +1 for fd_update
  463. handle_to_index = (int*) calloc(nfds, sizeof(int));
  464. if ((handles_to_wait_on == NULL) || (handle_to_index == NULL)) {
  465. errno = ENOMEM;
  466. triggered = -1;
  467. goto poll_exit;
  468. }
  469. for (i = 0; i < nfds; ++i) {
  470. fds[i].revents = 0;
  471. // Only one of POLLIN or POLLOUT can be selected with this version of poll (not both)
  472. if ((fds[i].events & ~POLLIN) && (!(fds[i].events & POLLOUT))) {
  473. fds[i].revents |= POLLERR;
  474. errno = EACCES;
  475. usbi_warn(NULL, "unsupported set of events");
  476. triggered = -1;
  477. goto poll_exit;
  478. }
  479. _index = _fd_to_index_and_lock(fds[i].fd);
  480. poll_dbg("fd[%d]=%d: (overlapped=%p) got events %04X", i, poll_fd[_index].fd, poll_fd[_index].overlapped, fds[i].events);
  481. if ( (_index < 0) || (poll_fd[_index].handle == INVALID_HANDLE_VALUE)
  482. || (poll_fd[_index].handle == 0) || (poll_fd[_index].overlapped == NULL)) {
  483. fds[i].revents |= POLLNVAL | POLLERR;
  484. errno = EBADF;
  485. if (_index >= 0) {
  486. LeaveCriticalSection(&_poll_fd[_index].mutex);
  487. }
  488. usbi_warn(NULL, "invalid fd");
  489. triggered = -1;
  490. goto poll_exit;
  491. }
  492. // IN or OUT must match our fd direction
  493. if ((fds[i].events & POLLIN) && (poll_fd[_index].rw != RW_READ)) {
  494. fds[i].revents |= POLLNVAL | POLLERR;
  495. errno = EBADF;
  496. usbi_warn(NULL, "attempted POLLIN on fd without READ access");
  497. LeaveCriticalSection(&_poll_fd[_index].mutex);
  498. triggered = -1;
  499. goto poll_exit;
  500. }
  501. if ((fds[i].events & POLLOUT) && (poll_fd[_index].rw != RW_WRITE)) {
  502. fds[i].revents |= POLLNVAL | POLLERR;
  503. errno = EBADF;
  504. usbi_warn(NULL, "attempted POLLOUT on fd without WRITE access");
  505. LeaveCriticalSection(&_poll_fd[_index].mutex);
  506. triggered = -1;
  507. goto poll_exit;
  508. }
  509. // The following macro only works if overlapped I/O was reported pending
  510. if ( (HasOverlappedIoCompleted(poll_fd[_index].overlapped))
  511. || (HasOverlappedIoCompletedSync(poll_fd[_index].overlapped)) ) {
  512. poll_dbg(" completed");
  513. // checks above should ensure this works:
  514. fds[i].revents = fds[i].events;
  515. triggered++;
  516. } else {
  517. handles_to_wait_on[nb_handles_to_wait_on] = poll_fd[_index].overlapped->hEvent;
  518. handle_to_index[nb_handles_to_wait_on] = i;
  519. nb_handles_to_wait_on++;
  520. }
  521. LeaveCriticalSection(&_poll_fd[_index].mutex);
  522. }
  523. // If nothing was triggered, wait on all fds that require it
  524. if ((timeout != 0) && (triggered == 0) && (nb_handles_to_wait_on != 0)) {
  525. if (timeout < 0) {
  526. poll_dbg("starting infinite wait for %d handles...", (int)nb_handles_to_wait_on);
  527. } else {
  528. poll_dbg("starting %d ms wait for %d handles...", timeout, (int)nb_handles_to_wait_on);
  529. }
  530. ret = WaitForMultipleObjects(nb_handles_to_wait_on, handles_to_wait_on,
  531. FALSE, (timeout<0)?INFINITE:(DWORD)timeout);
  532. object_index = ret-WAIT_OBJECT_0;
  533. if ((object_index >= 0) && ((DWORD)object_index < nb_handles_to_wait_on)) {
  534. poll_dbg(" completed after wait");
  535. i = handle_to_index[object_index];
  536. _index = _fd_to_index_and_lock(fds[i].fd);
  537. fds[i].revents = fds[i].events;
  538. triggered++;
  539. if (_index >= 0) {
  540. LeaveCriticalSection(&_poll_fd[_index].mutex);
  541. }
  542. } else if (ret == WAIT_TIMEOUT) {
  543. poll_dbg(" timed out");
  544. triggered = 0; // 0 = timeout
  545. } else {
  546. errno = EIO;
  547. triggered = -1; // error
  548. }
  549. }
  550. poll_exit:
  551. if (handles_to_wait_on != NULL) {
  552. free(handles_to_wait_on);
  553. }
  554. if (handle_to_index != NULL) {
  555. free(handle_to_index);
  556. }
  557. return triggered;
  558. }
  559. /*
  560. * close a fake pipe fd
  561. */
  562. int usbi_close(int fd)
  563. {
  564. int _index;
  565. int r = -1;
  566. CHECK_INIT_POLLING;
  567. _index = _fd_to_index_and_lock(fd);
  568. if (_index < 0) {
  569. errno = EBADF;
  570. } else {
  571. free_overlapped(poll_fd[_index].overlapped);
  572. poll_fd[_index] = INVALID_WINFD;
  573. LeaveCriticalSection(&_poll_fd[_index].mutex);
  574. }
  575. return r;
  576. }
/*
 * synchronous write for fake "pipe" signaling
 *
 * Signals the pipe's event and bumps the pending-signal counter so that
 * concurrent writers each get a matching usbi_read. Only single-byte writes
 * are supported (count must be 1); the buffer contents are ignored.
 * Returns 1 on success, -1 on error (errno = EBADF for an unknown fd).
 */
ssize_t usbi_write(int fd, const void *buf, size_t count)
{
	int _index;
	UNUSED(buf);

	CHECK_INIT_POLLING;

	if (count != sizeof(unsigned char)) {
		usbi_err(NULL, "this function should only used for signaling");
		return -1;
	}

	_index = _fd_to_index_and_lock(fd);

	if ( (_index < 0) || (poll_fd[_index].overlapped == NULL) ) {
		errno = EBADF;
		if (_index >= 0) {
			LeaveCriticalSection(&_poll_fd[_index].mutex);
		}
		return -1;
	}

	poll_dbg("set pipe event (fd = %d, thread = %08X)", _index, GetCurrentThreadId());
	SetEvent(poll_fd[_index].overlapped->hEvent);
	// Mark the overlapped as completed so usbi_poll reports the fd as ready
	poll_fd[_index].overlapped->Internal = STATUS_WAIT_0;
	// If two threads write on the pipe at the same time, we need to
	// process two separate reads => use the overlapped as a counter
	poll_fd[_index].overlapped->InternalHigh++;

	LeaveCriticalSection(&_poll_fd[_index].mutex);
	return sizeof(unsigned char);
}
/*
 * synchronous read for fake "pipe" signaling
 *
 * Consumes one signal produced by usbi_write: waits (indefinitely) for the
 * pipe event, decrements the pending-signal counter, and resets the event
 * back to the pending state once every signal has been consumed. Only
 * single-byte reads are supported (count must be 1); the buffer is never
 * written. Returns 1 on success, -1 on error (errno = EBADF / EIO).
 *
 * NOTE(review): the wait runs while the fd mutex (taken by
 * _fd_to_index_and_lock) is held, so a blocked reader also blocks other
 * operations on this fd until a writer signals the event.
 */
ssize_t usbi_read(int fd, void *buf, size_t count)
{
	int _index;
	ssize_t r = -1;
	UNUSED(buf);

	CHECK_INIT_POLLING;

	if (count != sizeof(unsigned char)) {
		usbi_err(NULL, "this function should only used for signaling");
		return -1;
	}

	_index = _fd_to_index_and_lock(fd);

	if (_index < 0) {
		errno = EBADF;
		return -1;
	}

	if (WaitForSingleObject(poll_fd[_index].overlapped->hEvent, INFINITE) != WAIT_OBJECT_0) {
		usbi_warn(NULL, "waiting for event failed: %d", (int)GetLastError());
		errno = EIO;
		goto out;
	}

	poll_dbg("clr pipe event (fd = %d, thread = %08X)", _index, GetCurrentThreadId());
	poll_fd[_index].overlapped->InternalHigh--;
	// Don't reset unless we don't have any more events to process
	if (poll_fd[_index].overlapped->InternalHigh <= 0) {
		ResetEvent(poll_fd[_index].overlapped->hEvent);
		poll_fd[_index].overlapped->Internal = STATUS_PENDING;
	}

	r = sizeof(unsigned char);

out:
	LeaveCriticalSection(&_poll_fd[_index].mutex);
	return r;
}