OpenVPN
event.c
1 /*
2  * OpenVPN -- An application to securely tunnel IP networks
3  * over a single TCP/UDP port, with support for SSL/TLS-based
4  * session authentication and key exchange,
5  * packet encryption, packet authentication, and
6  * packet compression.
7  *
8  * Copyright (C) 2002-2024 OpenVPN Inc <sales@openvpn.net>
9  *
10  * This program is free software; you can redistribute it and/or modify
11  * it under the terms of the GNU General Public License version 2
12  * as published by the Free Software Foundation.
13  *
14  * This program is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17  * GNU General Public License for more details.
18  *
19  * You should have received a copy of the GNU General Public License along
20  * with this program; if not, write to the Free Software Foundation, Inc.,
21  * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
22  */
23 
24 #ifdef HAVE_CONFIG_H
25 #include "config.h"
26 #endif
27 
28 #include "syshead.h"
29 
30 #include "buffer.h"
31 #include "error.h"
32 #include "integer.h"
33 #include "event.h"
34 #include "fdmisc.h"
35 
36 #if EPOLL
37 #include <sys/epoll.h>
38 #endif
39 
40 #include "memdbg.h"
41 
42 /*
43  * Some OSes will prefer select() over poll()
44  * when both are available.
45  */
46 #if defined(TARGET_DARWIN)
47 #define SELECT_PREFERRED_OVER_POLL
48 #endif
49 
50 /*
51  * All non-windows OSes are assumed to have select()
52  */
53 #ifdef _WIN32
54 #define SELECT 0
55 #else
56 #define SELECT 1
57 #endif
58 
59 /*
60  * This should be set to the highest file descriptor
61  * which can be used in one of the FD_ macros.
62  */
63 #ifdef FD_SETSIZE
64 #define SELECT_MAX_FDS FD_SETSIZE
65 #else
66 #define SELECT_MAX_FDS 256
67 #endif
68 
69 static inline int
70 tv_to_ms_timeout(const struct timeval *tv)
71 {
72  if (tv->tv_sec == 0 && tv->tv_usec == 0)
73  {
74  return 0;
75  }
76  else
77  {
78  return max_int(tv->tv_sec * 1000 + (tv->tv_usec + 500) / 1000, 1);
79  }
80 }
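
The helper above converts a struct timeval to the millisecond granularity expected by poll()/epoll_wait()/WSAWaitForMultipleEvents(): a 0/0 timeval means "don't block at all", microseconds are rounded to the nearest millisecond, and any non-zero interval is clamped to at least 1 ms. A standalone sketch (not part of event.c; the formula is simply copied here) exercising those boundary cases:

/* Illustrative sketch only: a local copy of the rounding rule used by
 * tv_to_ms_timeout(), exercising its boundary cases. */
#include <assert.h>
#include <sys/time.h>

static int
ms_timeout(const struct timeval *tv)
{
    if (tv->tv_sec == 0 && tv->tv_usec == 0)
    {
        return 0;                   /* 0/0 means "don't block at all" */
    }
    int ms = tv->tv_sec * 1000 + (tv->tv_usec + 500) / 1000;
    return ms > 1 ? ms : 1;         /* a non-zero wait never collapses to 0 ms */
}

int
main(void)
{
    struct timeval zero = { 0, 0 };         /* -> 0 ms */
    struct timeval tiny = { 0, 400 };       /* rounds to 0, clamped to 1 ms */
    struct timeval sub  = { 0, 1500 };      /* -> 2 ms (rounded to nearest) */
    struct timeval big  = { 2, 499999 };    /* -> 2500 ms */

    assert(ms_timeout(&zero) == 0);
    assert(ms_timeout(&tiny) == 1);
    assert(ms_timeout(&sub) == 2);
    assert(ms_timeout(&big) == 2500);
    return 0;
}
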
81 
82 #ifdef _WIN32
83 
84 struct we_set
85 {
86  struct event_set_functions func;
87  bool fast;
88  HANDLE *events;
89  struct event_set_return *esr;
90  int n_events;
91  int capacity;
92 };
93 
94 static inline void
95 we_set_event(struct we_set *wes, int i, event_t event, unsigned int rwflags, void *arg)
96 {
97  ASSERT(i >= 0 && i < wes->capacity);
98 
99  if (rwflags == EVENT_READ)
100  {
101  ASSERT(event->read != NULL);
102  wes->events[i] = event->read;
103  }
104  else if (rwflags == EVENT_WRITE)
105  {
106  ASSERT(event->write != NULL);
107  wes->events[i] = event->write;
108  }
109  else
110  {
111  msg(M_FATAL, "fatal error in we_set_events: rwflags=%d", rwflags);
112  }
113 
114  wes->esr[i].rwflags = rwflags;
115  wes->esr[i].arg = arg;
116 }
117 
118 static inline bool
119 we_append_event(struct we_set *wes, event_t event, unsigned int rwflags, void *arg)
120 {
121  if (rwflags & EVENT_WRITE)
122  {
123  if (wes->n_events < wes->capacity)
124  {
125  we_set_event(wes, wes->n_events, event, EVENT_WRITE, arg);
126  ++wes->n_events;
127  }
128  else
129  {
130  return false;
131  }
132  }
133  if (rwflags & EVENT_READ)
134  {
135  if (wes->n_events < wes->capacity)
136  {
137  we_set_event(wes, wes->n_events, event, EVENT_READ, arg);
138  ++wes->n_events;
139  }
140  else
141  {
142  return false;
143  }
144  }
145  return true;
146 }
147 
148 static void
149 we_del_event(struct we_set *wes, event_t event)
150 {
151  int i, j = 0;
152  const int len = wes->n_events;
153 
154  for (i = 0; i < len; ++i)
155  {
156  const HANDLE h = wes->events[i];
157  if (h == event->read || h == event->write)
158  {
159  --wes->n_events;
160  }
161  else
162  {
163  if (i != j)
164  {
165  wes->events[j] = wes->events[i];
166  wes->esr[j] = wes->esr[i];
167  }
168  ++j;
169  }
170  }
171 }
172 
173 static void
174 we_del_index(struct we_set *wes, int index)
175 {
176  int i;
177  ASSERT(index >= 0 && index < wes->n_events);
178  for (i = index; i < wes->n_events - 1; ++i)
179  {
180  wes->events[i] = wes->events[i+1];
181  wes->esr[i] = wes->esr[i+1];
182  }
183  --wes->n_events;
184 }
185 
186 static void
187 we_get_rw_indices(struct we_set *wes, event_t event, int *ri, int *wi)
188 {
189  int i;
190  *ri = *wi = -1;
191  for (i = 0; i < wes->n_events; ++i)
192  {
193  const HANDLE h = wes->events[i];
194  if (h == event->read)
195  {
196  ASSERT(*ri == -1);
197  *ri = i;
198  }
199  else if (h == event->write)
200  {
201  ASSERT(*wi == -1);
202  *wi = i;
203  }
204  }
205 }
206 
207 static void
208 we_free(struct event_set *es)
209 {
210  struct we_set *wes = (struct we_set *) es;
211  free(wes->events);
212  free(wes->esr);
213  free(wes);
214 }
215 
216 static void
217 we_reset(struct event_set *es)
218 {
219  struct we_set *wes = (struct we_set *) es;
220  ASSERT(wes->fast);
221  wes->n_events = 0;
222 }
223 
224 static void
225 we_del(struct event_set *es, event_t event)
226 {
227  struct we_set *wes = (struct we_set *) es;
228  ASSERT(!wes->fast);
229  we_del_event(wes, event);
230 }
231 
232 static void
233 we_ctl(struct event_set *es, event_t event, unsigned int rwflags, void *arg)
234 {
235  struct we_set *wes = (struct we_set *) es;
236 
237  dmsg(D_EVENT_WAIT, "WE_CTL n=%d ev=%p rwflags=0x%04x arg=" ptr_format,
238  wes->n_events,
239  event,
240  rwflags,
241  (ptr_type)arg);
242 
243  if (wes->fast)
244  {
245  if (!we_append_event(wes, event, rwflags, arg))
246  {
247  goto err;
248  }
249  }
250  else
251  {
252  int ri, wi;
253  int one = -1;
254  int n = 0;
255 
256  we_get_rw_indices(wes, event, &ri, &wi);
257  if (wi >= 0)
258  {
259  one = wi;
260  ++n;
261  }
262  if (ri >= 0)
263  {
264  one = ri;
265  ++n;
266  }
267  switch (rwflags)
268  {
269  case 0:
270  switch (n)
271  {
272  case 0:
273  break;
274 
275  case 1:
276  we_del_index(wes, one);
277  break;
278 
279  case 2:
280  we_del_event(wes, event);
281  break;
282 
283  default:
284  ASSERT(0);
285  }
286  break;
287 
288  case EVENT_READ:
289  switch (n)
290  {
291  case 0:
292  if (!we_append_event(wes, event, EVENT_READ, arg))
293  {
294  goto err;
295  }
296  break;
297 
298  case 1:
299  we_set_event(wes, one, event, EVENT_READ, arg);
300  break;
301 
302  case 2:
303  we_del_index(wes, wi);
304  break;
305 
306  default:
307  ASSERT(0);
308  }
309  break;
310 
311  case EVENT_WRITE:
312  switch (n)
313  {
314  case 0:
315  if (!we_append_event(wes, event, EVENT_WRITE, arg))
316  {
317  goto err;
318  }
319  break;
320 
321  case 1:
322  we_set_event(wes, one, event, EVENT_WRITE, arg);
323  break;
324 
325  case 2:
326  we_del_index(wes, ri);
327  break;
328 
329  default:
330  ASSERT(0);
331  }
332  break;
333 
334  case EVENT_READ|EVENT_WRITE:
335  switch (n)
336  {
337  case 0:
338  if (!we_append_event(wes, event, EVENT_READ|EVENT_WRITE, arg))
339  {
340  goto err;
341  }
342  break;
343 
344  case 1:
345  if (ri == -1)
346  {
347  ASSERT(wi != -1);
348  if (!we_append_event(wes, event, EVENT_READ, arg))
349  {
350  goto err;
351  }
352  }
353  else if (wi == -1)
354  {
355  if (!we_append_event(wes, event, EVENT_WRITE, arg))
356  {
357  goto err;
358  }
359  }
360  else
361  {
362  ASSERT(0);
363  }
364  break;
365 
366  case 2:
367  break;
368 
369  default:
370  ASSERT(0);
371  }
372  break;
373 
374  default:
375  msg(M_FATAL, "fatal error in we_ctl: rwflags=%d", rwflags);
376  }
377  }
378  return;
379 
380 err:
381  msg(D_EVENT_ERRORS, "Error: Windows resource limit WSA_MAXIMUM_WAIT_EVENTS (%d) has been exceeded", WSA_MAXIMUM_WAIT_EVENTS);
382 }
383 
384 static int
385 we_wait(struct event_set *es, const struct timeval *tv, struct event_set_return *out, int outlen)
386 {
387  struct we_set *wes = (struct we_set *) es;
388  const int timeout = tv_to_ms_timeout(tv);
389  DWORD status;
390 
391  dmsg(D_EVENT_WAIT, "WE_WAIT enter n=%d to=%d", wes->n_events, timeout);
392 
393 #ifdef ENABLE_DEBUG
394  if (check_debug_level(D_EVENT_WAIT))
395  {
396  int i;
397  for (i = 0; i < wes->n_events; ++i)
398  {
399  dmsg(D_EVENT_WAIT, "[%d] ev=%p rwflags=0x%04x arg=" ptr_format,
400  i,
401  wes->events[i],
402  wes->esr[i].rwflags,
403  (ptr_type)wes->esr[i].arg);
404  }
405  }
406 #endif
407 
408  /*
409  * First poll our event list with 0 timeout
410  */
411  status = WSAWaitForMultipleEvents(
412  (DWORD) wes->n_events,
413  wes->events,
414  FALSE,
415  (DWORD) 0,
416  FALSE);
417 
418  /*
419  * If at least one event is already set, we must
420  * individually poll the whole list.
421  */
422  if (status >= WSA_WAIT_EVENT_0 && status < WSA_WAIT_EVENT_0 + (DWORD) wes->n_events)
423  {
424  int i;
425  int j = 0;
426  for (i = 0; i < wes->n_events; ++i)
427  {
428  if (j >= outlen)
429  {
430  break;
431  }
432  if (WaitForSingleObject(wes->events[i], 0) == WAIT_OBJECT_0)
433  {
434  *out = wes->esr[i];
435  dmsg(D_EVENT_WAIT, "WE_WAIT leave [%d,%d] rwflags=0x%04x arg=" ptr_format,
436  i, j, out->rwflags, (ptr_type)out->arg);
437  ++j;
438  ++out;
439  }
440  }
441  return j;
442  }
443  else
444  {
445  /*
446  * If caller specified timeout > 0, we know at this point
447  * that no events are set, so wait only for the first event
448  * (or timeout) and return at most one event_set_return object.
449  *
450  * If caller specified timeout == 0, the second call to
451  * WSAWaitForMultipleEvents would be redundant -- just
452  * return 0 indicating timeout.
453  */
454  if (timeout > 0)
455  {
456  status = WSAWaitForMultipleEvents(
457  (DWORD) wes->n_events,
458  wes->events,
459  FALSE,
460  (DWORD) timeout,
461  FALSE);
462  }
463 
464  if (outlen >= 1 && status >= WSA_WAIT_EVENT_0 && status < WSA_WAIT_EVENT_0 + (DWORD) wes->n_events)
465  {
466  *out = wes->esr[status - WSA_WAIT_EVENT_0];
467  dmsg(D_EVENT_WAIT, "WE_WAIT leave rwflags=0x%04x arg=" ptr_format,
468  out->rwflags, (ptr_type)out->arg);
469  return 1;
470  }
471  else if (status == WSA_WAIT_TIMEOUT)
472  {
473  return 0;
474  }
475  else
476  {
477  return -1;
478  }
479  }
480 }
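
The two-phase structure of we_wait() exists because WSAWaitForMultipleEvents() (like WaitForMultipleObjects()) reports only the lowest-indexed signalled handle. The zero-timeout probe answers "is anything ready?"; if so, every handle is then checked individually so that all ready events can be returned in one pass. A minimal Win32 sketch of the same pattern, using plain event handles rather than OpenVPN's rw_handle pairs:

/* Sketch (plain Win32 events, not the OpenVPN API): the same zero-timeout
 * probe followed by a per-handle scan that we_wait() performs above. */
#include <windows.h>
#include <stdio.h>

int
main(void)
{
    HANDLE ev[3];
    int i;

    for (i = 0; i < 3; ++i)
    {
        ev[i] = CreateEvent(NULL, TRUE, FALSE, NULL);  /* manual-reset, unsignalled */
    }
    SetEvent(ev[0]);
    SetEvent(ev[2]);

    /* Phase 1: zero-timeout probe -- only reports the lowest ready handle. */
    DWORD status = WaitForMultipleObjects(3, ev, FALSE, 0);

    if (status < WAIT_OBJECT_0 + 3)
    {
        /* Phase 2: scan every handle so no ready event is missed. */
        for (i = 0; i < 3; ++i)
        {
            if (WaitForSingleObject(ev[i], 0) == WAIT_OBJECT_0)
            {
                printf("handle %d is ready\n", i);     /* prints 0 and 2 */
            }
        }
    }
    else if (status == WAIT_TIMEOUT)
    {
        printf("nothing ready\n");
    }

    for (i = 0; i < 3; ++i)
    {
        CloseHandle(ev[i]);
    }
    return 0;
}
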
481 
482 static struct event_set *
483 we_init(int *maxevents, unsigned int flags)
484 {
485  struct we_set *wes;
486 
487  dmsg(D_EVENT_WAIT, "WE_INIT maxevents=%d flags=0x%08x", *maxevents, flags);
488 
489  ALLOC_OBJ_CLEAR(wes, struct we_set);
490 
491  /* set dispatch functions */
492  wes->func.free = we_free;
493  wes->func.reset = we_reset;
494  wes->func.del = we_del;
495  wes->func.ctl = we_ctl;
496  wes->func.wait = we_wait;
497 
498  if (flags & EVENT_METHOD_FAST)
499  {
500  wes->fast = true;
501  }
502  wes->n_events = 0;
503 
504  /* Figure our event capacity */
505  ASSERT(*maxevents > 0);
506  wes->capacity = min_int(*maxevents * 2, WSA_MAXIMUM_WAIT_EVENTS);
507  *maxevents = min_int(*maxevents, WSA_MAXIMUM_WAIT_EVENTS);
508 
509  /* Allocate space for Win32 event handles */
510  ALLOC_ARRAY_CLEAR(wes->events, HANDLE, wes->capacity);
511 
512  /* Allocate space for event_set_return objects */
513  ALLOC_ARRAY_CLEAR(wes->esr, struct event_set_return, wes->capacity);
514 
515  dmsg(D_EVENT_WAIT, "WE_INIT maxevents=%d capacity=%d",
516  *maxevents, wes->capacity);
517 
518  return (struct event_set *) wes;
519 }
520 
521 #endif /* _WIN32 */
522 
523 #if EPOLL
524 
525 struct ep_set
526 {
527  struct event_set_functions func;
528  bool fast;
529  int epfd;
530  int maxevents;
531  struct epoll_event *events;
532 };
533 
534 static void
535 ep_free(struct event_set *es)
536 {
537  struct ep_set *eps = (struct ep_set *) es;
538  close(eps->epfd);
539  free(eps->events);
540  free(eps);
541 }
542 
543 static void
544 ep_reset(struct event_set *es)
545 {
546  const struct ep_set *eps = (struct ep_set *) es;
547  ASSERT(eps->fast);
548 }
549 
550 static void
551 ep_del(struct event_set *es, event_t event)
552 {
553  struct epoll_event ev;
554  struct ep_set *eps = (struct ep_set *) es;
555 
556  dmsg(D_EVENT_WAIT, "EP_DEL ev=%d", (int)event);
557 
558  ASSERT(!eps->fast);
559  CLEAR(ev);
560  if (epoll_ctl(eps->epfd, EPOLL_CTL_DEL, event, &ev) < 0)
561  {
562  msg(M_WARN|M_ERRNO, "EVENT: epoll_ctl EPOLL_CTL_DEL failed, sd=%d", (int)event);
563  }
564 }
565 
566 static void
567 ep_ctl(struct event_set *es, event_t event, unsigned int rwflags, void *arg)
568 {
569  struct ep_set *eps = (struct ep_set *) es;
570  struct epoll_event ev;
571 
572  CLEAR(ev);
573 
574  ev.data.ptr = arg;
575  if (rwflags & EVENT_READ)
576  {
577  ev.events |= EPOLLIN;
578  }
579  if (rwflags & EVENT_WRITE)
580  {
581  ev.events |= EPOLLOUT;
582  }
583 
584  dmsg(D_EVENT_WAIT, "EP_CTL fd=%d rwflags=0x%04x ev=0x%08x arg=" ptr_format,
585  (int)event,
586  rwflags,
587  (unsigned int)ev.events,
588  (ptr_type)ev.data.ptr);
589 
590  if (epoll_ctl(eps->epfd, EPOLL_CTL_MOD, event, &ev) < 0)
591  {
592  if (errno == ENOENT)
593  {
594  if (epoll_ctl(eps->epfd, EPOLL_CTL_ADD, event, &ev) < 0)
595  {
596  msg(M_ERR, "EVENT: epoll_ctl EPOLL_CTL_ADD failed, sd=%d", (int)event);
597  }
598  }
599  else
600  {
601  msg(M_ERR, "EVENT: epoll_ctl EPOLL_CTL_MOD failed, sd=%d", (int)event);
602  }
603  }
604 }
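
ep_ctl() keeps no shadow list of which descriptors are already registered: it simply tries EPOLL_CTL_MOD and, if the kernel answers ENOENT, registers the descriptor with EPOLL_CTL_ADD. A self-contained Linux sketch of that same "modify, add on ENOENT" idiom, using a pipe as a stand-in descriptor:

/* Sketch (plain Linux epoll, not the OpenVPN API): register a descriptor
 * with the same "try MOD, fall back to ADD on ENOENT" idiom as ep_ctl(). */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/epoll.h>
#include <unistd.h>

static void
arm_fd(int epfd, int fd, uint32_t events)
{
    struct epoll_event ev = { .events = events, .data.fd = fd };

    if (epoll_ctl(epfd, EPOLL_CTL_MOD, fd, &ev) < 0)
    {
        if (errno == ENOENT)
        {
            /* fd was not registered yet -- the first call lands here */
            if (epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &ev) < 0)
            {
                perror("EPOLL_CTL_ADD");
                exit(1);
            }
        }
        else
        {
            perror("EPOLL_CTL_MOD");
            exit(1);
        }
    }
}

int
main(void)
{
    int p[2];
    if (pipe(p) < 0)
    {
        perror("pipe");
        return 1;
    }

    int epfd = epoll_create1(EPOLL_CLOEXEC);
    if (epfd < 0)
    {
        perror("epoll_create1");
        return 1;
    }

    arm_fd(epfd, p[0], EPOLLIN);            /* MOD fails with ENOENT, ADD succeeds */
    arm_fd(epfd, p[0], EPOLLIN | EPOLLOUT); /* already registered, MOD succeeds */

    close(p[0]);
    close(p[1]);
    close(epfd);
    return 0;
}
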
605 
606 static int
607 ep_wait(struct event_set *es, const struct timeval *tv, struct event_set_return *out, int outlen)
608 {
609  struct ep_set *eps = (struct ep_set *) es;
610  int stat;
611 
612  if (outlen > eps->maxevents)
613  {
614  outlen = eps->maxevents;
615  }
616 
617  stat = epoll_wait(eps->epfd, eps->events, outlen, tv_to_ms_timeout(tv));
618  ASSERT(stat <= outlen);
619 
620  if (stat > 0)
621  {
622  int i;
623  const struct epoll_event *ev = eps->events;
624  struct event_set_return *esr = out;
625  for (i = 0; i < stat; ++i)
626  {
627  esr->rwflags = 0;
628  if (ev->events & (EPOLLIN|EPOLLPRI|EPOLLERR|EPOLLHUP))
629  {
630  esr->rwflags |= EVENT_READ;
631  }
632  if (ev->events & EPOLLOUT)
633  {
634  esr->rwflags |= EVENT_WRITE;
635  }
636  esr->arg = ev->data.ptr;
637  dmsg(D_EVENT_WAIT, "EP_WAIT[%d] rwflags=0x%04x ev=0x%08x arg=" ptr_format,
638  i, esr->rwflags, ev->events, (ptr_type)ev->data.ptr);
639  ++ev;
640  ++esr;
641  }
642  }
643  return stat;
644 }
645 
646 static struct event_set *
647 ep_init(int *maxevents, unsigned int flags)
648 {
649  struct ep_set *eps;
650  int fd;
651 
652  dmsg(D_EVENT_WAIT, "EP_INIT maxevents=%d flags=0x%08x", *maxevents, flags);
653 
654  /* open epoll file descriptor */
655  fd = epoll_create(*maxevents);
656  if (fd < 0)
657  {
658  return NULL;
659  }
660 
661  set_cloexec(fd);
662 
663  ALLOC_OBJ_CLEAR(eps, struct ep_set);
664 
665  /* set dispatch functions */
666  eps->func.free = ep_free;
667  eps->func.reset = ep_reset;
668  eps->func.del = ep_del;
669  eps->func.ctl = ep_ctl;
670  eps->func.wait = ep_wait;
671 
672  /* fast method ("sort of") corresponds to epoll one-shot */
673  if (flags & EVENT_METHOD_FAST)
674  {
675  eps->fast = true;
676  }
677 
678  /* allocate space for epoll_wait return */
679  ASSERT(*maxevents > 0);
680  eps->maxevents = *maxevents;
681  ALLOC_ARRAY_CLEAR(eps->events, struct epoll_event, eps->maxevents);
682 
683  /* set epoll control fd */
684  eps->epfd = fd;
685 
686  return (struct event_set *) eps;
687 }
688 #endif /* EPOLL */
689 
690 #if POLL
691 
692 struct po_set
693 {
694  struct event_set_functions func;
695  bool fast;
696  struct pollfd *events;
697  void **args;
698  int n_events;
699  int capacity;
700 };
701 
702 static void
703 po_free(struct event_set *es)
704 {
705  struct po_set *pos = (struct po_set *) es;
706  free(pos->events);
707  free(pos->args);
708  free(pos);
709 }
710 
711 static void
712 po_reset(struct event_set *es)
713 {
714  struct po_set *pos = (struct po_set *) es;
715  ASSERT(pos->fast);
716  pos->n_events = 0;
717 }
718 
719 static void
720 po_del(struct event_set *es, event_t event)
721 {
722  struct po_set *pos = (struct po_set *) es;
723  int i;
724 
725  dmsg(D_EVENT_WAIT, "PO_DEL ev=%d", (int)event);
726 
727  ASSERT(!pos->fast);
728  for (i = 0; i < pos->n_events; ++i)
729  {
730  if (pos->events[i].fd == event)
731  {
732  int j;
733  for (j = i; j < pos->n_events - 1; ++j)
734  {
735  pos->events[j] = pos->events[j+1];
736  pos->args[j] = pos->args[j+1];
737  }
738  --pos->n_events;
739  break;
740  }
741  }
742 }
743 
744 static inline void
745 po_set_pollfd_events(struct pollfd *pfdp, unsigned int rwflags)
746 {
747  pfdp->events = 0;
748  if (rwflags & EVENT_WRITE)
749  {
750  pfdp->events |= POLLOUT;
751  }
752  if (rwflags & EVENT_READ)
753  {
754  pfdp->events |= (POLLIN|POLLPRI);
755  }
756 }
757 
758 static inline bool
759 po_append_event(struct po_set *pos, event_t event, unsigned int rwflags, void *arg)
760 {
761  if (pos->n_events < pos->capacity)
762  {
763  struct pollfd *pfdp = &pos->events[pos->n_events];
764  pfdp->fd = event;
765  pos->args[pos->n_events] = arg;
766  po_set_pollfd_events(pfdp, rwflags);
767  ++pos->n_events;
768  return true;
769  }
770  else
771  {
772  return false;
773  }
774 }
775 
776 static void
777 po_ctl(struct event_set *es, event_t event, unsigned int rwflags, void *arg)
778 {
779  struct po_set *pos = (struct po_set *) es;
780 
781  dmsg(D_EVENT_WAIT, "PO_CTL rwflags=0x%04x ev=%d arg=" ptr_format,
782  rwflags, (int)event, (ptr_type)arg);
783 
784  if (pos->fast)
785  {
786  if (!po_append_event(pos, event, rwflags, arg))
787  {
788  goto err;
789  }
790  }
791  else
792  {
793  int i;
794  for (i = 0; i < pos->n_events; ++i)
795  {
796  struct pollfd *pfdp = &pos->events[i];
797  if (pfdp->fd == event)
798  {
799  pos->args[i] = arg;
800  po_set_pollfd_events(pfdp, rwflags);
801  goto done;
802  }
803  }
804  if (!po_append_event(pos, event, rwflags, arg))
805  {
806  goto err;
807  }
808  }
809 
810 done:
811  return;
812 
813 err:
814  msg(D_EVENT_ERRORS, "Error: poll: too many I/O wait events");
815 }
816 
817 static int
818 po_wait(struct event_set *es, const struct timeval *tv, struct event_set_return *out, int outlen)
819 {
820  struct po_set *pos = (struct po_set *) es;
821  int stat;
822 
823  stat = poll(pos->events, pos->n_events, tv_to_ms_timeout(tv));
824 
825  ASSERT(stat <= pos->n_events);
826 
827  if (stat > 0)
828  {
829  int i, j = 0;
830  const struct pollfd *pfdp = pos->events;
831  for (i = 0; i < pos->n_events && j < outlen; ++i)
832  {
833  if (pfdp->revents & (POLLIN|POLLPRI|POLLERR|POLLHUP|POLLOUT))
834  {
835  out->rwflags = 0;
836  if (pfdp->revents & (POLLIN|POLLPRI|POLLERR|POLLHUP))
837  {
838  out->rwflags |= EVENT_READ;
839  }
840  if (pfdp->revents & POLLOUT)
841  {
842  out->rwflags |= EVENT_WRITE;
843  }
844  out->arg = pos->args[i];
845  dmsg(D_EVENT_WAIT, "PO_WAIT[%d,%d] fd=%d rev=0x%08x rwflags=0x%04x arg=" ptr_format " %s",
846  i, j, pfdp->fd, pfdp->revents, out->rwflags, (ptr_type)out->arg, pos->fast ? "" : "[scalable]");
847  ++out;
848  ++j;
849  }
850  else if (pfdp->revents)
851  {
852  msg(D_EVENT_ERRORS, "Error: poll: unknown revents=0x%04x for fd=%d",
853  (unsigned int)pfdp->revents, pfdp->fd);
854  }
855  ++pfdp;
856  }
857  return j;
858  }
859  return stat;
860 }
861 
862 static struct event_set *
863 po_init(int *maxevents, unsigned int flags)
864 {
865  struct po_set *pos;
866 
867  dmsg(D_EVENT_WAIT, "PO_INIT maxevents=%d flags=0x%08x", *maxevents, flags);
868 
869  ALLOC_OBJ_CLEAR(pos, struct po_set);
870 
871  /* set dispatch functions */
872  pos->func.free = po_free;
873  pos->func.reset = po_reset;
874  pos->func.del = po_del;
875  pos->func.ctl = po_ctl;
876  pos->func.wait = po_wait;
877 
878  if (flags & EVENT_METHOD_FAST)
879  {
880  pos->fast = true;
881  }
882 
883  pos->n_events = 0;
884 
885  /* Figure our event capacity */
886  ASSERT(*maxevents > 0);
887  pos->capacity = *maxevents;
888 
889  /* Allocate space for pollfd structures to be passed to poll() */
890  ALLOC_ARRAY_CLEAR(pos->events, struct pollfd, pos->capacity);
891 
892  /* Allocate space for event_set_return objects */
893  ALLOC_ARRAY_CLEAR(pos->args, void *, pos->capacity);
894 
895  return (struct event_set *) pos;
896 }
897 #endif /* POLL */
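
In po_wait() above, POLLERR and POLLHUP are folded into EVENT_READ so that the failure surfaces on the caller's normal read path, while revents bits that map to neither flag (for example POLLNVAL) are logged as an error. A small POSIX sketch of that mapping, separate from the OpenVPN API:

/* Sketch (POSIX): the revents -> read/write mapping used by po_wait().
 * POLLERR/POLLHUP are folded into "read" so the failure is seen on the
 * normal read path; anything else left over (e.g. POLLNVAL) is an error. */
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
    int p[2];
    if (pipe(p) < 0)
    {
        return 1;
    }
    (void) write(p[1], "x", 1);                       /* make p[0] readable */

    struct pollfd pfd[2] = {
        { .fd = p[0], .events = POLLIN | POLLPRI },
        { .fd = p[1], .events = POLLOUT },
    };

    int n = poll(pfd, 2, 1000);
    for (int i = 0; n > 0 && i < 2; ++i)
    {
        unsigned rwflags = 0;
        if (pfd[i].revents & (POLLIN | POLLPRI | POLLERR | POLLHUP))
        {
            rwflags |= 1u;                            /* EVENT_READ equivalent */
        }
        if (pfd[i].revents & POLLOUT)
        {
            rwflags |= 2u;                            /* EVENT_WRITE equivalent */
        }
        if (rwflags == 0 && pfd[i].revents)
        {
            fprintf(stderr, "unexpected revents=0x%x on fd %d\n",
                    (unsigned) pfd[i].revents, pfd[i].fd);
        }
        printf("fd=%d rwflags=0x%x\n", pfd[i].fd, rwflags);
    }

    close(p[0]);
    close(p[1]);
    return 0;
}
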
898 
899 #if SELECT
900 
901 struct se_set
902 {
903  struct event_set_functions func;
904  bool fast;
905  fd_set readfds;
906  fd_set writefds;
907  void **args; /* allocated to capacity size */
908  int maxfd; /* largest fd seen so far, always < capacity */
909  int capacity; /* fixed largest fd + 1 */
910 };
911 
912 static void
913 se_free(struct event_set *es)
914 {
915  struct se_set *ses = (struct se_set *) es;
916  free(ses->args);
917  free(ses);
918 }
919 
920 static void
921 se_reset(struct event_set *es)
922 {
923  struct se_set *ses = (struct se_set *) es;
924  int i;
925  ASSERT(ses->fast);
926 
927  dmsg(D_EVENT_WAIT, "SE_RESET");
928 
929  FD_ZERO(&ses->readfds);
930  FD_ZERO(&ses->writefds);
931  for (i = 0; i <= ses->maxfd; ++i)
932  {
933  ses->args[i] = NULL;
934  }
935  ses->maxfd = -1;
936 }
937 
938 static void
939 se_del(struct event_set *es, event_t event)
940 {
941  struct se_set *ses = (struct se_set *) es;
942  ASSERT(!ses->fast);
943 
944  dmsg(D_EVENT_WAIT, "SE_DEL ev=%d", (int)event);
945 
946  if (event >= 0 && event < ses->capacity)
947  {
948  FD_CLR(event, &ses->readfds);
949  FD_CLR(event, &ses->writefds);
950  ses->args[event] = NULL;
951  }
952  else
953  {
954  msg(D_EVENT_ERRORS, "Error: select/se_del: too many I/O wait events");
955  }
956  return;
957 }
958 
959 static void
960 se_ctl(struct event_set *es, event_t event, unsigned int rwflags, void *arg)
961 {
962  struct se_set *ses = (struct se_set *) es;
963 
964  dmsg(D_EVENT_WAIT, "SE_CTL rwflags=0x%04x ev=%d fast=%d cap=%d maxfd=%d arg=" ptr_format,
965  rwflags, (int)event, (int)ses->fast, ses->capacity, ses->maxfd, (ptr_type)arg);
966 
967  if (event >= 0 && event < ses->capacity)
968  {
969  ses->maxfd = max_int(event, ses->maxfd);
970  ses->args[event] = arg;
971  if (ses->fast)
972  {
973  if (rwflags & EVENT_READ)
974  {
975  openvpn_fd_set(event, &ses->readfds);
976  }
977  if (rwflags & EVENT_WRITE)
978  {
979  openvpn_fd_set(event, &ses->writefds);
980  }
981  }
982  else
983  {
984  if (rwflags & EVENT_READ)
985  {
986  openvpn_fd_set(event, &ses->readfds);
987  }
988  else
989  {
990  FD_CLR(event, &ses->readfds);
991  }
992  if (rwflags & EVENT_WRITE)
993  {
994  openvpn_fd_set(event, &ses->writefds);
995  }
996  else
997  {
998  FD_CLR(event, &ses->writefds);
999  }
1000  }
1001  }
1002  else
1003  {
1004  msg(D_EVENT_ERRORS, "Error: select: too many I/O wait events, fd=%d cap=%d",
1005  (int) event,
1006  ses->capacity);
1007  }
1008 }
1009 
1010 static int
1011 se_wait_return(struct se_set *ses,
1012  fd_set *read,
1013  fd_set *write,
1014  struct event_set_return *out,
1015  int outlen)
1016 {
1017  int i, j = 0;
1018  for (i = 0; i <= ses->maxfd && j < outlen; ++i)
1019  {
1020  const bool r = FD_ISSET(i, read);
1021  const bool w = FD_ISSET(i, write);
1022  if (r || w)
1023  {
1024  out->rwflags = 0;
1025  if (r)
1026  {
1027  out->rwflags |= EVENT_READ;
1028  }
1029  if (w)
1030  {
1031  out->rwflags |= EVENT_WRITE;
1032  }
1033  out->arg = ses->args[i];
1034  dmsg(D_EVENT_WAIT, "SE_WAIT[%d,%d] rwflags=0x%04x arg=" ptr_format,
1035  i, j, out->rwflags, (ptr_type)out->arg);
1036  ++out;
1037  ++j;
1038  }
1039  }
1040  return j;
1041 }
1042 
1043 static int
1044 se_wait_fast(struct event_set *es, const struct timeval *tv, struct event_set_return *out, int outlen)
1045 {
1046  struct se_set *ses = (struct se_set *) es;
1047  struct timeval tv_tmp = *tv;
1048  int stat;
1049 
1050  dmsg(D_EVENT_WAIT, "SE_WAIT_FAST maxfd=%d tv=%" PRIi64 "/%ld",
1051  ses->maxfd,
1052  (int64_t)tv_tmp.tv_sec,
1053  (long)tv_tmp.tv_usec);
1054 
1055  stat = select(ses->maxfd + 1, &ses->readfds, &ses->writefds, NULL, &tv_tmp);
1056 
1057  if (stat > 0)
1058  {
1059  stat = se_wait_return(ses, &ses->readfds, &ses->writefds, out, outlen);
1060  }
1061 
1062  return stat;
1063 }
1064 
1065 static int
1066 se_wait_scalable(struct event_set *es, const struct timeval *tv, struct event_set_return *out, int outlen)
1067 {
1068  struct se_set *ses = (struct se_set *) es;
1069  struct timeval tv_tmp = *tv;
1070  fd_set read = ses->readfds;
1071  fd_set write = ses->writefds;
1072  int stat;
1073 
1074  dmsg(D_EVENT_WAIT, "SE_WAIT_SCALEABLE maxfd=%d tv=%" PRIi64 "/%ld",
1075  ses->maxfd, (int64_t)tv_tmp.tv_sec, (long)tv_tmp.tv_usec);
1076 
1077  stat = select(ses->maxfd + 1, &read, &write, NULL, &tv_tmp);
1078 
1079  if (stat > 0)
1080  {
1081  stat = se_wait_return(ses, &read, &write, out, outlen);
1082  }
1083 
1084  return stat;
1085 }
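
The difference between the two select() paths is that select() overwrites the fd_set arguments it is given (and, on some platforms, the timeout, which is why both paths copy tv into tv_tmp). se_wait_fast() lets the master sets be consumed because a "fast" set is reset and re-registered before every wait; se_wait_scalable() keeps registrations across calls, so it hands select() working copies instead. A small sketch of the destructive behaviour that motivates the copy:

/* Sketch (POSIX): select() overwrites the fd_sets it is given, which is why
 * the scalable variant above passes copies of its master sets. */
#include <stdio.h>
#include <sys/select.h>
#include <unistd.h>

int
main(void)
{
    int p[2];
    fd_set master, working;
    struct timeval tv = { 0, 0 };

    if (pipe(p) < 0)
    {
        return 1;
    }

    FD_ZERO(&master);
    FD_SET(p[0], &master);          /* register interest; p[0] is not readable yet */

    working = master;               /* copy, as se_wait_scalable() does */
    select(p[0] + 1, &working, NULL, NULL, &tv);

    /* select() cleared the working copy (nothing was readable);
     * the master set still remembers the registration. */
    printf("working=%d master=%d\n",
           !!FD_ISSET(p[0], &working), !!FD_ISSET(p[0], &master));   /* 0 1 */

    close(p[0]);
    close(p[1]);
    return 0;
}
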
1086 
1087 static struct event_set *
1088 se_init(int *maxevents, unsigned int flags)
1089 {
1090  struct se_set *ses;
1091 
1092  dmsg(D_EVENT_WAIT, "SE_INIT maxevents=%d flags=0x%08x", *maxevents, flags);
1093 
1094  ALLOC_OBJ_CLEAR(ses, struct se_set);
1095 
1096  /* set dispatch functions */
1097  ses->func.free = se_free;
1098  ses->func.reset = se_reset;
1099  ses->func.del = se_del;
1100  ses->func.ctl = se_ctl;
1101  ses->func.wait = se_wait_scalable;
1102 
1103  if (flags & EVENT_METHOD_FAST)
1104  {
1105  ses->fast = true;
1106  ses->func.wait = se_wait_fast;
1107  }
1108 
1109  /* Select needs to be passed this value + 1 */
1110  ses->maxfd = -1;
1111 
1112  /* Set our event capacity */
1113  ASSERT(*maxevents > 0);
1114  *maxevents = min_int(*maxevents, SELECT_MAX_FDS);
1115  ses->capacity = SELECT_MAX_FDS;
1116 
1117  /* Allocate space for event_set_return void * args */
1118  ALLOC_ARRAY_CLEAR(ses->args, void *, ses->capacity);
1119 
1120  return (struct event_set *) ses;
1121 }
1122 #endif /* SELECT */
1123 
1124 static struct event_set *
1125 event_set_init_simple(int *maxevents, unsigned int flags)
1126 {
1127  struct event_set *ret = NULL;
1128 #ifdef _WIN32
1129  ret = we_init(maxevents, flags);
1130 #elif POLL && SELECT
1131 #if 0 /* Define to 1 if EVENT_METHOD_US_TIMEOUT should cause select to be favored over poll */
1132  if (flags & EVENT_METHOD_US_TIMEOUT)
1133  {
1134  ret = se_init(maxevents, flags);
1135  }
1136 #endif
1137 #ifdef SELECT_PREFERRED_OVER_POLL
1138  if (!ret)
1139  {
1140  ret = se_init(maxevents, flags);
1141  }
1142  if (!ret)
1143  {
1144  ret = po_init(maxevents, flags);
1145  }
1146 #else /* ifdef SELECT_PREFERRED_OVER_POLL */
1147  if (!ret)
1148  {
1149  ret = po_init(maxevents, flags);
1150  }
1151  if (!ret)
1152  {
1153  ret = se_init(maxevents, flags);
1154  }
1155 #endif
1156 #elif POLL
1157  ret = po_init(maxevents, flags);
1158 #elif SELECT
1159  ret = se_init(maxevents, flags);
1160 #else /* ifdef _WIN32 */
1161 #error At least one of poll, select, or WSAWaitForMultipleEvents must be supported by the kernel
1162 #endif /* ifdef _WIN32 */
1163  ASSERT(ret);
1164  return ret;
1165 }
1166 
1167 static struct event_set *
1168 event_set_init_scalable(int *maxevents, unsigned int flags)
1169 {
1170  struct event_set *ret = NULL;
1171 #if EPOLL
1172  ret = ep_init(maxevents, flags);
1173  if (!ret)
1174  {
1175  msg(M_WARN, "Note: sys_epoll API is unavailable, falling back to poll/select API");
1176  ret = event_set_init_simple(maxevents, flags);
1177  }
1178 #else /* if EPOLL */
1179  ret = event_set_init_simple(maxevents, flags);
1180 #endif
1181  ASSERT(ret);
1182  return ret;
1183 }
1184 
1185 struct event_set *
1186 event_set_init(int *maxevents, unsigned int flags)
1187 {
1188  if (flags & EVENT_METHOD_FAST)
1189  {
1190  return event_set_init_simple(maxevents, flags);
1191  }
1192  else
1193  {
1194  return event_set_init_scalable(maxevents, flags);
1195  }
1196 }
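
For context, a hedged sketch of how a caller might drive the object returned by event_set_init(). It is not taken from the OpenVPN sources: it assumes that struct event_set (declared in event.h) exposes the dispatch table set up by the backends above as "func", as the casts throughout this file imply, and that event_t is a plain file descriptor on POSIX builds.

/* Sketch only, not from the OpenVPN sources.  It assumes the event.h
 * interface implied by this file: event_set_init() plus a dispatch table
 * reachable as es->func, and event_t being a file descriptor on POSIX. */
#include "event.h"

#include <sys/time.h>
#include <unistd.h>

static int
wait_on_stdin(void *userdata)
{
    int maxevents = 8;
    struct event_set *es = event_set_init(&maxevents, EVENT_METHOD_FAST);

    struct event_set_return esr[8];
    struct timeval tv = { 1, 0 };            /* block for at most one second */

    es->func.reset(es);                      /* fast sets are rebuilt before each wait */
    es->func.ctl(es, STDIN_FILENO, EVENT_READ, userdata);

    int n = es->func.wait(es, &tv, esr, maxevents);
    for (int i = 0; i < n; ++i)
    {
        if (esr[i].rwflags & EVENT_READ)
        {
            /* esr[i].arg identifies which registration became ready */
        }
    }

    es->func.free(es);
    return n;
}
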