author     Kirill Volinsky <mataes2007@gmail.com>  2016-03-23 10:06:12 +0000
committer  Kirill Volinsky <mataes2007@gmail.com>  2016-03-23 10:06:12 +0000
commit     04f670ac098e07fe1cf5770d0d77e77f3b756a6b (patch)
tree       a26cb7ca4da13bbb2312a92c078df2e338d44145 /libs/libevent/src
parent     7e8aa70c724e6b72817cba090b7b178fd7f86341 (diff)
libevent moved to libs folder
telegram not compiled yet
git-svn-id: http://svn.miranda-ng.org/main/trunk@16524 1316c22d-e87f-b044-9b9b-93d7a3e3ba9c
Diffstat (limited to 'libs/libevent/src')
-rw-r--r--  libs/libevent/src/WIN32-Code/getopt.c  149
-rw-r--r--  libs/libevent/src/WIN32-Code/getopt.h  33
-rw-r--r--  libs/libevent/src/WIN32-Code/getopt_long.c  233
-rw-r--r--  libs/libevent/src/WIN32-Code/nmake/evconfig-private.h  6
-rw-r--r--  libs/libevent/src/WIN32-Code/nmake/event2/event-config.h  360
-rw-r--r--  libs/libevent/src/WIN32-Code/tree.h  677
-rw-r--r--  libs/libevent/src/arc4random.c  556
-rw-r--r--  libs/libevent/src/buffer.c  3439
-rw-r--r--  libs/libevent/src/buffer_iocp.c  326
-rw-r--r--  libs/libevent/src/bufferevent-internal.h  480
-rw-r--r--  libs/libevent/src/bufferevent.c  1016
-rw-r--r--  libs/libevent/src/bufferevent_async.c  686
-rw-r--r--  libs/libevent/src/bufferevent_filter.c  555
-rw-r--r--  libs/libevent/src/bufferevent_openssl.c  1484
-rw-r--r--  libs/libevent/src/bufferevent_pair.c  360
-rw-r--r--  libs/libevent/src/bufferevent_ratelim.c  1092
-rw-r--r--  libs/libevent/src/bufferevent_sock.c  707
-rw-r--r--  libs/libevent/src/changelist-internal.h  102
-rw-r--r--  libs/libevent/src/compat/sys/queue.h  488
-rw-r--r--  libs/libevent/src/defer-internal.h  70
-rw-r--r--  libs/libevent/src/epolltable-internal.h  1166
-rw-r--r--  libs/libevent/src/evbuffer-internal.h  351
-rw-r--r--  libs/libevent/src/evdns.c  4761
-rw-r--r--  libs/libevent/src/event-internal.h  479
-rw-r--r--  libs/libevent/src/event.c  3940
-rw-r--r--  libs/libevent/src/event_iocp.c  294
-rw-r--r--  libs/libevent/src/event_tagging.c  605
-rw-r--r--  libs/libevent/src/evmap-internal.h  117
-rw-r--r--  libs/libevent/src/evmap.c  1055
-rw-r--r--  libs/libevent/src/evrpc-internal.h  205
-rw-r--r--  libs/libevent/src/evrpc.c  1171
-rw-r--r--  libs/libevent/src/evsignal-internal.h  65
-rw-r--r--  libs/libevent/src/evthread-internal.h  392
-rw-r--r--  libs/libevent/src/evthread.c  509
-rw-r--r--  libs/libevent/src/evthread_win32.c  341
-rw-r--r--  libs/libevent/src/evutil.c  2667
-rw-r--r--  libs/libevent/src/evutil_rand.c  206
-rw-r--r--  libs/libevent/src/evutil_time.c  538
-rw-r--r--  libs/libevent/src/ht-internal.h  487
-rw-r--r--  libs/libevent/src/http-internal.h  200
-rw-r--r--  libs/libevent/src/http.c  4892
-rw-r--r--  libs/libevent/src/iocp-internal.h  201
-rw-r--r--  libs/libevent/src/ipv6-internal.h  83
-rw-r--r--  libs/libevent/src/listener.c  889
-rw-r--r--  libs/libevent/src/log-internal.h  83
-rw-r--r--  libs/libevent/src/log.c  253
-rw-r--r--  libs/libevent/src/minheap-internal.h  188
-rw-r--r--  libs/libevent/src/mm-internal.h  87
-rw-r--r--  libs/libevent/src/ratelim-internal.h  105
-rw-r--r--  libs/libevent/src/signal.c  479
-rw-r--r--  libs/libevent/src/strlcpy-internal.h  22
-rw-r--r--  libs/libevent/src/strlcpy.c  75
-rw-r--r--  libs/libevent/src/time-internal.h  98
-rw-r--r--  libs/libevent/src/util-internal.h  485
-rw-r--r--  libs/libevent/src/win32select.c  388
55 files changed, 40696 insertions, 0 deletions
diff --git a/libs/libevent/src/WIN32-Code/getopt.c b/libs/libevent/src/WIN32-Code/getopt.c
new file mode 100644
index 0000000000..0fcba5d915
--- /dev/null
+++ b/libs/libevent/src/WIN32-Code/getopt.c
@@ -0,0 +1,149 @@
+/* $NetBSD: getopt.c,v 1.16 1999/12/02 13:15:56 kleink Exp $ */
+
+/*
+ * Copyright (c) 1987, 1993, 1994, 1995
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS
+ * IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if 0
+static char sccsid[] = "@(#)getopt.c 8.3 (Berkeley) 4/27/95";
+#endif
+
+#include <assert.h>
+#include <errno.h>
+#include <stdio.h>
+#include <string.h>
+
+#define __P(x) x
+#define _DIAGASSERT(x) assert(x)
+
+#ifdef __weak_alias
+__weak_alias(getopt,_getopt);
+#endif
+
+
+int opterr = 1, /* if error message should be printed */
+ optind = 1, /* index into parent argv vector */
+ optopt, /* character checked for validity */
+ optreset; /* reset getopt */
+char *optarg; /* argument associated with option */
+
+static char * _progname __P((char *));
+int getopt_internal __P((int, char * const *, const char *));
+
+static char *
+_progname(nargv0)
+ char * nargv0;
+{
+ char * tmp;
+
+ _DIAGASSERT(nargv0 != NULL);
+
+ tmp = strrchr(nargv0, '/');
+ if (tmp)
+ tmp++;
+ else
+ tmp = nargv0;
+ return(tmp);
+}
+
+#define BADCH (int)'?'
+#define BADARG (int)':'
+#define EMSG ""
+
+/*
+ * getopt --
+ * Parse argc/argv argument vector.
+ */
+int
+getopt(nargc, nargv, ostr)
+ int nargc;
+ char * const nargv[];
+ const char *ostr;
+{
+ static char *__progname = 0;
+ static char *place = EMSG; /* option letter processing */
+ char *oli; /* option letter list index */
+ __progname = __progname?__progname:_progname(*nargv);
+
+ _DIAGASSERT(nargv != NULL);
+ _DIAGASSERT(ostr != NULL);
+
+ if (optreset || !*place) { /* update scanning pointer */
+ optreset = 0;
+ if (optind >= nargc || *(place = nargv[optind]) != '-') {
+ place = EMSG;
+ return (-1);
+ }
+ if (place[1] && *++place == '-' /* found "--" */
+ && place[1] == '\0') {
+ ++optind;
+ place = EMSG;
+ return (-1);
+ }
+ } /* option letter okay? */
+ if ((optopt = (int)*place++) == (int)':' ||
+ !(oli = strchr(ostr, optopt))) {
+ /*
+ * if the user didn't specify '-' as an option,
+ * assume it means -1.
+ */
+ if (optopt == (int)'-')
+ return (-1);
+ if (!*place)
+ ++optind;
+ if (opterr && *ostr != ':')
+ (void)fprintf(stderr,
+ "%s: illegal option -- %c\n", __progname, optopt);
+ return (BADCH);
+ }
+ if (*++oli != ':') { /* don't need argument */
+ optarg = NULL;
+ if (!*place)
+ ++optind;
+ }
+ else { /* need an argument */
+ if (*place) /* no white space */
+ optarg = place;
+ else if (nargc <= ++optind) { /* no arg */
+ place = EMSG;
+ if (*ostr == ':')
+ return (BADARG);
+ if (opterr)
+ (void)fprintf(stderr,
+ "%s: option requires an argument -- %c\n",
+ __progname, optopt);
+ return (BADCH);
+ }
+ else /* white space */
+ optarg = nargv[optind];
+ place = EMSG;
+ ++optind;
+ }
+ return (optopt); /* dump back option letter */
+}
+
diff --git a/libs/libevent/src/WIN32-Code/getopt.h b/libs/libevent/src/WIN32-Code/getopt.h
new file mode 100644
index 0000000000..796f455050
--- /dev/null
+++ b/libs/libevent/src/WIN32-Code/getopt.h
@@ -0,0 +1,33 @@
+#ifndef __GETOPT_H__
+#define __GETOPT_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+extern int opterr; /* if error message should be printed */
+extern int optind; /* index into parent argv vector */
+extern int optopt; /* character checked for validity */
+extern int optreset; /* reset getopt */
+extern char *optarg; /* argument associated with option */
+
+struct option
+{
+ const char *name;
+ int has_arg;
+ int *flag;
+ int val;
+};
+
+#define no_argument 0
+#define required_argument 1
+#define optional_argument 2
+
+int getopt(int, char**, const char*);
+int getopt_long(int, char**, const char*, const struct option*, int*);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __GETOPT_H__ */
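For context, a minimal caller sketch (hypothetical, not part of this commit) of how the bundled Win32 getopt() replacement above is typically driven; the option string "vo:" and the default file name are invented for the example, and it is meant to be compiled together with WIN32-Code/getopt.c.

#include <stdio.h>
#include <stdlib.h>
#include "getopt.h"

int
main(int argc, char **argv)
{
	int ch, verbose = 0;
	const char *outfile = "out.txt";

	/* "vo:": -v takes no argument, -o requires one (the trailing ':') */
	while ((ch = getopt(argc, argv, "vo:")) != -1) {
		switch (ch) {
		case 'v':
			verbose = 1;
			break;
		case 'o':
			outfile = optarg;
			break;
		default:	/* getopt() has already printed a diagnostic */
			fprintf(stderr, "usage: %s [-v] [-o file] args...\n", argv[0]);
			return EXIT_FAILURE;
		}
	}
	/* optind now indexes the first non-option argument */
	printf("verbose=%d outfile=%s first operand at argv[%d]\n",
	    verbose, outfile, optind);
	return EXIT_SUCCESS;
}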
diff --git a/libs/libevent/src/WIN32-Code/getopt_long.c b/libs/libevent/src/WIN32-Code/getopt_long.c
new file mode 100644
index 0000000000..03f0c01a15
--- /dev/null
+++ b/libs/libevent/src/WIN32-Code/getopt_long.c
@@ -0,0 +1,233 @@
+
+/*
+ * Copyright (c) 1987, 1993, 1994, 1996
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS
+ * IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <assert.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include "getopt.h"
+
+extern int opterr; /* if error message should be printed */
+extern int optind; /* index into parent argv vector */
+extern int optopt; /* character checked for validity */
+extern int optreset; /* reset getopt */
+extern char *optarg; /* argument associated with option */
+
+#define __P(x) x
+#define _DIAGASSERT(x) assert(x)
+
+static char * __progname __P((char *));
+int getopt_internal __P((int, char * const *, const char *));
+
+static char *
+__progname(nargv0)
+ char * nargv0;
+{
+ char * tmp;
+
+ _DIAGASSERT(nargv0 != NULL);
+
+ tmp = strrchr(nargv0, '/');
+ if (tmp)
+ tmp++;
+ else
+ tmp = nargv0;
+ return(tmp);
+}
+
+#define BADCH (int)'?'
+#define BADARG (int)':'
+#define EMSG ""
+
+/*
+ * getopt --
+ * Parse argc/argv argument vector.
+ */
+int
+getopt_internal(nargc, nargv, ostr)
+ int nargc;
+ char * const *nargv;
+ const char *ostr;
+{
+ static char *place = EMSG; /* option letter processing */
+ char *oli; /* option letter list index */
+
+ _DIAGASSERT(nargv != NULL);
+ _DIAGASSERT(ostr != NULL);
+
+ if (optreset || !*place) { /* update scanning pointer */
+ optreset = 0;
+ if (optind >= nargc || *(place = nargv[optind]) != '-') {
+ place = EMSG;
+ return (-1);
+ }
+ if (place[1] && *++place == '-') { /* found "--" */
+ /* ++optind; */
+ place = EMSG;
+ return (-2);
+ }
+ } /* option letter okay? */
+ if ((optopt = (int)*place++) == (int)':' ||
+ !(oli = strchr(ostr, optopt))) {
+ /*
+ * if the user didn't specify '-' as an option,
+ * assume it means -1.
+ */
+ if (optopt == (int)'-')
+ return (-1);
+ if (!*place)
+ ++optind;
+ if (opterr && *ostr != ':')
+ (void)fprintf(stderr,
+ "%s: illegal option -- %c\n", __progname(nargv[0]), optopt);
+ return (BADCH);
+ }
+ if (*++oli != ':') { /* don't need argument */
+ optarg = NULL;
+ if (!*place)
+ ++optind;
+ } else { /* need an argument */
+ if (*place) /* no white space */
+ optarg = place;
+ else if (nargc <= ++optind) { /* no arg */
+ place = EMSG;
+ if ((opterr) && (*ostr != ':'))
+ (void)fprintf(stderr,
+ "%s: option requires an argument -- %c\n",
+ __progname(nargv[0]), optopt);
+ return (BADARG);
+ } else /* white space */
+ optarg = nargv[optind];
+ place = EMSG;
+ ++optind;
+ }
+ return (optopt); /* dump back option letter */
+}
+
+#if 0
+/*
+ * getopt --
+ * Parse argc/argv argument vector.
+ */
+int
+getopt2(nargc, nargv, ostr)
+ int nargc;
+ char * const *nargv;
+ const char *ostr;
+{
+ int retval;
+
+ if ((retval = getopt_internal(nargc, nargv, ostr)) == -2) {
+ retval = -1;
+ ++optind;
+ }
+ return(retval);
+}
+#endif
+
+/*
+ * getopt_long --
+ * Parse argc/argv argument vector.
+ */
+int
+getopt_long(nargc, nargv, options, long_options, index)
+ int nargc;
+ char ** nargv;
+ const char * options;
+ const struct option * long_options;
+ int * index;
+{
+ int retval;
+
+ _DIAGASSERT(nargv != NULL);
+ _DIAGASSERT(options != NULL);
+ _DIAGASSERT(long_options != NULL);
+ /* index may be NULL */
+
+ if ((retval = getopt_internal(nargc, nargv, options)) == -2) {
+ char *current_argv = nargv[optind++] + 2, *has_equal;
+ int i, current_argv_len, match = -1;
+
+ if (*current_argv == '\0') {
+ return(-1);
+ }
+ if ((has_equal = strchr(current_argv, '=')) != NULL) {
+ current_argv_len = has_equal - current_argv;
+ has_equal++;
+ } else
+ current_argv_len = strlen(current_argv);
+
+ for (i = 0; long_options[i].name; i++) {
+ if (strncmp(current_argv, long_options[i].name, current_argv_len))
+ continue;
+
+ if (strlen(long_options[i].name) == (unsigned)current_argv_len) {
+ match = i;
+ break;
+ }
+ if (match == -1)
+ match = i;
+ }
+ if (match != -1) {
+ if (long_options[match].has_arg == required_argument ||
+ long_options[match].has_arg == optional_argument) {
+ if (has_equal)
+ optarg = has_equal;
+ else
+ optarg = nargv[optind++];
+ }
+ if ((long_options[match].has_arg == required_argument)
+ && (optarg == NULL)) {
+ /*
+ * Missing argument, leading :
+ * indicates no error should be generated
+ */
+ if ((opterr) && (*options != ':'))
+ (void)fprintf(stderr,
+ "%s: option requires an argument -- %s\n",
+ __progname(nargv[0]), current_argv);
+ return (BADARG);
+ }
+ } else { /* No matching argument */
+ if ((opterr) && (*options != ':'))
+ (void)fprintf(stderr,
+ "%s: illegal option -- %s\n", __progname(nargv[0]), current_argv);
+ return (BADCH);
+ }
+ if (long_options[match].flag) {
+ *long_options[match].flag = long_options[match].val;
+ retval = 0;
+ } else
+ retval = long_options[match].val;
+ if (index)
+ *index = match;
+ }
+ return(retval);
+}
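Similarly, a hypothetical sketch (not in the commit) of long-option parsing with the getopt_long() replacement above; the option table is made up for illustration. The terminating all-zero entry is what stops the name-matching loop inside getopt_long().

#include <stdio.h>
#include "getopt.h"

int
main(int argc, char **argv)
{
	/* the table must end with a NULL-named entry */
	static const struct option longopts[] = {
		{ "port",    required_argument, NULL, 'p' },
		{ "verbose", no_argument,       NULL, 'v' },
		{ NULL,      0,                 NULL, 0 }
	};
	int ch, idx = 0;	/* idx receives the table index of a matched long option */

	while ((ch = getopt_long(argc, argv, "p:v", longopts, &idx)) != -1) {
		switch (ch) {
		case 'p':
			/* reached via -p N, --port N, or --port=N */
			printf("port = %s\n", optarg);
			break;
		case 'v':
			puts("verbose");
			break;
		default:
			return 1;
		}
	}
	return 0;
}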
diff --git a/libs/libevent/src/WIN32-Code/nmake/evconfig-private.h b/libs/libevent/src/WIN32-Code/nmake/evconfig-private.h
new file mode 100644
index 0000000000..88e206272b
--- /dev/null
+++ b/libs/libevent/src/WIN32-Code/nmake/evconfig-private.h
@@ -0,0 +1,6 @@
+#if !defined(EVENT_EVCONFIG__PRIVATE_H_) && !defined(__MINGW32__)
+#define EVENT_EVCONFIG__PRIVATE_H_
+
+/* Nothing to see here. Move along. */
+
+#endif
diff --git a/libs/libevent/src/WIN32-Code/nmake/event2/event-config.h b/libs/libevent/src/WIN32-Code/nmake/event2/event-config.h
new file mode 100644
index 0000000000..8cbf190289
--- /dev/null
+++ b/libs/libevent/src/WIN32-Code/nmake/event2/event-config.h
@@ -0,0 +1,360 @@
+/* event2/event-config.h
+ *
+ * This file was generated by autoconf when libevent was built, and post-
+ * processed by Libevent so that its macros would have a uniform prefix.
+ *
+ * DO NOT EDIT THIS FILE.
+ *
+ * Do not rely on macros in this file existing in later versions.
+ */
+#ifndef EVENT_CONFIG_H__
+#define EVENT_CONFIG_H__
+/* config.h. Generated by configure. */
+/* config.h.in. Generated from configure.in by autoheader. */
+
+/* Define if libevent should not allow replacing the mm functions */
+/* #undef EVENT__DISABLE_MM_REPLACEMENT */
+
+/* Define if libevent should not be compiled with thread support */
+/* #undef EVENT__DISABLE_THREAD_SUPPORT */
+
+/* Define if clock_gettime is available in libc */
+/* #undef _EVENT_DNS_USE_CPU_CLOCK_FOR_ID */
+
+/* Define is no secure id variant is available */
+/* #define _EVENT_DNS_USE_GETTIMEOFDAY_FOR_ID 1 */
+#define EVENT_DNS_USE_FTIME_FOR_ID_ 1
+
+/* Define to 1 if you have the <arpa/inet.h> header file. */
+/* #undef EVENT__HAVE_ARPA_INET_H */
+
+/* Define to 1 if you have the `clock_gettime' function. */
+/* #undef EVENT__HAVE_CLOCK_GETTIME */
+
+/* Define if /dev/poll is available */
+/* #undef EVENT__HAVE_DEVPOLL */
+
+/* Define to 1 if you have the <dlfcn.h> header file. */
+/* #undef EVENT__HAVE_DLFCN_H */
+
+/* Define if your system supports the epoll system calls */
+/* #undef EVENT__HAVE_EPOLL */
+
+/* Define to 1 if you have the `epoll_ctl' function. */
+/* #undef EVENT__HAVE_EPOLL_CTL */
+
+/* Define to 1 if you have the `eventfd' function. */
+/* #undef EVENT__HAVE_EVENTFD */
+
+/* Define if your system supports event ports */
+/* #undef EVENT__HAVE_EVENT_PORTS */
+
+/* Define to 1 if you have the `fcntl' function. */
+/* #undef EVENT__HAVE_FCNTL */
+
+/* Define to 1 if you have the <fcntl.h> header file. */
+#define EVENT__HAVE_FCNTL_H 1
+
+/* Define to 1 if you have the `getaddrinfo' function. */
+#define EVENT__HAVE_GETADDRINFO 1
+
+/* Define to 1 if you have the `getnameinfo' function. */
+#define EVENT__HAVE_GETNAMEINFO 1
+
+/* Define to 1 if you have the `getprotobynumber' function. */
+#define EVENT__HAVE_GETPROTOBYNUMBER 1
+
+/* Define to 1 if you have the `getservbyname' function. */
+#define EVENT__HAVE_GETSERVBYNAME 1
+
+/* Define to 1 if you have the `gettimeofday' function. */
+/* #define EVENT__HAVE_GETTIMEOFDAY 1 */
+
+/* Define to 1 if you have the `inet_ntop' function. */
+/* #undef EVENT__HAVE_INET_NTOP */
+
+/* Define to 1 if you have the `inet_pton' function. */
+/* #undef EVENT__HAVE_INET_PTON */
+
+/* Define to 1 if you have the <inttypes.h> header file. */
+/* #define EVENT__HAVE_INTTYPES_H 1 */
+
+/* Define to 1 if you have the `kqueue' function. */
+/* #undef EVENT__HAVE_KQUEUE */
+
+/* Define if the system has zlib */
+/* #undef EVENT__HAVE_LIBZ */
+
+/* Define to 1 if you have the <memory.h> header file. */
+#define EVENT__HAVE_MEMORY_H 1
+
+/* Define to 1 if you have the `mmap' function. */
+/* #undef EVENT__HAVE_MMAP */
+
+/* Define to 1 if you have the <netinet/in6.h> header file. */
+/* #undef EVENT__HAVE_NETINET_IN6_H */
+
+/* Define to 1 if you have the <netinet/in.h> header file. */
+/* #undef EVENT__HAVE_NETINET_IN_H */
+
+/* Define to 1 if you have the `pipe' function. */
+/* #undef EVENT__HAVE_PIPE */
+
+/* Define to 1 if you have the `poll' function. */
+/* #undef EVENT__HAVE_POLL */
+
+/* Define to 1 if you have the <poll.h> header file. */
+/* #undef EVENT__HAVE_POLL_H */
+
+/* Define to 1 if you have the `port_create' function. */
+/* #undef EVENT__HAVE_PORT_CREATE */
+
+/* Define to 1 if you have the <port.h> header file. */
+/* #undef EVENT__HAVE_PORT_H */
+
+/* Define if you have POSIX threads libraries and header files. */
+/* #undef EVENT__HAVE_PTHREAD */
+
+/* Define if we have pthreads on this system */
+/* #undef EVENT__HAVE_PTHREADS */
+
+/* Define to 1 if the system has the type `sa_family_t'. */
+/* #undef EVENT__HAVE_SA_FAMILY_T */
+
+/* Define to 1 if you have the `select' function. */
+/* #undef EVENT__HAVE_SELECT */
+
+/* Define to 1 if you have the `sendfile' function. */
+/* #undef EVENT__HAVE_SENDFILE */
+
+/* Define if F_SETFD is defined in <fcntl.h> */
+/* #undef EVENT__HAVE_SETFD */
+
+/* Define to 1 if you have the `sigaction' function. */
+/* #undef EVENT__HAVE_SIGACTION */
+
+/* Define to 1 if you have the `signal' function. */
+#define EVENT__HAVE_SIGNAL 1
+
+/* Define to 1 if you have the `splice' function. */
+/* #undef EVENT__HAVE_SPLICE */
+
+/* Define to 1 if you have the <stdarg.h> header file. */
+#define EVENT__HAVE_STDARG_H 1
+
+/* Define to 1 if you have the <stddef.h> header file. */
+#define EVENT__HAVE_STDDEF_H 1
+
+/* Define to 1 if you have the <stdint.h> header file. */
+/* #define EVENT__HAVE_STDINT_H 1 */
+
+/* Define to 1 if you have the <stdlib.h> header file. */
+#define EVENT__HAVE_STDLIB_H 1
+
+/* Define to 1 if you have the <strings.h> header file. */
+#define EVENT__HAVE_STRINGS_H 1
+
+/* Define to 1 if you have the <string.h> header file. */
+#define EVENT__HAVE_STRING_H 1
+
+/* Define to 1 if you have the `strlcpy' function. */
+/* #undef EVENT__HAVE_STRLCPY */
+
+/* Define to 1 if you have the `strsep' function. */
+/* #undef EVENT__HAVE_STRSEP */
+
+/* Define to 1 if you have the `strtok_r' function. */
+/* #undef EVENT__HAVE_STRTOK_R */
+
+/* Define to 1 if you have the `strtoll' function. */
+/* #define EVENT__HAVE_STRTOLL 1 */
+
+#define EVENT__HAVE_STRUCT_ADDRINFO 1
+
+/* Define to 1 if the system has the type `struct in6_addr'. */
+#define EVENT__HAVE_STRUCT_IN6_ADDR 1
+
+/* Define to 1 if `s6_addr16' is member of `struct in6_addr'. */
+#define EVENT__HAVE_STRUCT_IN6_ADDR_S6_ADDR16 1
+
+/* Define to 1 if `s6_addr32' is member of `struct in6_addr'. */
+#define EVENT__HAVE_STRUCT_IN6_ADDR_S6_ADDR32 1
+
+/* Define to 1 if the system has the type `struct sockaddr_in6'. */
+#define EVENT__HAVE_STRUCT_SOCKADDR_IN6 1
+
+/* Define to 1 if `sin6_len' is member of `struct sockaddr_in6'. */
+/* #undef EVENT__HAVE_STRUCT_SOCKADDR_IN6_SIN6_LEN */
+
+/* Define to 1 if `sin_len' is member of `struct sockaddr_in'. */
+/* #undef EVENT__HAVE_STRUCT_SOCKADDR_IN_SIN_LEN */
+
+/* Define to 1 if the system has the type `struct sockaddr_storage'. */
+#define EVENT__HAVE_STRUCT_SOCKADDR_STORAGE 1
+
+/* Define to 1 if you have the <sys/devpoll.h> header file. */
+/* #undef EVENT__HAVE_SYS_DEVPOLL_H */
+
+/* Define to 1 if you have the <sys/epoll.h> header file. */
+/* #undef EVENT__HAVE_SYS_EPOLL_H */
+
+/* Define to 1 if you have the <sys/eventfd.h> header file. */
+/* #undef EVENT__HAVE_SYS_EVENTFD_H */
+
+/* Define to 1 if you have the <sys/event.h> header file. */
+/* #undef EVENT__HAVE_SYS_EVENT_H */
+
+/* Define to 1 if you have the <sys/ioctl.h> header file. */
+/* #undef EVENT__HAVE_SYS_IOCTL_H */
+
+/* Define to 1 if you have the <sys/mman.h> header file. */
+/* #undef EVENT__HAVE_SYS_MMAN_H */
+
+/* Define to 1 if you have the <sys/param.h> header file. */
+/* #define EVENT__HAVE_SYS_PARAM_H 1 */
+
+/* Define to 1 if you have the <sys/queue.h> header file. */
+/* #undef EVENT__HAVE_SYS_QUEUE_H */
+
+/* Define to 1 if you have the <sys/select.h> header file. */
+/* #undef EVENT__HAVE_SYS_SELECT_H */
+
+/* Define to 1 if you have the <sys/sendfile.h> header file. */
+/* #undef EVENT__HAVE_SYS_SENDFILE_H */
+
+/* Define to 1 if you have the <sys/socket.h> header file. */
+/* #undef EVENT__HAVE_SYS_SOCKET_H */
+
+/* Define to 1 if you have the <sys/stat.h> header file. */
+#define EVENT__HAVE_SYS_STAT_H 1
+
+/* Define to 1 if you have the <sys/time.h> header file. */
+/* #define EVENT__HAVE_SYS_TIME_H 1 */
+
+/* Define to 1 if you have the <sys/types.h> header file. */
+#define EVENT__HAVE_SYS_TYPES_H 1
+
+/* Define to 1 if you have the <sys/uio.h> header file. */
+/* #undef EVENT__HAVE_SYS_UIO_H */
+
+/* Define if TAILQ_FOREACH is defined in <sys/queue.h> */
+/* #undef EVENT__HAVE_TAILQFOREACH */
+
+/* Define if timeradd is defined in <sys/time.h> */
+/* #undef EVENT__HAVE_TIMERADD */
+
+/* Define if timerclear is defined in <sys/time.h> */
+#define EVENT__HAVE_TIMERCLEAR 1
+
+/* Define if timercmp is defined in <sys/time.h> */
+#define EVENT__HAVE_TIMERCMP 1
+
+/* Define if timerisset is defined in <sys/time.h> */
+#define EVENT__HAVE_TIMERISSET 1
+
+/* Define to 1 if the system has the type `uint16_t'. */
+/* #define EVENT__HAVE_UINT16_T 1 */
+
+/* Define to 1 if the system has the type `uint32_t'. */
+/* #define EVENT__HAVE_UINT32_T 1 */
+
+/* Define to 1 if the system has the type `uint64_t'. */
+/* #define EVENT__HAVE_UINT64_T 1 */
+
+/* Define to 1 if the system has the type `uint8_t'. */
+/* #define EVENT__HAVE_UINT8_T 1 */
+
+/* Define to 1 if you have the <unistd.h> header file. */
+/* #define EVENT__HAVE_UNISTD_H 1 */
+
+/* Define to 1 if you have the `vasprintf' function. */
+/* #undef EVENT__HAVE_VASPRINTF */
+
+/* Define if kqueue works correctly with pipes */
+/* #undef EVENT__HAVE_WORKING_KQUEUE */
+
+/* Numeric representation of the version */
+#define EVENT__NUMERIC_VERSION 0x02010500
+
+/* Name of package */
+#define EVENT__PACKAGE "libevent"
+
+/* Define to the address where bug reports for this package should be sent. */
+#define EVENT__PACKAGE_BUGREPORT ""
+
+/* Define to the full name of this package. */
+#define EVENT__PACKAGE_NAME ""
+
+/* Define to the full name and version of this package. */
+#define EVENT__PACKAGE_STRING ""
+
+/* Define to the one symbol short name of this package. */
+#define EVENT__PACKAGE_TARNAME ""
+
+/* Define to the version of this package. */
+#define EVENT__PACKAGE_VERSION ""
+
+/* Define to necessary symbol if this constant uses a non-standard name on
+ your system. */
+/* #undef EVENT__PTHREAD_CREATE_JOINABLE */
+
+/* The size of a `int', as computed by sizeof. */
+#define EVENT__SIZEOF_INT 4
+
+/* The size of a `long', as computed by sizeof. */
+#define EVENT__SIZEOF_LONG 4
+
+/* The size of a `long long', as computed by sizeof. */
+#define EVENT__SIZEOF_LONG_LONG 8
+
+/* The size of a `short', as computed by sizeof. */
+#define EVENT__SIZEOF_SHORT 2
+
+/* The size of `size_t', as computed by sizeof. */
+#ifdef _WIN64
+#define EVENT__SIZEOF_SIZE_T 8
+#else
+#define EVENT__SIZEOF_SIZE_T 4
+#endif
+
+/* The size of `void *', as computed by sizeof. */
+#ifdef _WIN64
+#define EVENT__SIZEOF_VOID_P 8
+#else
+#define EVENT__SIZEOF_VOID_P 4
+#endif
+
+/* Define to 1 if you have the ANSI C header files. */
+#define EVENT__STDC_HEADERS 1
+
+/* Define to 1 if you can safely include both <sys/time.h> and <time.h>. */
+#define EVENT__TIME_WITH_SYS_TIME 1
+
+/* Version number of package */
+#define EVENT__VERSION "2.1.5-beta"
+
+/* Define to appropriate substitute if compiler doesn't have __func__ */
+#define EVENT____func__ __FUNCTION__
+
+/* Define to empty if `const' does not conform to ANSI C. */
+/* #undef EVENT__const */
+
+/* Define to `__inline__' or `__inline' if that's what the C compiler
+ calls it, or to nothing if 'inline' is not supported under any name. */
+#ifndef _EVENT___cplusplus
+#define EVENT__inline __inline
+#endif
+
+/* Define to `int' if <sys/types.h> does not define. */
+/* #undef EVENT__pid_t */
+
+/* Define to `unsigned' if <sys/types.h> does not define. */
+/* #undef EVENT__size_t */
+
+/* Define to unsigned int if you don't have it */
+#define EVENT__socklen_t unsigned int
+
+/* Define to `int' if <sys/types.h> does not define. */
+#define EVENT__ssize_t SSIZE_T
+
+#endif
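As a brief illustration (not part of this commit), code that consumes this generated header keys portability decisions off the EVENT__ feature macros instead of hard-coding platform assumptions; the typedef name below is invented for the example.

#include "event2/event-config.h"

#ifdef EVENT__HAVE_STDINT_H
#include <stdint.h>
typedef uint32_t demo_u32;
#elif EVENT__SIZEOF_INT == 4
/* this nmake config leaves EVENT__HAVE_STDINT_H undefined for older MSVC,
 * so fall back on the probed size of int */
typedef unsigned int demo_u32;
#else
#error "no 32-bit unsigned type available"
#endif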
diff --git a/libs/libevent/src/WIN32-Code/tree.h b/libs/libevent/src/WIN32-Code/tree.h
new file mode 100644
index 0000000000..2ccfbf20ac
--- /dev/null
+++ b/libs/libevent/src/WIN32-Code/tree.h
@@ -0,0 +1,677 @@
+/* $OpenBSD: tree.h,v 1.7 2002/10/17 21:51:54 art Exp $ */
+/*
+ * Copyright 2002 Niels Provos <provos@citi.umich.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SYS_TREE_H_
+#define _SYS_TREE_H_
+
+/*
+ * This file defines data structures for different types of trees:
+ * splay trees and red-black trees.
+ *
+ * A splay tree is a self-organizing data structure. Every operation
+ * on the tree causes a splay to happen. The splay moves the requested
+ * node to the root of the tree and partly rebalances it.
+ *
+ * This has the benefit that request locality causes faster lookups as
+ * the requested nodes move to the top of the tree. On the other hand,
+ * every lookup causes memory writes.
+ *
+ * The Balance Theorem bounds the total access time for m operations
+ * and n inserts on an initially empty tree as O((m + n)lg n). The
+ * amortized cost for a sequence of m accesses to a splay tree is O(lg n);
+ *
+ * A red-black tree is a binary search tree with the node color as an
+ * extra attribute. It fulfills a set of conditions:
+ * - every search path from the root to a leaf consists of the
+ * same number of black nodes,
+ * - each red node (except for the root) has a black parent,
+ * - each leaf node is black.
+ *
+ * Every operation on a red-black tree is bounded as O(lg n).
+ * The maximum height of a red-black tree is 2lg (n+1).
+ */
+
+#define SPLAY_HEAD(name, type) \
+struct name { \
+ struct type *sph_root; /* root of the tree */ \
+}
+
+#define SPLAY_INITIALIZER(root) \
+ { NULL }
+
+#define SPLAY_INIT(root) do { \
+ (root)->sph_root = NULL; \
+} while (0)
+
+#define SPLAY_ENTRY(type) \
+struct { \
+ struct type *spe_left; /* left element */ \
+ struct type *spe_right; /* right element */ \
+}
+
+#define SPLAY_LEFT(elm, field) (elm)->field.spe_left
+#define SPLAY_RIGHT(elm, field) (elm)->field.spe_right
+#define SPLAY_ROOT(head) (head)->sph_root
+#define SPLAY_EMPTY(head) (SPLAY_ROOT(head) == NULL)
+
+/* SPLAY_ROTATE_{LEFT,RIGHT} expect that tmp hold SPLAY_{RIGHT,LEFT} */
+#define SPLAY_ROTATE_RIGHT(head, tmp, field) do { \
+ SPLAY_LEFT((head)->sph_root, field) = SPLAY_RIGHT(tmp, field); \
+ SPLAY_RIGHT(tmp, field) = (head)->sph_root; \
+ (head)->sph_root = tmp; \
+} while (0)
+
+#define SPLAY_ROTATE_LEFT(head, tmp, field) do { \
+ SPLAY_RIGHT((head)->sph_root, field) = SPLAY_LEFT(tmp, field); \
+ SPLAY_LEFT(tmp, field) = (head)->sph_root; \
+ (head)->sph_root = tmp; \
+} while (0)
+
+#define SPLAY_LINKLEFT(head, tmp, field) do { \
+ SPLAY_LEFT(tmp, field) = (head)->sph_root; \
+ tmp = (head)->sph_root; \
+ (head)->sph_root = SPLAY_LEFT((head)->sph_root, field); \
+} while (0)
+
+#define SPLAY_LINKRIGHT(head, tmp, field) do { \
+ SPLAY_RIGHT(tmp, field) = (head)->sph_root; \
+ tmp = (head)->sph_root; \
+ (head)->sph_root = SPLAY_RIGHT((head)->sph_root, field); \
+} while (0)
+
+#define SPLAY_ASSEMBLE(head, node, left, right, field) do { \
+ SPLAY_RIGHT(left, field) = SPLAY_LEFT((head)->sph_root, field); \
+ SPLAY_LEFT(right, field) = SPLAY_RIGHT((head)->sph_root, field);\
+ SPLAY_LEFT((head)->sph_root, field) = SPLAY_RIGHT(node, field); \
+ SPLAY_RIGHT((head)->sph_root, field) = SPLAY_LEFT(node, field); \
+} while (0)
+
+/* Generates prototypes and inline functions */
+
+#define SPLAY_PROTOTYPE(name, type, field, cmp) \
+void name##_SPLAY(struct name *, struct type *); \
+void name##_SPLAY_MINMAX(struct name *, int); \
+struct type *name##_SPLAY_INSERT(struct name *, struct type *); \
+struct type *name##_SPLAY_REMOVE(struct name *, struct type *); \
+ \
+/* Finds the node with the same key as elm */ \
+static __inline struct type * \
+name##_SPLAY_FIND(struct name *head, struct type *elm) \
+{ \
+ if (SPLAY_EMPTY(head)) \
+ return(NULL); \
+ name##_SPLAY(head, elm); \
+ if ((cmp)(elm, (head)->sph_root) == 0) \
+ return (head->sph_root); \
+ return (NULL); \
+} \
+ \
+static __inline struct type * \
+name##_SPLAY_NEXT(struct name *head, struct type *elm) \
+{ \
+ name##_SPLAY(head, elm); \
+ if (SPLAY_RIGHT(elm, field) != NULL) { \
+ elm = SPLAY_RIGHT(elm, field); \
+ while (SPLAY_LEFT(elm, field) != NULL) { \
+ elm = SPLAY_LEFT(elm, field); \
+ } \
+ } else \
+ elm = NULL; \
+ return (elm); \
+} \
+ \
+static __inline struct type * \
+name##_SPLAY_MIN_MAX(struct name *head, int val) \
+{ \
+ name##_SPLAY_MINMAX(head, val); \
+ return (SPLAY_ROOT(head)); \
+}
+
+/* Main splay operation.
+ * Moves node close to the key of elm to top
+ */
+#define SPLAY_GENERATE(name, type, field, cmp) \
+struct type * \
+name##_SPLAY_INSERT(struct name *head, struct type *elm) \
+{ \
+ if (SPLAY_EMPTY(head)) { \
+ SPLAY_LEFT(elm, field) = SPLAY_RIGHT(elm, field) = NULL; \
+ } else { \
+ int __comp; \
+ name##_SPLAY(head, elm); \
+ __comp = (cmp)(elm, (head)->sph_root); \
+ if(__comp < 0) { \
+ SPLAY_LEFT(elm, field) = SPLAY_LEFT((head)->sph_root, field);\
+ SPLAY_RIGHT(elm, field) = (head)->sph_root; \
+ SPLAY_LEFT((head)->sph_root, field) = NULL; \
+ } else if (__comp > 0) { \
+ SPLAY_RIGHT(elm, field) = SPLAY_RIGHT((head)->sph_root, field);\
+ SPLAY_LEFT(elm, field) = (head)->sph_root; \
+ SPLAY_RIGHT((head)->sph_root, field) = NULL; \
+ } else \
+ return ((head)->sph_root); \
+ } \
+ (head)->sph_root = (elm); \
+ return (NULL); \
+} \
+ \
+struct type * \
+name##_SPLAY_REMOVE(struct name *head, struct type *elm) \
+{ \
+ struct type *__tmp; \
+ if (SPLAY_EMPTY(head)) \
+ return (NULL); \
+ name##_SPLAY(head, elm); \
+ if ((cmp)(elm, (head)->sph_root) == 0) { \
+ if (SPLAY_LEFT((head)->sph_root, field) == NULL) { \
+ (head)->sph_root = SPLAY_RIGHT((head)->sph_root, field);\
+ } else { \
+ __tmp = SPLAY_RIGHT((head)->sph_root, field); \
+ (head)->sph_root = SPLAY_LEFT((head)->sph_root, field);\
+ name##_SPLAY(head, elm); \
+ SPLAY_RIGHT((head)->sph_root, field) = __tmp; \
+ } \
+ return (elm); \
+ } \
+ return (NULL); \
+} \
+ \
+void \
+name##_SPLAY(struct name *head, struct type *elm) \
+{ \
+ struct type __node, *__left, *__right, *__tmp; \
+ int __comp; \
+\
+ SPLAY_LEFT(&__node, field) = SPLAY_RIGHT(&__node, field) = NULL;\
+ __left = __right = &__node; \
+\
+ while ((__comp = (cmp)(elm, (head)->sph_root))) { \
+ if (__comp < 0) { \
+ __tmp = SPLAY_LEFT((head)->sph_root, field); \
+ if (__tmp == NULL) \
+ break; \
+ if ((cmp)(elm, __tmp) < 0){ \
+ SPLAY_ROTATE_RIGHT(head, __tmp, field); \
+ if (SPLAY_LEFT((head)->sph_root, field) == NULL)\
+ break; \
+ } \
+ SPLAY_LINKLEFT(head, __right, field); \
+ } else if (__comp > 0) { \
+ __tmp = SPLAY_RIGHT((head)->sph_root, field); \
+ if (__tmp == NULL) \
+ break; \
+ if ((cmp)(elm, __tmp) > 0){ \
+ SPLAY_ROTATE_LEFT(head, __tmp, field); \
+ if (SPLAY_RIGHT((head)->sph_root, field) == NULL)\
+ break; \
+ } \
+ SPLAY_LINKRIGHT(head, __left, field); \
+ } \
+ } \
+ SPLAY_ASSEMBLE(head, &__node, __left, __right, field); \
+} \
+ \
+/* Splay with either the minimum or the maximum element \
+ * Used to find minimum or maximum element in tree. \
+ */ \
+void name##_SPLAY_MINMAX(struct name *head, int __comp) \
+{ \
+ struct type __node, *__left, *__right, *__tmp; \
+\
+ SPLAY_LEFT(&__node, field) = SPLAY_RIGHT(&__node, field) = NULL;\
+ __left = __right = &__node; \
+\
+ while (1) { \
+ if (__comp < 0) { \
+ __tmp = SPLAY_LEFT((head)->sph_root, field); \
+ if (__tmp == NULL) \
+ break; \
+ if (__comp < 0){ \
+ SPLAY_ROTATE_RIGHT(head, __tmp, field); \
+ if (SPLAY_LEFT((head)->sph_root, field) == NULL)\
+ break; \
+ } \
+ SPLAY_LINKLEFT(head, __right, field); \
+ } else if (__comp > 0) { \
+ __tmp = SPLAY_RIGHT((head)->sph_root, field); \
+ if (__tmp == NULL) \
+ break; \
+ if (__comp > 0) { \
+ SPLAY_ROTATE_LEFT(head, __tmp, field); \
+ if (SPLAY_RIGHT((head)->sph_root, field) == NULL)\
+ break; \
+ } \
+ SPLAY_LINKRIGHT(head, __left, field); \
+ } \
+ } \
+ SPLAY_ASSEMBLE(head, &__node, __left, __right, field); \
+}
+
+#define SPLAY_NEGINF -1
+#define SPLAY_INF 1
+
+#define SPLAY_INSERT(name, x, y) name##_SPLAY_INSERT(x, y)
+#define SPLAY_REMOVE(name, x, y) name##_SPLAY_REMOVE(x, y)
+#define SPLAY_FIND(name, x, y) name##_SPLAY_FIND(x, y)
+#define SPLAY_NEXT(name, x, y) name##_SPLAY_NEXT(x, y)
+#define SPLAY_MIN(name, x) (SPLAY_EMPTY(x) ? NULL \
+ : name##_SPLAY_MIN_MAX(x, SPLAY_NEGINF))
+#define SPLAY_MAX(name, x) (SPLAY_EMPTY(x) ? NULL \
+ : name##_SPLAY_MIN_MAX(x, SPLAY_INF))
+
+#define SPLAY_FOREACH(x, name, head) \
+ for ((x) = SPLAY_MIN(name, head); \
+ (x) != NULL; \
+ (x) = SPLAY_NEXT(name, head, x))
+
+/* Macros that define a red-black tree */
+#define RB_HEAD(name, type) \
+struct name { \
+ struct type *rbh_root; /* root of the tree */ \
+}
+
+#define RB_INITIALIZER(root) \
+ { NULL }
+
+#define RB_INIT(root) do { \
+ (root)->rbh_root = NULL; \
+} while (0)
+
+#define RB_BLACK 0
+#define RB_RED 1
+#define RB_ENTRY(type) \
+struct { \
+ struct type *rbe_left; /* left element */ \
+ struct type *rbe_right; /* right element */ \
+ struct type *rbe_parent; /* parent element */ \
+ int rbe_color; /* node color */ \
+}
+
+#define RB_LEFT(elm, field) (elm)->field.rbe_left
+#define RB_RIGHT(elm, field) (elm)->field.rbe_right
+#define RB_PARENT(elm, field) (elm)->field.rbe_parent
+#define RB_COLOR(elm, field) (elm)->field.rbe_color
+#define RB_ROOT(head) (head)->rbh_root
+#define RB_EMPTY(head) (RB_ROOT(head) == NULL)
+
+#define RB_SET(elm, parent, field) do { \
+ RB_PARENT(elm, field) = parent; \
+ RB_LEFT(elm, field) = RB_RIGHT(elm, field) = NULL; \
+ RB_COLOR(elm, field) = RB_RED; \
+} while (0)
+
+#define RB_SET_BLACKRED(black, red, field) do { \
+ RB_COLOR(black, field) = RB_BLACK; \
+ RB_COLOR(red, field) = RB_RED; \
+} while (0)
+
+#ifndef RB_AUGMENT
+#define RB_AUGMENT(x)
+#endif
+
+#define RB_ROTATE_LEFT(head, elm, tmp, field) do { \
+ (tmp) = RB_RIGHT(elm, field); \
+ if ((RB_RIGHT(elm, field) = RB_LEFT(tmp, field))) { \
+ RB_PARENT(RB_LEFT(tmp, field), field) = (elm); \
+ } \
+ RB_AUGMENT(elm); \
+ if ((RB_PARENT(tmp, field) = RB_PARENT(elm, field))) { \
+ if ((elm) == RB_LEFT(RB_PARENT(elm, field), field)) \
+ RB_LEFT(RB_PARENT(elm, field), field) = (tmp); \
+ else \
+ RB_RIGHT(RB_PARENT(elm, field), field) = (tmp); \
+ } else \
+ (head)->rbh_root = (tmp); \
+ RB_LEFT(tmp, field) = (elm); \
+ RB_PARENT(elm, field) = (tmp); \
+ RB_AUGMENT(tmp); \
+ if ((RB_PARENT(tmp, field))) \
+ RB_AUGMENT(RB_PARENT(tmp, field)); \
+} while (0)
+
+#define RB_ROTATE_RIGHT(head, elm, tmp, field) do { \
+ (tmp) = RB_LEFT(elm, field); \
+ if ((RB_LEFT(elm, field) = RB_RIGHT(tmp, field))) { \
+ RB_PARENT(RB_RIGHT(tmp, field), field) = (elm); \
+ } \
+ RB_AUGMENT(elm); \
+ if ((RB_PARENT(tmp, field) = RB_PARENT(elm, field))) { \
+ if ((elm) == RB_LEFT(RB_PARENT(elm, field), field)) \
+ RB_LEFT(RB_PARENT(elm, field), field) = (tmp); \
+ else \
+ RB_RIGHT(RB_PARENT(elm, field), field) = (tmp); \
+ } else \
+ (head)->rbh_root = (tmp); \
+ RB_RIGHT(tmp, field) = (elm); \
+ RB_PARENT(elm, field) = (tmp); \
+ RB_AUGMENT(tmp); \
+ if ((RB_PARENT(tmp, field))) \
+ RB_AUGMENT(RB_PARENT(tmp, field)); \
+} while (0)
+
+/* Generates prototypes and inline functions */
+#define RB_PROTOTYPE(name, type, field, cmp) \
+void name##_RB_INSERT_COLOR(struct name *, struct type *); \
+void name##_RB_REMOVE_COLOR(struct name *, struct type *, struct type *);\
+struct type *name##_RB_REMOVE(struct name *, struct type *); \
+struct type *name##_RB_INSERT(struct name *, struct type *); \
+struct type *name##_RB_FIND(struct name *, struct type *); \
+struct type *name##_RB_NEXT(struct type *); \
+struct type *name##_RB_MINMAX(struct name *, int); \
+ \
+
+/* Main rb operation.
+ * Moves node close to the key of elm to top
+ */
+#define RB_GENERATE(name, type, field, cmp) \
+void \
+name##_RB_INSERT_COLOR(struct name *head, struct type *elm) \
+{ \
+ struct type *parent, *gparent, *tmp; \
+ while ((parent = RB_PARENT(elm, field)) && \
+ RB_COLOR(parent, field) == RB_RED) { \
+ gparent = RB_PARENT(parent, field); \
+ if (parent == RB_LEFT(gparent, field)) { \
+ tmp = RB_RIGHT(gparent, field); \
+ if (tmp && RB_COLOR(tmp, field) == RB_RED) { \
+ RB_COLOR(tmp, field) = RB_BLACK; \
+ RB_SET_BLACKRED(parent, gparent, field);\
+ elm = gparent; \
+ continue; \
+ } \
+ if (RB_RIGHT(parent, field) == elm) { \
+ RB_ROTATE_LEFT(head, parent, tmp, field);\
+ tmp = parent; \
+ parent = elm; \
+ elm = tmp; \
+ } \
+ RB_SET_BLACKRED(parent, gparent, field); \
+ RB_ROTATE_RIGHT(head, gparent, tmp, field); \
+ } else { \
+ tmp = RB_LEFT(gparent, field); \
+ if (tmp && RB_COLOR(tmp, field) == RB_RED) { \
+ RB_COLOR(tmp, field) = RB_BLACK; \
+ RB_SET_BLACKRED(parent, gparent, field);\
+ elm = gparent; \
+ continue; \
+ } \
+ if (RB_LEFT(parent, field) == elm) { \
+ RB_ROTATE_RIGHT(head, parent, tmp, field);\
+ tmp = parent; \
+ parent = elm; \
+ elm = tmp; \
+ } \
+ RB_SET_BLACKRED(parent, gparent, field); \
+ RB_ROTATE_LEFT(head, gparent, tmp, field); \
+ } \
+ } \
+ RB_COLOR(head->rbh_root, field) = RB_BLACK; \
+} \
+ \
+void \
+name##_RB_REMOVE_COLOR(struct name *head, struct type *parent, struct type *elm) \
+{ \
+ struct type *tmp; \
+ while ((elm == NULL || RB_COLOR(elm, field) == RB_BLACK) && \
+ elm != RB_ROOT(head)) { \
+ if (RB_LEFT(parent, field) == elm) { \
+ tmp = RB_RIGHT(parent, field); \
+ if (RB_COLOR(tmp, field) == RB_RED) { \
+ RB_SET_BLACKRED(tmp, parent, field); \
+ RB_ROTATE_LEFT(head, parent, tmp, field);\
+ tmp = RB_RIGHT(parent, field); \
+ } \
+ if ((RB_LEFT(tmp, field) == NULL || \
+ RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) &&\
+ (RB_RIGHT(tmp, field) == NULL || \
+ RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK)) {\
+ RB_COLOR(tmp, field) = RB_RED; \
+ elm = parent; \
+ parent = RB_PARENT(elm, field); \
+ } else { \
+ if (RB_RIGHT(tmp, field) == NULL || \
+ RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK) {\
+ struct type *oleft; \
+ if ((oleft = RB_LEFT(tmp, field)))\
+ RB_COLOR(oleft, field) = RB_BLACK;\
+ RB_COLOR(tmp, field) = RB_RED; \
+ RB_ROTATE_RIGHT(head, tmp, oleft, field);\
+ tmp = RB_RIGHT(parent, field); \
+ } \
+ RB_COLOR(tmp, field) = RB_COLOR(parent, field);\
+ RB_COLOR(parent, field) = RB_BLACK; \
+ if (RB_RIGHT(tmp, field)) \
+ RB_COLOR(RB_RIGHT(tmp, field), field) = RB_BLACK;\
+ RB_ROTATE_LEFT(head, parent, tmp, field);\
+ elm = RB_ROOT(head); \
+ break; \
+ } \
+ } else { \
+ tmp = RB_LEFT(parent, field); \
+ if (RB_COLOR(tmp, field) == RB_RED) { \
+ RB_SET_BLACKRED(tmp, parent, field); \
+ RB_ROTATE_RIGHT(head, parent, tmp, field);\
+ tmp = RB_LEFT(parent, field); \
+ } \
+ if ((RB_LEFT(tmp, field) == NULL || \
+ RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) &&\
+ (RB_RIGHT(tmp, field) == NULL || \
+ RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK)) {\
+ RB_COLOR(tmp, field) = RB_RED; \
+ elm = parent; \
+ parent = RB_PARENT(elm, field); \
+ } else { \
+ if (RB_LEFT(tmp, field) == NULL || \
+ RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) {\
+ struct type *oright; \
+ if ((oright = RB_RIGHT(tmp, field)))\
+ RB_COLOR(oright, field) = RB_BLACK;\
+ RB_COLOR(tmp, field) = RB_RED; \
+ RB_ROTATE_LEFT(head, tmp, oright, field);\
+ tmp = RB_LEFT(parent, field); \
+ } \
+ RB_COLOR(tmp, field) = RB_COLOR(parent, field);\
+ RB_COLOR(parent, field) = RB_BLACK; \
+ if (RB_LEFT(tmp, field)) \
+ RB_COLOR(RB_LEFT(tmp, field), field) = RB_BLACK;\
+ RB_ROTATE_RIGHT(head, parent, tmp, field);\
+ elm = RB_ROOT(head); \
+ break; \
+ } \
+ } \
+ } \
+ if (elm) \
+ RB_COLOR(elm, field) = RB_BLACK; \
+} \
+ \
+struct type * \
+name##_RB_REMOVE(struct name *head, struct type *elm) \
+{ \
+ struct type *child, *parent, *old = elm; \
+ int color; \
+ if (RB_LEFT(elm, field) == NULL) \
+ child = RB_RIGHT(elm, field); \
+ else if (RB_RIGHT(elm, field) == NULL) \
+ child = RB_LEFT(elm, field); \
+ else { \
+ struct type *left; \
+ elm = RB_RIGHT(elm, field); \
+ while ((left = RB_LEFT(elm, field))) \
+ elm = left; \
+ child = RB_RIGHT(elm, field); \
+ parent = RB_PARENT(elm, field); \
+ color = RB_COLOR(elm, field); \
+ if (child) \
+ RB_PARENT(child, field) = parent; \
+ if (parent) { \
+ if (RB_LEFT(parent, field) == elm) \
+ RB_LEFT(parent, field) = child; \
+ else \
+ RB_RIGHT(parent, field) = child; \
+ RB_AUGMENT(parent); \
+ } else \
+ RB_ROOT(head) = child; \
+ if (RB_PARENT(elm, field) == old) \
+ parent = elm; \
+ (elm)->field = (old)->field; \
+ if (RB_PARENT(old, field)) { \
+ if (RB_LEFT(RB_PARENT(old, field), field) == old)\
+ RB_LEFT(RB_PARENT(old, field), field) = elm;\
+ else \
+ RB_RIGHT(RB_PARENT(old, field), field) = elm;\
+ RB_AUGMENT(RB_PARENT(old, field)); \
+ } else \
+ RB_ROOT(head) = elm; \
+ RB_PARENT(RB_LEFT(old, field), field) = elm; \
+ if (RB_RIGHT(old, field)) \
+ RB_PARENT(RB_RIGHT(old, field), field) = elm; \
+ if (parent) { \
+ left = parent; \
+ do { \
+ RB_AUGMENT(left); \
+ } while ((left = RB_PARENT(left, field))); \
+ } \
+ goto color; \
+ } \
+ parent = RB_PARENT(elm, field); \
+ color = RB_COLOR(elm, field); \
+ if (child) \
+ RB_PARENT(child, field) = parent; \
+ if (parent) { \
+ if (RB_LEFT(parent, field) == elm) \
+ RB_LEFT(parent, field) = child; \
+ else \
+ RB_RIGHT(parent, field) = child; \
+ RB_AUGMENT(parent); \
+ } else \
+ RB_ROOT(head) = child; \
+color: \
+ if (color == RB_BLACK) \
+ name##_RB_REMOVE_COLOR(head, parent, child); \
+ return (old); \
+} \
+ \
+/* Inserts a node into the RB tree */ \
+struct type * \
+name##_RB_INSERT(struct name *head, struct type *elm) \
+{ \
+ struct type *tmp; \
+ struct type *parent = NULL; \
+ int comp = 0; \
+ tmp = RB_ROOT(head); \
+ while (tmp) { \
+ parent = tmp; \
+ comp = (cmp)(elm, parent); \
+ if (comp < 0) \
+ tmp = RB_LEFT(tmp, field); \
+ else if (comp > 0) \
+ tmp = RB_RIGHT(tmp, field); \
+ else \
+ return (tmp); \
+ } \
+ RB_SET(elm, parent, field); \
+ if (parent != NULL) { \
+ if (comp < 0) \
+ RB_LEFT(parent, field) = elm; \
+ else \
+ RB_RIGHT(parent, field) = elm; \
+ RB_AUGMENT(parent); \
+ } else \
+ RB_ROOT(head) = elm; \
+ name##_RB_INSERT_COLOR(head, elm); \
+ return (NULL); \
+} \
+ \
+/* Finds the node with the same key as elm */ \
+struct type * \
+name##_RB_FIND(struct name *head, struct type *elm) \
+{ \
+ struct type *tmp = RB_ROOT(head); \
+ int comp; \
+ while (tmp) { \
+ comp = cmp(elm, tmp); \
+ if (comp < 0) \
+ tmp = RB_LEFT(tmp, field); \
+ else if (comp > 0) \
+ tmp = RB_RIGHT(tmp, field); \
+ else \
+ return (tmp); \
+ } \
+ return (NULL); \
+} \
+ \
+struct type * \
+name##_RB_NEXT(struct type *elm) \
+{ \
+ if (RB_RIGHT(elm, field)) { \
+ elm = RB_RIGHT(elm, field); \
+ while (RB_LEFT(elm, field)) \
+ elm = RB_LEFT(elm, field); \
+ } else { \
+ if (RB_PARENT(elm, field) && \
+ (elm == RB_LEFT(RB_PARENT(elm, field), field))) \
+ elm = RB_PARENT(elm, field); \
+ else { \
+ while (RB_PARENT(elm, field) && \
+ (elm == RB_RIGHT(RB_PARENT(elm, field), field)))\
+ elm = RB_PARENT(elm, field); \
+ elm = RB_PARENT(elm, field); \
+ } \
+ } \
+ return (elm); \
+} \
+ \
+struct type * \
+name##_RB_MINMAX(struct name *head, int val) \
+{ \
+ struct type *tmp = RB_ROOT(head); \
+ struct type *parent = NULL; \
+ while (tmp) { \
+ parent = tmp; \
+ if (val < 0) \
+ tmp = RB_LEFT(tmp, field); \
+ else \
+ tmp = RB_RIGHT(tmp, field); \
+ } \
+ return (parent); \
+}
+
+#define RB_NEGINF -1
+#define RB_INF 1
+
+#define RB_INSERT(name, x, y) name##_RB_INSERT(x, y)
+#define RB_REMOVE(name, x, y) name##_RB_REMOVE(x, y)
+#define RB_FIND(name, x, y) name##_RB_FIND(x, y)
+#define RB_NEXT(name, x, y) name##_RB_NEXT(y)
+#define RB_MIN(name, x) name##_RB_MINMAX(x, RB_NEGINF)
+#define RB_MAX(name, x) name##_RB_MINMAX(x, RB_INF)
+
+#define RB_FOREACH(x, name, head) \
+ for ((x) = RB_MIN(name, head); \
+ (x) != NULL; \
+ (x) = name##_RB_NEXT(x))
+
+#endif /* _SYS_TREE_H_ */
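To make the macro pattern above concrete, a self-contained sketch (hypothetical, not part of this commit) that instantiates a red-black tree keyed on an int; the struct, tree, and comparator names are invented for the example.

#include <stdio.h>
#include "tree.h"

struct node {
	RB_ENTRY(node) entry;	/* embedded left/right/parent/color links */
	int key;
};

static int
nodecmp(struct node *a, struct node *b)
{
	return (a->key < b->key) ? -1 : (a->key > b->key);
}

RB_HEAD(nodetree, node);
RB_PROTOTYPE(nodetree, node, entry, nodecmp)
RB_GENERATE(nodetree, node, entry, nodecmp)

int
main(void)
{
	struct nodetree head = RB_INITIALIZER(&head);
	struct node a = { .key = 2 }, b = { .key = 1 }, *it;

	RB_INSERT(nodetree, &head, &a);
	RB_INSERT(nodetree, &head, &b);
	RB_FOREACH(it, nodetree, &head)
		printf("%d\n", it->key);	/* prints 1 then 2, in key order */
	return 0;
}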
diff --git a/libs/libevent/src/arc4random.c b/libs/libevent/src/arc4random.c
new file mode 100644
index 0000000000..a2338e692a
--- /dev/null
+++ b/libs/libevent/src/arc4random.c
@@ -0,0 +1,556 @@
+/* Portable arc4random.c based on arc4random.c from OpenBSD.
+ * Portable version by Chris Davis, adapted for Libevent by Nick Mathewson
+ * Copyright (c) 2010 Chris Davis, Niels Provos, and Nick Mathewson
+ * Copyright (c) 2010-2012 Niels Provos and Nick Mathewson
+ *
+ * Note that in Libevent, this file isn't compiled directly. Instead,
+ * it's included from evutil_rand.c
+ */
+
+/*
+ * Copyright (c) 1996, David Mazieres <dm@uun.org>
+ * Copyright (c) 2008, Damien Miller <djm@openbsd.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * Arc4 random number generator for OpenBSD.
+ *
+ * This code is derived from section 17.1 of Applied Cryptography,
+ * second edition, which describes a stream cipher allegedly
+ * compatible with RSA Labs "RC4" cipher (the actual description of
+ * which is a trade secret). The same algorithm is used as a stream
+ * cipher called "arcfour" in Tatu Ylonen's ssh package.
+ *
+ * Here the stream cipher has been modified always to include the time
+ * when initializing the state. That makes it impossible to
+ * regenerate the same random sequence twice, so this can't be used
+ * for encryption, but will generate good random numbers.
+ *
+ * RC4 is a registered trademark of RSA Laboratories.
+ */
+
+#ifndef ARC4RANDOM_EXPORT
+#define ARC4RANDOM_EXPORT
+#endif
+
+#ifndef ARC4RANDOM_UINT32
+#define ARC4RANDOM_UINT32 uint32_t
+#endif
+
+#ifndef ARC4RANDOM_NO_INCLUDES
+#include "evconfig-private.h"
+#ifdef _WIN32
+#include <wincrypt.h>
+#include <process.h>
+#else
+#include <fcntl.h>
+#include <unistd.h>
+#include <sys/param.h>
+#include <sys/time.h>
+#ifdef EVENT__HAVE_SYS_SYSCTL_H
+#include <sys/sysctl.h>
+#endif
+#endif
+#include <limits.h>
+#include <stdlib.h>
+#include <string.h>
+#endif
+
+/* Add platform entropy 32 bytes (256 bits) at a time. */
+#define ADD_ENTROPY 32
+
+/* Re-seed from the platform RNG after generating this many bytes. */
+#define BYTES_BEFORE_RESEED 1600000
+
+struct arc4_stream {
+ unsigned char i;
+ unsigned char j;
+ unsigned char s[256];
+};
+
+#ifdef _WIN32
+#define getpid _getpid
+#define pid_t int
+#endif
+
+static int rs_initialized;
+static struct arc4_stream rs;
+static pid_t arc4_stir_pid;
+static int arc4_count;
+static int arc4_seeded_ok;
+
+static inline unsigned char arc4_getbyte(void);
+
+static inline void
+arc4_init(void)
+{
+ int n;
+
+ for (n = 0; n < 256; n++)
+ rs.s[n] = n;
+ rs.i = 0;
+ rs.j = 0;
+}
+
+static inline void
+arc4_addrandom(const unsigned char *dat, int datlen)
+{
+ int n;
+ unsigned char si;
+
+ rs.i--;
+ for (n = 0; n < 256; n++) {
+ rs.i = (rs.i + 1);
+ si = rs.s[rs.i];
+ rs.j = (rs.j + si + dat[n % datlen]);
+ rs.s[rs.i] = rs.s[rs.j];
+ rs.s[rs.j] = si;
+ }
+ rs.j = rs.i;
+}
+
+#ifndef _WIN32
+static ssize_t
+read_all(int fd, unsigned char *buf, size_t count)
+{
+ size_t numread = 0;
+ ssize_t result;
+
+ while (numread < count) {
+ result = read(fd, buf+numread, count-numread);
+ if (result<0)
+ return -1;
+ else if (result == 0)
+ break;
+ numread += result;
+ }
+
+ return (ssize_t)numread;
+}
+#endif
+
+#ifdef _WIN32
+#define TRY_SEED_WIN32
+static int
+arc4_seed_win32(void)
+{
+ /* This is adapted from Tor's crypto_seed_rng() */
+ static int provider_set = 0;
+ static HCRYPTPROV provider;
+ unsigned char buf[ADD_ENTROPY];
+
+ if (!provider_set) {
+ if (!CryptAcquireContext(&provider, NULL, NULL, PROV_RSA_FULL,
+ CRYPT_VERIFYCONTEXT)) {
+ if (GetLastError() != (DWORD)NTE_BAD_KEYSET)
+ return -1;
+ }
+ provider_set = 1;
+ }
+ if (!CryptGenRandom(provider, sizeof(buf), buf))
+ return -1;
+ arc4_addrandom(buf, sizeof(buf));
+ evutil_memclear_(buf, sizeof(buf));
+ arc4_seeded_ok = 1;
+ return 0;
+}
+#endif
+
+#if defined(EVENT__HAVE_SYS_SYSCTL_H) && defined(EVENT__HAVE_SYSCTL)
+#if EVENT__HAVE_DECL_CTL_KERN && EVENT__HAVE_DECL_KERN_RANDOM && EVENT__HAVE_DECL_RANDOM_UUID
+#define TRY_SEED_SYSCTL_LINUX
+static int
+arc4_seed_sysctl_linux(void)
+{
+ /* Based on code by William Ahern, this function tries to use the
+ * RANDOM_UUID sysctl to get entropy from the kernel. This can work
+ * even if /dev/urandom is inaccessible for some reason (e.g., we're
+ * running in a chroot). */
+ int mib[] = { CTL_KERN, KERN_RANDOM, RANDOM_UUID };
+ unsigned char buf[ADD_ENTROPY];
+ size_t len, n;
+ unsigned i;
+ int any_set;
+
+ memset(buf, 0, sizeof(buf));
+
+ for (len = 0; len < sizeof(buf); len += n) {
+ n = sizeof(buf) - len;
+
+ if (0 != sysctl(mib, 3, &buf[len], &n, NULL, 0))
+ return -1;
+ }
+ /* make sure that the buffer actually got set. */
+ for (i=0,any_set=0; i<sizeof(buf); ++i) {
+ any_set |= buf[i];
+ }
+ if (!any_set)
+ return -1;
+
+ arc4_addrandom(buf, sizeof(buf));
+ evutil_memclear_(buf, sizeof(buf));
+ arc4_seeded_ok = 1;
+ return 0;
+}
+#endif
+
+#if EVENT__HAVE_DECL_CTL_KERN && EVENT__HAVE_DECL_KERN_ARND
+#define TRY_SEED_SYSCTL_BSD
+static int
+arc4_seed_sysctl_bsd(void)
+{
+ /* Based on code from William Ahern and from OpenBSD, this function
+ * tries to use the KERN_ARND syscall to get entropy from the kernel.
+ * This can work even if /dev/urandom is inaccessible for some reason
+ * (e.g., we're running in a chroot). */
+ int mib[] = { CTL_KERN, KERN_ARND };
+ unsigned char buf[ADD_ENTROPY];
+ size_t len, n;
+ int i, any_set;
+
+ memset(buf, 0, sizeof(buf));
+
+ len = sizeof(buf);
+ if (sysctl(mib, 2, buf, &len, NULL, 0) == -1) {
+ for (len = 0; len < sizeof(buf); len += sizeof(unsigned)) {
+ n = sizeof(unsigned);
+ if (n + len > sizeof(buf))
+ n = len - sizeof(buf);
+ if (sysctl(mib, 2, &buf[len], &n, NULL, 0) == -1)
+ return -1;
+ }
+ }
+ /* make sure that the buffer actually got set. */
+ for (i=any_set=0; i<sizeof(buf); ++i) {
+ any_set |= buf[i];
+ }
+ if (!any_set)
+ return -1;
+
+ arc4_addrandom(buf, sizeof(buf));
+ evutil_memclear_(buf, sizeof(buf));
+ arc4_seeded_ok = 1;
+ return 0;
+}
+#endif
+#endif /* defined(EVENT__HAVE_SYS_SYSCTL_H) */
+
+#ifdef __linux__
+#define TRY_SEED_PROC_SYS_KERNEL_RANDOM_UUID
+static int
+arc4_seed_proc_sys_kernel_random_uuid(void)
+{
+ /* Occasionally, somebody will make /proc/sys accessible in a chroot,
+ * but not /dev/urandom. Let's try /proc/sys/kernel/random/uuid.
+ * Its format is stupid, so we need to decode it from hex.
+ */
+ int fd;
+ char buf[128];
+ unsigned char entropy[64];
+ int bytes, n, i, nybbles;
+ for (bytes = 0; bytes<ADD_ENTROPY; ) {
+ fd = evutil_open_closeonexec_("/proc/sys/kernel/random/uuid", O_RDONLY, 0);
+ if (fd < 0)
+ return -1;
+ n = read(fd, buf, sizeof(buf));
+ close(fd);
+ if (n<=0)
+ return -1;
+ memset(entropy, 0, sizeof(entropy));
+ for (i=nybbles=0; i<n; ++i) {
+ if (EVUTIL_ISXDIGIT_(buf[i])) {
+ int nyb = evutil_hex_char_to_int_(buf[i]);
+ if (nybbles & 1) {
+ entropy[nybbles/2] |= nyb;
+ } else {
+ entropy[nybbles/2] |= nyb<<4;
+ }
+ ++nybbles;
+ }
+ }
+ if (nybbles < 2)
+ return -1;
+ arc4_addrandom(entropy, nybbles/2);
+ bytes += nybbles/2;
+ }
+ evutil_memclear_(entropy, sizeof(entropy));
+ evutil_memclear_(buf, sizeof(buf));
+ arc4_seeded_ok = 1;
+ return 0;
+}
+#endif
+
+#ifndef _WIN32
+#define TRY_SEED_URANDOM
+static char *arc4random_urandom_filename = NULL;
+
+static int arc4_seed_urandom_helper_(const char *fname)
+{
+ unsigned char buf[ADD_ENTROPY];
+ int fd;
+ size_t n;
+
+ fd = evutil_open_closeonexec_(fname, O_RDONLY, 0);
+ if (fd<0)
+ return -1;
+ n = read_all(fd, buf, sizeof(buf));
+ close(fd);
+ if (n != sizeof(buf))
+ return -1;
+ arc4_addrandom(buf, sizeof(buf));
+ evutil_memclear_(buf, sizeof(buf));
+ arc4_seeded_ok = 1;
+ return 0;
+}
+
+static int
+arc4_seed_urandom(void)
+{
+ /* This is adapted from Tor's crypto_seed_rng() */
+ static const char *filenames[] = {
+ "/dev/srandom", "/dev/urandom", "/dev/random", NULL
+ };
+ int i;
+ if (arc4random_urandom_filename)
+ return arc4_seed_urandom_helper_(arc4random_urandom_filename);
+
+ for (i = 0; filenames[i]; ++i) {
+ if (arc4_seed_urandom_helper_(filenames[i]) == 0) {
+ return 0;
+ }
+ }
+
+ return -1;
+}
+#endif
+
+static int
+arc4_seed(void)
+{
+ int ok = 0;
+ /* We try every method that might work, and don't give up even if one
+ * does seem to work. There's no real harm in over-seeding, and if
+ * one of these sources turns out to be broken, that would be bad. */
+#ifdef TRY_SEED_WIN32
+ if (0 == arc4_seed_win32())
+ ok = 1;
+#endif
+#ifdef TRY_SEED_URANDOM
+ if (0 == arc4_seed_urandom())
+ ok = 1;
+#endif
+#ifdef TRY_SEED_PROC_SYS_KERNEL_RANDOM_UUID
+ if (arc4random_urandom_filename == NULL &&
+ 0 == arc4_seed_proc_sys_kernel_random_uuid())
+ ok = 1;
+#endif
+#ifdef TRY_SEED_SYSCTL_LINUX
+ /* Apparently Linux is deprecating sysctl, and spewing warning
+ * messages when you try to use it. */
+ if (!ok && 0 == arc4_seed_sysctl_linux())
+ ok = 1;
+#endif
+#ifdef TRY_SEED_SYSCTL_BSD
+ if (0 == arc4_seed_sysctl_bsd())
+ ok = 1;
+#endif
+ return ok ? 0 : -1;
+}
+
+static int
+arc4_stir(void)
+{
+ int i;
+
+ if (!rs_initialized) {
+ arc4_init();
+ rs_initialized = 1;
+ }
+
+ arc4_seed();
+ if (!arc4_seeded_ok)
+ return -1;
+
+ /*
+ * Discard early keystream, as per recommendations in
+ * "Weaknesses in the Key Scheduling Algorithm of RC4" by
+ * Scott Fluhrer, Itsik Mantin, and Adi Shamir.
+ * http://www.wisdom.weizmann.ac.il/~itsik/RC4/Papers/Rc4_ksa.ps
+ *
+ * Ilya Mironov's "(Not So) Random Shuffles of RC4" suggests that
+ * we drop at least 2*256 bytes, with 12*256 as a conservative
+ * value.
+ *
+ * RFC4345 says to drop 6*256.
+ *
+ * At least some versions of this code drop 4*256, in a mistaken
+ * belief that "words" in the Fluhrer/Mantin/Shamir paper refers
+ * to processor words.
+ *
+ * We add another sect to the cargo cult, and choose 12*256.
+ */
+ for (i = 0; i < 12*256; i++)
+ (void)arc4_getbyte();
+
+ arc4_count = BYTES_BEFORE_RESEED;
+
+ return 0;
+}
+
+
+static void
+arc4_stir_if_needed(void)
+{
+ pid_t pid = getpid();
+
+ if (arc4_count <= 0 || !rs_initialized || arc4_stir_pid != pid)
+ {
+ arc4_stir_pid = pid;
+ arc4_stir();
+ }
+}
+
+static inline unsigned char
+arc4_getbyte(void)
+{
+ unsigned char si, sj;
+
+ rs.i = (rs.i + 1);
+ si = rs.s[rs.i];
+ rs.j = (rs.j + si);
+ sj = rs.s[rs.j];
+ rs.s[rs.i] = sj;
+ rs.s[rs.j] = si;
+ return (rs.s[(si + sj) & 0xff]);
+}
+
+static inline unsigned int
+arc4_getword(void)
+{
+ unsigned int val;
+
+ val = arc4_getbyte() << 24;
+ val |= arc4_getbyte() << 16;
+ val |= arc4_getbyte() << 8;
+ val |= arc4_getbyte();
+
+ return val;
+}
+
+#ifndef ARC4RANDOM_NOSTIR
+ARC4RANDOM_EXPORT int
+arc4random_stir(void)
+{
+ int val;
+ ARC4_LOCK_();
+ val = arc4_stir();
+ ARC4_UNLOCK_();
+ return val;
+}
+#endif
+
+#ifndef ARC4RANDOM_NOADDRANDOM
+ARC4RANDOM_EXPORT void
+arc4random_addrandom(const unsigned char *dat, int datlen)
+{
+ int j;
+ ARC4_LOCK_();
+ if (!rs_initialized)
+ arc4_stir();
+ for (j = 0; j < datlen; j += 256) {
+ /* arc4_addrandom() ignores all but the first 256 bytes of
+ * its input. We want to make sure to look at ALL the
+ * data in 'dat', just in case the user is doing something
+ * crazy like passing us all the files in /var/log. */
+ arc4_addrandom(dat + j, datlen - j);
+ }
+ ARC4_UNLOCK_();
+}
+#endif
+
+#ifndef ARC4RANDOM_NORANDOM
+ARC4RANDOM_EXPORT ARC4RANDOM_UINT32
+arc4random(void)
+{
+ ARC4RANDOM_UINT32 val;
+ ARC4_LOCK_();
+ arc4_count -= 4;
+ arc4_stir_if_needed();
+ val = arc4_getword();
+ ARC4_UNLOCK_();
+ return val;
+}
+#endif
+
+ARC4RANDOM_EXPORT void
+arc4random_buf(void *buf_, size_t n)
+{
+ unsigned char *buf = buf_;
+ ARC4_LOCK_();
+ arc4_stir_if_needed();
+ while (n--) {
+ if (--arc4_count <= 0)
+ arc4_stir();
+ buf[n] = arc4_getbyte();
+ }
+ ARC4_UNLOCK_();
+}
+
+#ifndef ARC4RANDOM_NOUNIFORM
+/*
+ * Calculate a uniformly distributed random number less than upper_bound
+ * avoiding "modulo bias".
+ *
+ * Uniformity is achieved by generating new random numbers until the one
+ * returned is outside the range [0, 2**32 % upper_bound). This
+ * guarantees the selected random number will be inside
+ * [2**32 % upper_bound, 2**32) which maps back to [0, upper_bound)
+ * after reduction modulo upper_bound.
+ */
+ARC4RANDOM_EXPORT unsigned int
+arc4random_uniform(unsigned int upper_bound)
+{
+ ARC4RANDOM_UINT32 r, min;
+
+ if (upper_bound < 2)
+ return 0;
+
+#if (UINT_MAX > 0xffffffffUL)
+ min = 0x100000000UL % upper_bound;
+#else
+ /* Calculate (2**32 % upper_bound) avoiding 64-bit math */
+ if (upper_bound > 0x80000000)
+ min = 1 + ~upper_bound; /* 2**32 - upper_bound */
+ else {
+ /* (2**32 - (x * 2)) % x == 2**32 % x when x <= 2**31 */
+ min = ((0xffffffff - (upper_bound * 2)) + 1) % upper_bound;
+ }
+#endif
+
+ /*
+ * This could theoretically loop forever but each retry has
+ * p > 0.5 (worst case, usually far better) of selecting a
+ * number inside the range we need, so it should rarely need
+ * to re-roll.
+ */
+ for (;;) {
+ r = arc4random();
+ if (r >= min)
+ break;
+ }
+
+ return r % upper_bound;
+}
+#endif
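+
+/*
+ * Minimal usage sketch (illustrative only, not part of the original
+ * source): rolling an unbiased six-sided die.  A plain
+ * "arc4random() % 6" would be very slightly biased toward small values
+ * because 2**32 is not a multiple of 6; arc4random_uniform() re-rolls
+ * to avoid that, as described in the comment above.
+ *
+ *     unsigned int roll = arc4random_uniform(6) + 1;   // 1..6, unbiased
+ *     ARC4RANDOM_UINT32 raw = arc4random();            // full 32-bit word
+ *     unsigned char key[16];
+ *     arc4random_buf(key, sizeof(key));                // fill a small key
+ */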
diff --git a/libs/libevent/src/buffer.c b/libs/libevent/src/buffer.c
new file mode 100644
index 0000000000..7cca0e8a7d
--- /dev/null
+++ b/libs/libevent/src/buffer.c
@@ -0,0 +1,3439 @@
+/*
+ * Copyright (c) 2002-2007 Niels Provos <provos@citi.umich.edu>
+ * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "event2/event-config.h"
+#include "evconfig-private.h"
+
+#ifdef _WIN32
+#include <winsock2.h>
+#include <windows.h>
+#include <io.h>
+#endif
+
+#ifdef EVENT__HAVE_VASPRINTF
+/* If we have vasprintf, we need to define _GNU_SOURCE before we include
+ * stdio.h. This comes from evconfig-private.h.
+ */
+#endif
+
+#include <sys/types.h>
+
+#ifdef EVENT__HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+
+#ifdef EVENT__HAVE_SYS_SOCKET_H
+#include <sys/socket.h>
+#endif
+
+#ifdef EVENT__HAVE_SYS_UIO_H
+#include <sys/uio.h>
+#endif
+
+#ifdef EVENT__HAVE_SYS_IOCTL_H
+#include <sys/ioctl.h>
+#endif
+
+#ifdef EVENT__HAVE_SYS_MMAN_H
+#include <sys/mman.h>
+#endif
+
+#ifdef EVENT__HAVE_SYS_SENDFILE_H
+#include <sys/sendfile.h>
+#endif
+#ifdef EVENT__HAVE_SYS_STAT_H
+#include <sys/stat.h>
+#endif
+
+
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#ifdef EVENT__HAVE_STDARG_H
+#include <stdarg.h>
+#endif
+#ifdef EVENT__HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+#include <limits.h>
+
+#include "event2/event.h"
+#include "event2/buffer.h"
+#include "event2/buffer_compat.h"
+#include "event2/bufferevent.h"
+#include "event2/bufferevent_compat.h"
+#include "event2/bufferevent_struct.h"
+#include "event2/thread.h"
+#include "log-internal.h"
+#include "mm-internal.h"
+#include "util-internal.h"
+#include "evthread-internal.h"
+#include "evbuffer-internal.h"
+#include "bufferevent-internal.h"
+
+/* some systems do not have MAP_FAILED */
+#ifndef MAP_FAILED
+#define MAP_FAILED ((void *)-1)
+#endif
+
+/* send file support */
+#if defined(EVENT__HAVE_SYS_SENDFILE_H) && defined(EVENT__HAVE_SENDFILE) && defined(__linux__)
+#define USE_SENDFILE 1
+#define SENDFILE_IS_LINUX 1
+#elif defined(EVENT__HAVE_SENDFILE) && defined(__FreeBSD__)
+#define USE_SENDFILE 1
+#define SENDFILE_IS_FREEBSD 1
+#elif defined(EVENT__HAVE_SENDFILE) && defined(__APPLE__)
+#define USE_SENDFILE 1
+#define SENDFILE_IS_MACOSX 1
+#elif defined(EVENT__HAVE_SENDFILE) && defined(__sun__) && defined(__svr4__)
+#define USE_SENDFILE 1
+#define SENDFILE_IS_SOLARIS 1
+#endif
+
+/* Mask of user-selectable callback flags. */
+#define EVBUFFER_CB_USER_FLAGS 0xffff
+/* Mask of all internal-use-only flags. */
+#define EVBUFFER_CB_INTERNAL_FLAGS 0xffff0000
+
+/* Flag set if the callback is using the cb_obsolete function pointer */
+#define EVBUFFER_CB_OBSOLETE 0x00040000
+
+/* evbuffer_chain support */
+#define CHAIN_SPACE_PTR(ch) ((ch)->buffer + (ch)->misalign + (ch)->off)
+#define CHAIN_SPACE_LEN(ch) ((ch)->flags & EVBUFFER_IMMUTABLE ? \
+ 0 : (ch)->buffer_len - ((ch)->misalign + (ch)->off))
+
+#define CHAIN_PINNED(ch) (((ch)->flags & EVBUFFER_MEM_PINNED_ANY) != 0)
+#define CHAIN_PINNED_R(ch) (((ch)->flags & EVBUFFER_MEM_PINNED_R) != 0)
+
+/* evbuffer_ptr support */
+#define PTR_NOT_FOUND(ptr) do { \
+ (ptr)->pos = -1; \
+ (ptr)->internal_.chain = NULL; \
+ (ptr)->internal_.pos_in_chain = 0; \
+} while (0)
+
+static void evbuffer_chain_align(struct evbuffer_chain *chain);
+static int evbuffer_chain_should_realign(struct evbuffer_chain *chain,
+ size_t datalen);
+static void evbuffer_deferred_callback(struct event_callback *cb, void *arg);
+static int evbuffer_ptr_memcmp(const struct evbuffer *buf,
+ const struct evbuffer_ptr *pos, const char *mem, size_t len);
+static struct evbuffer_chain *evbuffer_expand_singlechain(struct evbuffer *buf,
+ size_t datlen);
+static int evbuffer_ptr_subtract(struct evbuffer *buf, struct evbuffer_ptr *pos,
+ size_t howfar);
+static int evbuffer_file_segment_materialize(struct evbuffer_file_segment *seg);
+static inline void evbuffer_chain_incref(struct evbuffer_chain *chain);
+
+static struct evbuffer_chain *
+evbuffer_chain_new(size_t size)
+{
+ struct evbuffer_chain *chain;
+ size_t to_alloc;
+
+ if (size > EVBUFFER_CHAIN_MAX - EVBUFFER_CHAIN_SIZE)
+ return (NULL);
+
+ size += EVBUFFER_CHAIN_SIZE;
+
+ /* round up to the next allocation size that can hold the buffer */
+ if (size < EVBUFFER_CHAIN_MAX / 2) {
+ to_alloc = MIN_BUFFER_SIZE;
+ while (to_alloc < size) {
+ to_alloc <<= 1;
+ }
+ } else {
+ to_alloc = size;
+ }
+
+ /* we get everything in one chunk */
+ if ((chain = mm_malloc(to_alloc)) == NULL)
+ return (NULL);
+
+ memset(chain, 0, EVBUFFER_CHAIN_SIZE);
+
+ chain->buffer_len = to_alloc - EVBUFFER_CHAIN_SIZE;
+
+ /* this way we can point the buffer at different addresses,
+ * which is required for mmap for example.
+ */
+ chain->buffer = EVBUFFER_CHAIN_EXTRA(unsigned char, chain);
+
+ chain->refcnt = 1;
+
+ return (chain);
+}
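+
+/*
+ * Sizing sketch (illustrative, not from the original source): a request
+ * for, say, 700 payload bytes first grows by EVBUFFER_CHAIN_SIZE (the
+ * chain header), and is then rounded up from MIN_BUFFER_SIZE by
+ * repeated doubling until it fits, so header and payload share one
+ * allocation of a "nice" size.  Only requests of EVBUFFER_CHAIN_MAX/2
+ * or more are allocated exactly as asked.
+ */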
+
+static inline void
+evbuffer_chain_free(struct evbuffer_chain *chain)
+{
+ EVUTIL_ASSERT(chain->refcnt > 0);
+ if (--chain->refcnt > 0) {
+ /* chain is still referenced by other chains */
+ return;
+ }
+
+ if (CHAIN_PINNED(chain)) {
+ /* will get freed once no longer dangling */
+ chain->refcnt++;
+ chain->flags |= EVBUFFER_DANGLING;
+ return;
+ }
+
+ /* safe to release chain, it's either a referencing
+ * chain or all references to it have been freed */
+ if (chain->flags & EVBUFFER_REFERENCE) {
+ struct evbuffer_chain_reference *info =
+ EVBUFFER_CHAIN_EXTRA(
+ struct evbuffer_chain_reference,
+ chain);
+ if (info->cleanupfn)
+ (*info->cleanupfn)(chain->buffer,
+ chain->buffer_len,
+ info->extra);
+ }
+ if (chain->flags & EVBUFFER_FILESEGMENT) {
+ struct evbuffer_chain_file_segment *info =
+ EVBUFFER_CHAIN_EXTRA(
+ struct evbuffer_chain_file_segment,
+ chain);
+ if (info->segment) {
+#ifdef _WIN32
+ if (info->segment->is_mapping)
+ UnmapViewOfFile(chain->buffer);
+#endif
+ evbuffer_file_segment_free(info->segment);
+ }
+ }
+ if (chain->flags & EVBUFFER_MULTICAST) {
+ struct evbuffer_multicast_parent *info =
+ EVBUFFER_CHAIN_EXTRA(
+ struct evbuffer_multicast_parent,
+ chain);
+ /* referencing chain is being freed, decrease
+ * refcounts of source chain and associated
+ * evbuffer (which get freed once both reach
+ * zero) */
+ EVUTIL_ASSERT(info->source != NULL);
+ EVUTIL_ASSERT(info->parent != NULL);
+ EVBUFFER_LOCK(info->source);
+ evbuffer_chain_free(info->parent);
+ evbuffer_decref_and_unlock_(info->source);
+ }
+
+ mm_free(chain);
+}
+
+static void
+evbuffer_free_all_chains(struct evbuffer_chain *chain)
+{
+ struct evbuffer_chain *next;
+ for (; chain; chain = next) {
+ next = chain->next;
+ evbuffer_chain_free(chain);
+ }
+}
+
+#ifndef NDEBUG
+static int
+evbuffer_chains_all_empty(struct evbuffer_chain *chain)
+{
+ for (; chain; chain = chain->next) {
+ if (chain->off)
+ return 0;
+ }
+ return 1;
+}
+#else
+/* The definition is needed for EVUTIL_ASSERT, which uses sizeof to avoid
+"unused variable" warnings. */
+static inline int evbuffer_chains_all_empty(struct evbuffer_chain *chain) {
+ return 1;
+}
+#endif
+
+/* Free all trailing chains in 'buf' that are neither pinned nor empty, prior
+ * to replacing them all with a new chain. Return a pointer to the place
+ * where the new chain will go.
+ *
+ * Internal; requires lock. The caller must fix up buf->last and buf->first
+ * as needed; they might have been freed.
+ */
+static struct evbuffer_chain **
+evbuffer_free_trailing_empty_chains(struct evbuffer *buf)
+{
+ struct evbuffer_chain **ch = buf->last_with_datap;
+ /* Find the first victim chain. It might be *last_with_datap */
+ while ((*ch) && ((*ch)->off != 0 || CHAIN_PINNED(*ch)))
+ ch = &(*ch)->next;
+ if (*ch) {
+ EVUTIL_ASSERT(evbuffer_chains_all_empty(*ch));
+ evbuffer_free_all_chains(*ch);
+ *ch = NULL;
+ }
+ return ch;
+}
+
+/* Add a single chain 'chain' to the end of 'buf', freeing trailing empty
+ * chains as necessary. Requires lock. Does not schedule callbacks.
+ */
+static void
+evbuffer_chain_insert(struct evbuffer *buf,
+ struct evbuffer_chain *chain)
+{
+ ASSERT_EVBUFFER_LOCKED(buf);
+ if (*buf->last_with_datap == NULL) {
+ /* There are no chains with data on the buffer at all. */
+ EVUTIL_ASSERT(buf->last_with_datap == &buf->first);
+ EVUTIL_ASSERT(buf->first == NULL);
+ buf->first = buf->last = chain;
+ } else {
+ struct evbuffer_chain **chp;
+ chp = evbuffer_free_trailing_empty_chains(buf);
+ *chp = chain;
+ if (chain->off)
+ buf->last_with_datap = chp;
+ buf->last = chain;
+ }
+ buf->total_len += chain->off;
+}
+
+static inline struct evbuffer_chain *
+evbuffer_chain_insert_new(struct evbuffer *buf, size_t datlen)
+{
+ struct evbuffer_chain *chain;
+ if ((chain = evbuffer_chain_new(datlen)) == NULL)
+ return NULL;
+ evbuffer_chain_insert(buf, chain);
+ return chain;
+}
+
+void
+evbuffer_chain_pin_(struct evbuffer_chain *chain, unsigned flag)
+{
+ EVUTIL_ASSERT((chain->flags & flag) == 0);
+ chain->flags |= flag;
+}
+
+void
+evbuffer_chain_unpin_(struct evbuffer_chain *chain, unsigned flag)
+{
+ EVUTIL_ASSERT((chain->flags & flag) != 0);
+ chain->flags &= ~flag;
+ if (chain->flags & EVBUFFER_DANGLING)
+ evbuffer_chain_free(chain);
+}
+
+static inline void
+evbuffer_chain_incref(struct evbuffer_chain *chain)
+{
+ ++chain->refcnt;
+}
+
+struct evbuffer *
+evbuffer_new(void)
+{
+ struct evbuffer *buffer;
+
+ buffer = mm_calloc(1, sizeof(struct evbuffer));
+ if (buffer == NULL)
+ return (NULL);
+
+ LIST_INIT(&buffer->callbacks);
+ buffer->refcnt = 1;
+ buffer->last_with_datap = &buffer->first;
+
+ return (buffer);
+}
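+
+/*
+ * Minimal usage sketch (illustrative only, not part of the original
+ * source): create a buffer, append some bytes, and drain them back out
+ * using only the public API declared in event2/buffer.h.
+ *
+ *     struct evbuffer *b = evbuffer_new();
+ *     char out[6];
+ *     if (b != NULL) {
+ *         evbuffer_add(b, "hello", 5);
+ *         if (evbuffer_remove(b, out, 5) == 5)   // copies and drains
+ *             out[5] = '\0';
+ *         evbuffer_free(b);
+ *     }
+ */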
+
+int
+evbuffer_set_flags(struct evbuffer *buf, ev_uint64_t flags)
+{
+ EVBUFFER_LOCK(buf);
+ buf->flags |= (ev_uint32_t)flags;
+ EVBUFFER_UNLOCK(buf);
+ return 0;
+}
+
+int
+evbuffer_clear_flags(struct evbuffer *buf, ev_uint64_t flags)
+{
+ EVBUFFER_LOCK(buf);
+ buf->flags &= ~(ev_uint32_t)flags;
+ EVBUFFER_UNLOCK(buf);
+ return 0;
+}
+
+void
+evbuffer_incref_(struct evbuffer *buf)
+{
+ EVBUFFER_LOCK(buf);
+ ++buf->refcnt;
+ EVBUFFER_UNLOCK(buf);
+}
+
+void
+evbuffer_incref_and_lock_(struct evbuffer *buf)
+{
+ EVBUFFER_LOCK(buf);
+ ++buf->refcnt;
+}
+
+int
+evbuffer_defer_callbacks(struct evbuffer *buffer, struct event_base *base)
+{
+ EVBUFFER_LOCK(buffer);
+ buffer->cb_queue = base;
+ buffer->deferred_cbs = 1;
+ event_deferred_cb_init_(&buffer->deferred,
+ event_base_get_npriorities(base) / 2,
+ evbuffer_deferred_callback, buffer);
+ EVBUFFER_UNLOCK(buffer);
+ return 0;
+}
+
+int
+evbuffer_enable_locking(struct evbuffer *buf, void *lock)
+{
+#ifdef EVENT__DISABLE_THREAD_SUPPORT
+ return -1;
+#else
+ if (buf->lock)
+ return -1;
+
+ if (!lock) {
+ EVTHREAD_ALLOC_LOCK(lock, EVTHREAD_LOCKTYPE_RECURSIVE);
+ if (!lock)
+ return -1;
+ buf->lock = lock;
+ buf->own_lock = 1;
+ } else {
+ buf->lock = lock;
+ buf->own_lock = 0;
+ }
+
+ return 0;
+#endif
+}
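+
+/*
+ * Threading sketch (illustrative, not from the original source): once a
+ * threading implementation has been set up (e.g. evthread_use_pthreads()
+ * on POSIX systems), passing NULL lets the buffer allocate its own
+ * recursive lock; afterwards the public evbuffer_* calls may be used
+ * from multiple threads.
+ *
+ *     struct evbuffer *b = evbuffer_new();
+ *     if (b && evbuffer_enable_locking(b, NULL) == 0) {
+ *         // b is now safe to share between threads
+ *     }
+ */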
+
+void
+evbuffer_set_parent_(struct evbuffer *buf, struct bufferevent *bev)
+{
+ EVBUFFER_LOCK(buf);
+ buf->parent = bev;
+ EVBUFFER_UNLOCK(buf);
+}
+
+static void
+evbuffer_run_callbacks(struct evbuffer *buffer, int running_deferred)
+{
+ struct evbuffer_cb_entry *cbent, *next;
+ struct evbuffer_cb_info info;
+ size_t new_size;
+ ev_uint32_t mask, masked_val;
+ int clear = 1;
+
+ if (running_deferred) {
+ mask = EVBUFFER_CB_NODEFER|EVBUFFER_CB_ENABLED;
+ masked_val = EVBUFFER_CB_ENABLED;
+ } else if (buffer->deferred_cbs) {
+ mask = EVBUFFER_CB_NODEFER|EVBUFFER_CB_ENABLED;
+ masked_val = EVBUFFER_CB_NODEFER|EVBUFFER_CB_ENABLED;
+ /* Don't zero-out n_add/n_del, since the deferred callbacks
+ will want to see them. */
+ clear = 0;
+ } else {
+ mask = EVBUFFER_CB_ENABLED;
+ masked_val = EVBUFFER_CB_ENABLED;
+ }
+
+ ASSERT_EVBUFFER_LOCKED(buffer);
+
+ if (LIST_EMPTY(&buffer->callbacks)) {
+ buffer->n_add_for_cb = buffer->n_del_for_cb = 0;
+ return;
+ }
+ if (buffer->n_add_for_cb == 0 && buffer->n_del_for_cb == 0)
+ return;
+
+ new_size = buffer->total_len;
+ info.orig_size = new_size + buffer->n_del_for_cb - buffer->n_add_for_cb;
+ info.n_added = buffer->n_add_for_cb;
+ info.n_deleted = buffer->n_del_for_cb;
+ if (clear) {
+ buffer->n_add_for_cb = 0;
+ buffer->n_del_for_cb = 0;
+ }
+ for (cbent = LIST_FIRST(&buffer->callbacks);
+ cbent != LIST_END(&buffer->callbacks);
+ cbent = next) {
+ /* Get the 'next' pointer now in case this callback decides
+ * to remove itself or something. */
+ next = LIST_NEXT(cbent, next);
+
+ if ((cbent->flags & mask) != masked_val)
+ continue;
+
+ if ((cbent->flags & EVBUFFER_CB_OBSOLETE))
+ cbent->cb.cb_obsolete(buffer,
+ info.orig_size, new_size, cbent->cbarg);
+ else
+ cbent->cb.cb_func(buffer, &info, cbent->cbarg);
+ }
+}
+
+void
+evbuffer_invoke_callbacks_(struct evbuffer *buffer)
+{
+ if (LIST_EMPTY(&buffer->callbacks)) {
+ buffer->n_add_for_cb = buffer->n_del_for_cb = 0;
+ return;
+ }
+
+ if (buffer->deferred_cbs) {
+ if (event_deferred_cb_schedule_(buffer->cb_queue, &buffer->deferred)) {
+ evbuffer_incref_and_lock_(buffer);
+ if (buffer->parent)
+ bufferevent_incref_(buffer->parent);
+ }
+ EVBUFFER_UNLOCK(buffer);
+ }
+
+ evbuffer_run_callbacks(buffer, 0);
+}
+
+static void
+evbuffer_deferred_callback(struct event_callback *cb, void *arg)
+{
+ struct bufferevent *parent = NULL;
+ struct evbuffer *buffer = arg;
+
+ /* XXXX It would be better to run these callbacks without holding the
+ * lock */
+ EVBUFFER_LOCK(buffer);
+ parent = buffer->parent;
+ evbuffer_run_callbacks(buffer, 1);
+ evbuffer_decref_and_unlock_(buffer);
+ if (parent)
+ bufferevent_decref_(parent);
+}
+
+static void
+evbuffer_remove_all_callbacks(struct evbuffer *buffer)
+{
+ struct evbuffer_cb_entry *cbent;
+
+ while ((cbent = LIST_FIRST(&buffer->callbacks))) {
+ LIST_REMOVE(cbent, next);
+ mm_free(cbent);
+ }
+}
+
+void
+evbuffer_decref_and_unlock_(struct evbuffer *buffer)
+{
+ struct evbuffer_chain *chain, *next;
+ ASSERT_EVBUFFER_LOCKED(buffer);
+
+ EVUTIL_ASSERT(buffer->refcnt > 0);
+
+ if (--buffer->refcnt > 0) {
+ EVBUFFER_UNLOCK(buffer);
+ return;
+ }
+
+ for (chain = buffer->first; chain != NULL; chain = next) {
+ next = chain->next;
+ evbuffer_chain_free(chain);
+ }
+ evbuffer_remove_all_callbacks(buffer);
+ if (buffer->deferred_cbs)
+ event_deferred_cb_cancel_(buffer->cb_queue, &buffer->deferred);
+
+ EVBUFFER_UNLOCK(buffer);
+ if (buffer->own_lock)
+ EVTHREAD_FREE_LOCK(buffer->lock, EVTHREAD_LOCKTYPE_RECURSIVE);
+ mm_free(buffer);
+}
+
+void
+evbuffer_free(struct evbuffer *buffer)
+{
+ EVBUFFER_LOCK(buffer);
+ evbuffer_decref_and_unlock_(buffer);
+}
+
+void
+evbuffer_lock(struct evbuffer *buf)
+{
+ EVBUFFER_LOCK(buf);
+}
+
+void
+evbuffer_unlock(struct evbuffer *buf)
+{
+ EVBUFFER_UNLOCK(buf);
+}
+
+size_t
+evbuffer_get_length(const struct evbuffer *buffer)
+{
+ size_t result;
+
+ EVBUFFER_LOCK(buffer);
+
+ result = (buffer->total_len);
+
+ EVBUFFER_UNLOCK(buffer);
+
+ return result;
+}
+
+size_t
+evbuffer_get_contiguous_space(const struct evbuffer *buf)
+{
+ struct evbuffer_chain *chain;
+ size_t result;
+
+ EVBUFFER_LOCK(buf);
+ chain = buf->first;
+ result = (chain != NULL ? chain->off : 0);
+ EVBUFFER_UNLOCK(buf);
+
+ return result;
+}
+
+size_t
+evbuffer_add_iovec(struct evbuffer *buf, struct evbuffer_iovec *vec, int n_vec)
+{
+ int n;
+ size_t res;
+ size_t to_alloc;
+
+ EVBUFFER_LOCK(buf);
+
+ res = to_alloc = 0;
+
+ for (n = 0; n < n_vec; n++) {
+ to_alloc += vec[n].iov_len;
+ }
+
+ if (evbuffer_expand_fast_(buf, to_alloc, 2) < 0) {
+ goto done;
+ }
+
+ for (n = 0; n < n_vec; n++) {
+ /* XXX each 'add' call here does a bunch of setup that's
+ * obviated by evbuffer_expand_fast_, and some cleanup that we
+ * would like to do only once. Instead we should just extract
+ * the part of the code that's needed. */
+
+ if (evbuffer_add(buf, vec[n].iov_base, vec[n].iov_len) < 0) {
+ goto done;
+ }
+
+ res += vec[n].iov_len;
+ }
+
+done:
+ EVBUFFER_UNLOCK(buf);
+ return res;
+}
+
+int
+evbuffer_reserve_space(struct evbuffer *buf, ev_ssize_t size,
+ struct evbuffer_iovec *vec, int n_vecs)
+{
+ struct evbuffer_chain *chain, **chainp;
+ int n = -1;
+
+ EVBUFFER_LOCK(buf);
+ if (buf->freeze_end)
+ goto done;
+ if (n_vecs < 1)
+ goto done;
+ if (n_vecs == 1) {
+ if ((chain = evbuffer_expand_singlechain(buf, size)) == NULL)
+ goto done;
+
+ vec[0].iov_base = CHAIN_SPACE_PTR(chain);
+ vec[0].iov_len = (size_t) CHAIN_SPACE_LEN(chain);
+ EVUTIL_ASSERT(size<0 || (size_t)vec[0].iov_len >= (size_t)size);
+ n = 1;
+ } else {
+ if (evbuffer_expand_fast_(buf, size, n_vecs)<0)
+ goto done;
+ n = evbuffer_read_setup_vecs_(buf, size, vec, n_vecs,
+ &chainp, 0);
+ }
+
+done:
+ EVBUFFER_UNLOCK(buf);
+ return n;
+
+}
+
+static int
+advance_last_with_data(struct evbuffer *buf)
+{
+ int n = 0;
+ ASSERT_EVBUFFER_LOCKED(buf);
+
+ if (!*buf->last_with_datap)
+ return 0;
+
+ while ((*buf->last_with_datap)->next && (*buf->last_with_datap)->next->off) {
+ buf->last_with_datap = &(*buf->last_with_datap)->next;
+ ++n;
+ }
+ return n;
+}
+
+int
+evbuffer_commit_space(struct evbuffer *buf,
+ struct evbuffer_iovec *vec, int n_vecs)
+{
+ struct evbuffer_chain *chain, **firstchainp, **chainp;
+ int result = -1;
+ size_t added = 0;
+ int i;
+
+ EVBUFFER_LOCK(buf);
+
+ if (buf->freeze_end)
+ goto done;
+ if (n_vecs == 0) {
+ result = 0;
+ goto done;
+ } else if (n_vecs == 1 &&
+ (buf->last && vec[0].iov_base == (void*)CHAIN_SPACE_PTR(buf->last))) {
+ /* The user only got or used one chain; it might not
+ * be the first one with space in it. */
+ if ((size_t)vec[0].iov_len > (size_t)CHAIN_SPACE_LEN(buf->last))
+ goto done;
+ buf->last->off += vec[0].iov_len;
+ added = vec[0].iov_len;
+ if (added)
+ advance_last_with_data(buf);
+ goto okay;
+ }
+
+ /* Advance 'firstchain' to the first chain with space in it. */
+ firstchainp = buf->last_with_datap;
+ if (!*firstchainp)
+ goto done;
+ if (CHAIN_SPACE_LEN(*firstchainp) == 0) {
+ firstchainp = &(*firstchainp)->next;
+ }
+
+ chain = *firstchainp;
+ /* pass 1: make sure that the pointers and lengths of vecs[] are in
+ * bounds before we try to commit anything. */
+ for (i=0; i<n_vecs; ++i) {
+ if (!chain)
+ goto done;
+ if (vec[i].iov_base != (void*)CHAIN_SPACE_PTR(chain) ||
+ (size_t)vec[i].iov_len > CHAIN_SPACE_LEN(chain))
+ goto done;
+ chain = chain->next;
+ }
+ /* pass 2: actually adjust all the chains. */
+ chainp = firstchainp;
+ for (i=0; i<n_vecs; ++i) {
+ (*chainp)->off += vec[i].iov_len;
+ added += vec[i].iov_len;
+ if (vec[i].iov_len) {
+ buf->last_with_datap = chainp;
+ }
+ chainp = &(*chainp)->next;
+ }
+
+okay:
+ buf->total_len += added;
+ buf->n_add_for_cb += added;
+ result = 0;
+ evbuffer_invoke_callbacks_(buf);
+
+done:
+ EVBUFFER_UNLOCK(buf);
+ return result;
+}
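+
+/*
+ * Reserve/commit sketch (illustrative, not from the original source):
+ * write directly into the buffer's own memory without an intermediate
+ * copy.  Only the bytes that are committed become readable; the lengths
+ * in vec[] may be trimmed down before committing.  produce_data() is a
+ * hypothetical producer standing in for the caller's own code.
+ *
+ *     struct evbuffer_iovec v[1];
+ *     if (evbuffer_reserve_space(buf, 1024, v, 1) == 1) {
+ *         size_t n = produce_data(v[0].iov_base, 1024); // hypothetical
+ *         v[0].iov_len = n;          // commit only what was written
+ *         evbuffer_commit_space(buf, v, 1);
+ *     }
+ */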
+
+static inline int
+HAS_PINNED_R(struct evbuffer *buf)
+{
+ return (buf->last && CHAIN_PINNED_R(buf->last));
+}
+
+static inline void
+ZERO_CHAIN(struct evbuffer *dst)
+{
+ ASSERT_EVBUFFER_LOCKED(dst);
+ dst->first = NULL;
+ dst->last = NULL;
+ dst->last_with_datap = &(dst)->first;
+ dst->total_len = 0;
+}
+
+/* Prepares the contents of src to be moved to another buffer by removing
+ * read-pinned chains. The first pinned chain is saved in first, and the
+ * last in last. If src has no read-pinned chains, first and last are set
+ * to NULL. */
+static int
+PRESERVE_PINNED(struct evbuffer *src, struct evbuffer_chain **first,
+ struct evbuffer_chain **last)
+{
+ struct evbuffer_chain *chain, **pinned;
+
+ ASSERT_EVBUFFER_LOCKED(src);
+
+ if (!HAS_PINNED_R(src)) {
+ *first = *last = NULL;
+ return 0;
+ }
+
+ pinned = src->last_with_datap;
+ if (!CHAIN_PINNED_R(*pinned))
+ pinned = &(*pinned)->next;
+ EVUTIL_ASSERT(CHAIN_PINNED_R(*pinned));
+ chain = *first = *pinned;
+ *last = src->last;
+
+ /* If there's data in the first pinned chain, we need to allocate
+ * a new chain and copy the data over. */
+ if (chain->off) {
+ struct evbuffer_chain *tmp;
+
+ EVUTIL_ASSERT(pinned == src->last_with_datap);
+ tmp = evbuffer_chain_new(chain->off);
+ if (!tmp)
+ return -1;
+ memcpy(tmp->buffer, chain->buffer + chain->misalign,
+ chain->off);
+ tmp->off = chain->off;
+ *src->last_with_datap = tmp;
+ src->last = tmp;
+ chain->misalign += chain->off;
+ chain->off = 0;
+ } else {
+ src->last = *src->last_with_datap;
+ *pinned = NULL;
+ }
+
+ return 0;
+}
+
+static inline void
+RESTORE_PINNED(struct evbuffer *src, struct evbuffer_chain *pinned,
+ struct evbuffer_chain *last)
+{
+ ASSERT_EVBUFFER_LOCKED(src);
+
+ if (!pinned) {
+ ZERO_CHAIN(src);
+ return;
+ }
+
+ src->first = pinned;
+ src->last = last;
+ src->last_with_datap = &src->first;
+ src->total_len = 0;
+}
+
+static inline void
+COPY_CHAIN(struct evbuffer *dst, struct evbuffer *src)
+{
+ ASSERT_EVBUFFER_LOCKED(dst);
+ ASSERT_EVBUFFER_LOCKED(src);
+ dst->first = src->first;
+ if (src->last_with_datap == &src->first)
+ dst->last_with_datap = &dst->first;
+ else
+ dst->last_with_datap = src->last_with_datap;
+ dst->last = src->last;
+ dst->total_len = src->total_len;
+}
+
+static void
+APPEND_CHAIN(struct evbuffer *dst, struct evbuffer *src)
+{
+ ASSERT_EVBUFFER_LOCKED(dst);
+ ASSERT_EVBUFFER_LOCKED(src);
+ dst->last->next = src->first;
+ if (src->last_with_datap == &src->first)
+ dst->last_with_datap = &dst->last->next;
+ else
+ dst->last_with_datap = src->last_with_datap;
+ dst->last = src->last;
+ dst->total_len += src->total_len;
+}
+
+static inline void
+APPEND_CHAIN_MULTICAST(struct evbuffer *dst, struct evbuffer *src)
+{
+ struct evbuffer_chain *tmp;
+ struct evbuffer_chain *chain = src->first;
+ struct evbuffer_multicast_parent *extra;
+
+ ASSERT_EVBUFFER_LOCKED(dst);
+ ASSERT_EVBUFFER_LOCKED(src);
+
+ for (; chain; chain = chain->next) {
+ if (!chain->off || chain->flags & EVBUFFER_DANGLING) {
+ /* skip empty chains */
+ continue;
+ }
+
+ tmp = evbuffer_chain_new(sizeof(struct evbuffer_multicast_parent));
+ if (!tmp) {
+ event_warn("%s: out of memory", __func__);
+ return;
+ }
+ extra = EVBUFFER_CHAIN_EXTRA(struct evbuffer_multicast_parent, tmp);
+ /* reference evbuffer containing source chain so it
+ * doesn't get released while the chain is still
+ * being referenced */
+ evbuffer_incref_(src);
+ extra->source = src;
+ /* reference source chain which now becomes immutable */
+ evbuffer_chain_incref(chain);
+ extra->parent = chain;
+ chain->flags |= EVBUFFER_IMMUTABLE;
+ tmp->buffer_len = chain->buffer_len;
+ tmp->misalign = chain->misalign;
+ tmp->off = chain->off;
+ tmp->flags |= EVBUFFER_MULTICAST|EVBUFFER_IMMUTABLE;
+ tmp->buffer = chain->buffer;
+ evbuffer_chain_insert(dst, tmp);
+ }
+}
+
+static void
+PREPEND_CHAIN(struct evbuffer *dst, struct evbuffer *src)
+{
+ ASSERT_EVBUFFER_LOCKED(dst);
+ ASSERT_EVBUFFER_LOCKED(src);
+ src->last->next = dst->first;
+ dst->first = src->first;
+ dst->total_len += src->total_len;
+ if (*dst->last_with_datap == NULL) {
+ if (src->last_with_datap == &(src)->first)
+ dst->last_with_datap = &dst->first;
+ else
+ dst->last_with_datap = src->last_with_datap;
+ } else if (dst->last_with_datap == &dst->first) {
+ dst->last_with_datap = &src->last->next;
+ }
+}
+
+int
+evbuffer_add_buffer(struct evbuffer *outbuf, struct evbuffer *inbuf)
+{
+ struct evbuffer_chain *pinned, *last;
+ size_t in_total_len, out_total_len;
+ int result = 0;
+
+ EVBUFFER_LOCK2(inbuf, outbuf);
+ in_total_len = inbuf->total_len;
+ out_total_len = outbuf->total_len;
+
+ if (in_total_len == 0 || outbuf == inbuf)
+ goto done;
+
+ if (outbuf->freeze_end || inbuf->freeze_start) {
+ result = -1;
+ goto done;
+ }
+
+ if (PRESERVE_PINNED(inbuf, &pinned, &last) < 0) {
+ result = -1;
+ goto done;
+ }
+
+ if (out_total_len == 0) {
+ /* There might be an empty chain at the start of outbuf; free
+ * it. */
+ evbuffer_free_all_chains(outbuf->first);
+ COPY_CHAIN(outbuf, inbuf);
+ } else {
+ APPEND_CHAIN(outbuf, inbuf);
+ }
+
+ RESTORE_PINNED(inbuf, pinned, last);
+
+ inbuf->n_del_for_cb += in_total_len;
+ outbuf->n_add_for_cb += in_total_len;
+
+ evbuffer_invoke_callbacks_(inbuf);
+ evbuffer_invoke_callbacks_(outbuf);
+
+done:
+ EVBUFFER_UNLOCK2(inbuf, outbuf);
+ return result;
+}
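+
+/*
+ * Move sketch (illustrative, not from the original source):
+ * evbuffer_add_buffer() moves whole chains from 'inbuf' onto the end of
+ * 'outbuf' without copying the payload, so after the call inbuf is
+ * empty apart from the pinned data handled above.
+ *
+ *     evbuffer_add(inbuf, "abc", 3);
+ *     evbuffer_add_buffer(outbuf, inbuf);
+ *     // evbuffer_get_length(outbuf) grew by 3; inbuf is now empty
+ */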
+
+int
+evbuffer_add_buffer_reference(struct evbuffer *outbuf, struct evbuffer *inbuf)
+{
+ size_t in_total_len, out_total_len;
+ struct evbuffer_chain *chain;
+ int result = 0;
+
+ EVBUFFER_LOCK2(inbuf, outbuf);
+ in_total_len = inbuf->total_len;
+ out_total_len = outbuf->total_len;
+ chain = inbuf->first;
+
+ if (in_total_len == 0)
+ goto done;
+
+ if (outbuf->freeze_end || outbuf == inbuf) {
+ result = -1;
+ goto done;
+ }
+
+ for (; chain; chain = chain->next) {
+ if ((chain->flags & (EVBUFFER_FILESEGMENT|EVBUFFER_SENDFILE|EVBUFFER_MULTICAST)) != 0) {
+ /* chain type can not be referenced */
+ result = -1;
+ goto done;
+ }
+ }
+
+ if (out_total_len == 0) {
+ /* There might be an empty chain at the start of outbuf; free
+ * it. */
+ evbuffer_free_all_chains(outbuf->first);
+ }
+ APPEND_CHAIN_MULTICAST(outbuf, inbuf);
+
+ outbuf->n_add_for_cb += in_total_len;
+ evbuffer_invoke_callbacks_(outbuf);
+
+done:
+ EVBUFFER_UNLOCK2(inbuf, outbuf);
+ return result;
+}
+
+int
+evbuffer_prepend_buffer(struct evbuffer *outbuf, struct evbuffer *inbuf)
+{
+ struct evbuffer_chain *pinned, *last;
+ size_t in_total_len, out_total_len;
+ int result = 0;
+
+ EVBUFFER_LOCK2(inbuf, outbuf);
+
+ in_total_len = inbuf->total_len;
+ out_total_len = outbuf->total_len;
+
+ if (!in_total_len || inbuf == outbuf)
+ goto done;
+
+ if (outbuf->freeze_start || inbuf->freeze_start) {
+ result = -1;
+ goto done;
+ }
+
+ if (PRESERVE_PINNED(inbuf, &pinned, &last) < 0) {
+ result = -1;
+ goto done;
+ }
+
+ if (out_total_len == 0) {
+ /* There might be an empty chain at the start of outbuf; free
+ * it. */
+ evbuffer_free_all_chains(outbuf->first);
+ COPY_CHAIN(outbuf, inbuf);
+ } else {
+ PREPEND_CHAIN(outbuf, inbuf);
+ }
+
+ RESTORE_PINNED(inbuf, pinned, last);
+
+ inbuf->n_del_for_cb += in_total_len;
+ outbuf->n_add_for_cb += in_total_len;
+
+ evbuffer_invoke_callbacks_(inbuf);
+ evbuffer_invoke_callbacks_(outbuf);
+done:
+ EVBUFFER_UNLOCK2(inbuf, outbuf);
+ return result;
+}
+
+int
+evbuffer_drain(struct evbuffer *buf, size_t len)
+{
+ struct evbuffer_chain *chain, *next;
+ size_t remaining, old_len;
+ int result = 0;
+
+ EVBUFFER_LOCK(buf);
+ old_len = buf->total_len;
+
+ if (old_len == 0)
+ goto done;
+
+ if (buf->freeze_start) {
+ result = -1;
+ goto done;
+ }
+
+ if (len >= old_len && !HAS_PINNED_R(buf)) {
+ len = old_len;
+ for (chain = buf->first; chain != NULL; chain = next) {
+ next = chain->next;
+ evbuffer_chain_free(chain);
+ }
+
+ ZERO_CHAIN(buf);
+ } else {
+ if (len >= old_len)
+ len = old_len;
+
+ buf->total_len -= len;
+ remaining = len;
+ for (chain = buf->first;
+ remaining >= chain->off;
+ chain = next) {
+ next = chain->next;
+ remaining -= chain->off;
+
+ if (chain == *buf->last_with_datap) {
+ buf->last_with_datap = &buf->first;
+ }
+ if (&chain->next == buf->last_with_datap)
+ buf->last_with_datap = &buf->first;
+
+ if (CHAIN_PINNED_R(chain)) {
+ EVUTIL_ASSERT(remaining == 0);
+ chain->misalign += chain->off;
+ chain->off = 0;
+ break;
+ } else
+ evbuffer_chain_free(chain);
+ }
+
+ buf->first = chain;
+ EVUTIL_ASSERT(chain && remaining <= chain->off);
+ chain->misalign += remaining;
+ chain->off -= remaining;
+ }
+
+ buf->n_del_for_cb += len;
+ /* Tell someone about changes in this buffer */
+ evbuffer_invoke_callbacks_(buf);
+
+done:
+ EVBUFFER_UNLOCK(buf);
+ return result;
+}
+
+/* Reads data from an event buffer and drains the bytes read */
+int
+evbuffer_remove(struct evbuffer *buf, void *data_out, size_t datlen)
+{
+ ev_ssize_t n;
+ EVBUFFER_LOCK(buf);
+ n = evbuffer_copyout_from(buf, NULL, data_out, datlen);
+ if (n > 0) {
+ if (evbuffer_drain(buf, n)<0)
+ n = -1;
+ }
+ EVBUFFER_UNLOCK(buf);
+ return (int)n;
+}
+
+ev_ssize_t
+evbuffer_copyout(struct evbuffer *buf, void *data_out, size_t datlen)
+{
+ return evbuffer_copyout_from(buf, NULL, data_out, datlen);
+}
+
+ev_ssize_t
+evbuffer_copyout_from(struct evbuffer *buf, const struct evbuffer_ptr *pos,
+ void *data_out, size_t datlen)
+{
+ /*XXX fails badly on sendfile case. */
+ struct evbuffer_chain *chain;
+ char *data = data_out;
+ size_t nread;
+ ev_ssize_t result = 0;
+ size_t pos_in_chain;
+
+ EVBUFFER_LOCK(buf);
+
+ if (pos) {
+ if (datlen > (size_t)(EV_SSIZE_MAX - pos->pos)) {
+ result = -1;
+ goto done;
+ }
+ chain = pos->internal_.chain;
+ pos_in_chain = pos->internal_.pos_in_chain;
+ if (datlen + pos->pos > buf->total_len)
+ datlen = buf->total_len - pos->pos;
+ } else {
+ chain = buf->first;
+ pos_in_chain = 0;
+ if (datlen > buf->total_len)
+ datlen = buf->total_len;
+ }
+
+
+ if (datlen == 0)
+ goto done;
+
+ if (buf->freeze_start) {
+ result = -1;
+ goto done;
+ }
+
+ nread = datlen;
+
+ while (datlen && datlen >= chain->off - pos_in_chain) {
+ size_t copylen = chain->off - pos_in_chain;
+ memcpy(data,
+ chain->buffer + chain->misalign + pos_in_chain,
+ copylen);
+ data += copylen;
+ datlen -= copylen;
+
+ chain = chain->next;
+ pos_in_chain = 0;
+ EVUTIL_ASSERT(chain || datlen==0);
+ }
+
+ if (datlen) {
+ EVUTIL_ASSERT(chain);
+ EVUTIL_ASSERT(datlen+pos_in_chain <= chain->off);
+
+ memcpy(data, chain->buffer + chain->misalign + pos_in_chain,
+ datlen);
+ }
+
+ result = nread;
+done:
+ EVBUFFER_UNLOCK(buf);
+ return result;
+}
+
+/* reads data from the src buffer to the dst buffer, avoiding memcpy
+ * where possible. */
+/* XXXX should return ev_ssize_t */
+int
+evbuffer_remove_buffer(struct evbuffer *src, struct evbuffer *dst,
+ size_t datlen)
+{
+ /*XXX We should have an option to force this to be zero-copy.*/
+
+ /*XXX can fail badly on sendfile case. */
+ struct evbuffer_chain *chain, *previous;
+ size_t nread = 0;
+ int result;
+
+ EVBUFFER_LOCK2(src, dst);
+
+ chain = previous = src->first;
+
+ if (datlen == 0 || dst == src) {
+ result = 0;
+ goto done;
+ }
+
+ if (dst->freeze_end || src->freeze_start) {
+ result = -1;
+ goto done;
+ }
+
+ /* short-cut if there is no more data buffered */
+ if (datlen >= src->total_len) {
+ datlen = src->total_len;
+ evbuffer_add_buffer(dst, src);
+ result = (int)datlen; /*XXXX should return ev_ssize_t*/
+ goto done;
+ }
+
+ /* removes chains if possible */
+ while (chain->off <= datlen) {
+ /* We can't remove the last with data from src unless we
+ * remove all chains, in which case we would have done the if
+ * block above */
+ EVUTIL_ASSERT(chain != *src->last_with_datap);
+ nread += chain->off;
+ datlen -= chain->off;
+ previous = chain;
+ if (src->last_with_datap == &chain->next)
+ src->last_with_datap = &src->first;
+ chain = chain->next;
+ }
+
+ if (nread) {
+ /* we can remove the chain */
+ struct evbuffer_chain **chp;
+ chp = evbuffer_free_trailing_empty_chains(dst);
+
+ if (dst->first == NULL) {
+ dst->first = src->first;
+ } else {
+ *chp = src->first;
+ }
+ dst->last = previous;
+ previous->next = NULL;
+ src->first = chain;
+ advance_last_with_data(dst);
+
+ dst->total_len += nread;
+ dst->n_add_for_cb += nread;
+ }
+
+ /* we know that there is more data in the src buffer than
+ * we want to read, so we manually drain the chain */
+ evbuffer_add(dst, chain->buffer + chain->misalign, datlen);
+ chain->misalign += datlen;
+ chain->off -= datlen;
+ nread += datlen;
+
+ /* You might think we would want to increment dst->n_add_for_cb
+ * here too. But evbuffer_add above already took care of that.
+ */
+ src->total_len -= nread;
+ src->n_del_for_cb += nread;
+
+ if (nread) {
+ evbuffer_invoke_callbacks_(dst);
+ evbuffer_invoke_callbacks_(src);
+ }
+ result = (int)nread;/*XXXX should change return type */
+
+done:
+ EVBUFFER_UNLOCK2(src, dst);
+ return result;
+}
+
+unsigned char *
+evbuffer_pullup(struct evbuffer *buf, ev_ssize_t size)
+{
+ struct evbuffer_chain *chain, *next, *tmp, *last_with_data;
+ unsigned char *buffer, *result = NULL;
+ ev_ssize_t remaining;
+ int removed_last_with_data = 0;
+ int removed_last_with_datap = 0;
+
+ EVBUFFER_LOCK(buf);
+
+ chain = buf->first;
+
+ if (size < 0)
+ size = buf->total_len;
+ /* if size > buf->total_len, we cannot guarantee to the user that she
+ * is going to have a long enough buffer afterwards; so we return
+ * NULL */
+ if (size == 0 || (size_t)size > buf->total_len)
+ goto done;
+
+ /* No need to pull up anything; the first size bytes are
+ * already here. */
+ if (chain->off >= (size_t)size) {
+ result = chain->buffer + chain->misalign;
+ goto done;
+ }
+
+ /* Make sure that none of the chains we need to copy from is pinned. */
+ remaining = size - chain->off;
+ EVUTIL_ASSERT(remaining >= 0);
+ for (tmp=chain->next; tmp; tmp=tmp->next) {
+ if (CHAIN_PINNED(tmp))
+ goto done;
+ if (tmp->off >= (size_t)remaining)
+ break;
+ remaining -= tmp->off;
+ }
+
+ if (CHAIN_PINNED(chain)) {
+ size_t old_off = chain->off;
+ if (CHAIN_SPACE_LEN(chain) < size - chain->off) {
+ /* not enough room at end of chunk. */
+ goto done;
+ }
+ buffer = CHAIN_SPACE_PTR(chain);
+ tmp = chain;
+ tmp->off = size;
+ size -= old_off;
+ chain = chain->next;
+ } else if (chain->buffer_len - chain->misalign >= (size_t)size) {
+ /* already have enough space in the first chain */
+ size_t old_off = chain->off;
+ buffer = chain->buffer + chain->misalign + chain->off;
+ tmp = chain;
+ tmp->off = size;
+ size -= old_off;
+ chain = chain->next;
+ } else {
+ if ((tmp = evbuffer_chain_new(size)) == NULL) {
+ event_warn("%s: out of memory", __func__);
+ goto done;
+ }
+ buffer = tmp->buffer;
+ tmp->off = size;
+ buf->first = tmp;
+ }
+
+ /* TODO(niels): deal with buffers that point to NULL like sendfile */
+
+ /* Copy and free every chunk that will be entirely pulled into tmp */
+ last_with_data = *buf->last_with_datap;
+ for (; chain != NULL && (size_t)size >= chain->off; chain = next) {
+ next = chain->next;
+
+ memcpy(buffer, chain->buffer + chain->misalign, chain->off);
+ size -= chain->off;
+ buffer += chain->off;
+ if (chain == last_with_data)
+ removed_last_with_data = 1;
+ if (&chain->next == buf->last_with_datap)
+ removed_last_with_datap = 1;
+
+ evbuffer_chain_free(chain);
+ }
+
+ if (chain != NULL) {
+ memcpy(buffer, chain->buffer + chain->misalign, size);
+ chain->misalign += size;
+ chain->off -= size;
+ } else {
+ buf->last = tmp;
+ }
+
+ tmp->next = chain;
+
+ if (removed_last_with_data) {
+ buf->last_with_datap = &buf->first;
+ } else if (removed_last_with_datap) {
+ if (buf->first->next && buf->first->next->off)
+ buf->last_with_datap = &buf->first->next;
+ else
+ buf->last_with_datap = &buf->first;
+ }
+
+ result = (tmp->buffer + tmp->misalign);
+
+done:
+ EVBUFFER_UNLOCK(buf);
+ return result;
+}
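+
+/*
+ * Pullup sketch (illustrative, not from the original source): linearize
+ * the first bytes of a buffer when a parser needs contiguous memory.
+ * This can copy data across chains, so it is best kept to small
+ * prefixes such as a fixed-size header.
+ *
+ *     unsigned char *hdr = evbuffer_pullup(buf, 8);
+ *     if (hdr != NULL) {
+ *         // hdr points at the first 8 readable bytes, contiguously
+ *     }
+ */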
+
+/*
+ * Reads a line terminated by either '\r\n', '\n\r' or '\r' or '\n'.
+ * The returned buffer needs to be freed by the caller.
+ */
+char *
+evbuffer_readline(struct evbuffer *buffer)
+{
+ return evbuffer_readln(buffer, NULL, EVBUFFER_EOL_ANY);
+}
+
+static inline ev_ssize_t
+evbuffer_strchr(struct evbuffer_ptr *it, const char chr)
+{
+ struct evbuffer_chain *chain = it->internal_.chain;
+ size_t i = it->internal_.pos_in_chain;
+ while (chain != NULL) {
+ char *buffer = (char *)chain->buffer + chain->misalign;
+ char *cp = memchr(buffer+i, chr, chain->off-i);
+ if (cp) {
+ it->internal_.chain = chain;
+ it->internal_.pos_in_chain = cp - buffer;
+ it->pos += (cp - buffer - i);
+ return it->pos;
+ }
+ it->pos += chain->off - i;
+ i = 0;
+ chain = chain->next;
+ }
+
+ return (-1);
+}
+
+static inline char *
+find_eol_char(char *s, size_t len)
+{
+#define CHUNK_SZ 128
+ /* Lots of benchmarking found this approach to be faster in practice
+ * than doing two memchrs over the whole buffer, doing a memchr on each
+ * char of the buffer, or trying to emulate memchr by hand. */
+ char *s_end, *cr, *lf;
+ s_end = s+len;
+ while (s < s_end) {
+ size_t chunk = (s + CHUNK_SZ < s_end) ? CHUNK_SZ : (s_end - s);
+ cr = memchr(s, '\r', chunk);
+ lf = memchr(s, '\n', chunk);
+ if (cr) {
+ if (lf && lf < cr)
+ return lf;
+ return cr;
+ } else if (lf) {
+ return lf;
+ }
+ s += CHUNK_SZ;
+ }
+
+ return NULL;
+#undef CHUNK_SZ
+}
+
+static ev_ssize_t
+evbuffer_find_eol_char(struct evbuffer_ptr *it)
+{
+ struct evbuffer_chain *chain = it->internal_.chain;
+ size_t i = it->internal_.pos_in_chain;
+ while (chain != NULL) {
+ char *buffer = (char *)chain->buffer + chain->misalign;
+ char *cp = find_eol_char(buffer+i, chain->off-i);
+ if (cp) {
+ it->internal_.chain = chain;
+ it->internal_.pos_in_chain = cp - buffer;
+ it->pos += (cp - buffer) - i;
+ return it->pos;
+ }
+ it->pos += chain->off - i;
+ i = 0;
+ chain = chain->next;
+ }
+
+ return (-1);
+}
+
+static inline int
+evbuffer_strspn(
+ struct evbuffer_ptr *ptr, const char *chrset)
+{
+ int count = 0;
+ struct evbuffer_chain *chain = ptr->internal_.chain;
+ size_t i = ptr->internal_.pos_in_chain;
+
+ if (!chain)
+ return 0;
+
+ while (1) {
+ char *buffer = (char *)chain->buffer + chain->misalign;
+ for (; i < chain->off; ++i) {
+ const char *p = chrset;
+ while (*p) {
+ if (buffer[i] == *p++)
+ goto next;
+ }
+ ptr->internal_.chain = chain;
+ ptr->internal_.pos_in_chain = i;
+ ptr->pos += count;
+ return count;
+ next:
+ ++count;
+ }
+ i = 0;
+
+ if (! chain->next) {
+ ptr->internal_.chain = chain;
+ ptr->internal_.pos_in_chain = i;
+ ptr->pos += count;
+ return count;
+ }
+
+ chain = chain->next;
+ }
+}
+
+
+static inline int
+evbuffer_getchr(struct evbuffer_ptr *it)
+{
+ struct evbuffer_chain *chain = it->internal_.chain;
+ size_t off = it->internal_.pos_in_chain;
+
+ if (chain == NULL)
+ return -1;
+
+ return (unsigned char)chain->buffer[chain->misalign + off];
+}
+
+struct evbuffer_ptr
+evbuffer_search_eol(struct evbuffer *buffer,
+ struct evbuffer_ptr *start, size_t *eol_len_out,
+ enum evbuffer_eol_style eol_style)
+{
+ struct evbuffer_ptr it, it2;
+ size_t extra_drain = 0;
+ int ok = 0;
+
+ /* Avoid locking in trivial edge cases */
+ if (start && start->internal_.chain == NULL) {
+ PTR_NOT_FOUND(&it);
+ if (eol_len_out)
+ *eol_len_out = extra_drain;
+ return it;
+ }
+
+ EVBUFFER_LOCK(buffer);
+
+ if (start) {
+ memcpy(&it, start, sizeof(it));
+ } else {
+ it.pos = 0;
+ it.internal_.chain = buffer->first;
+ it.internal_.pos_in_chain = 0;
+ }
+
+ /* the eol_style determines our first stop character and how many
+ * characters we are going to drain afterwards. */
+ switch (eol_style) {
+ case EVBUFFER_EOL_ANY:
+ if (evbuffer_find_eol_char(&it) < 0)
+ goto done;
+ memcpy(&it2, &it, sizeof(it));
+ extra_drain = evbuffer_strspn(&it2, "\r\n");
+ break;
+ case EVBUFFER_EOL_CRLF_STRICT: {
+ it = evbuffer_search(buffer, "\r\n", 2, &it);
+ if (it.pos < 0)
+ goto done;
+ extra_drain = 2;
+ break;
+ }
+ case EVBUFFER_EOL_CRLF: {
+ ev_ssize_t start_pos = it.pos;
+ /* Look for a LF ... */
+ if (evbuffer_strchr(&it, '\n') < 0)
+ goto done;
+ extra_drain = 1;
+ /* ... optionally preceded by a CR. */
+ if (it.pos == start_pos)
+ break; /* If the first character is \n, don't back up */
+ /* This potentially does an extra linear walk over the first
+ * few chains. Probably, that's not too expensive unless you
+ * have a really pathological setup. */
+ memcpy(&it2, &it, sizeof(it));
+ if (evbuffer_ptr_subtract(buffer, &it2, 1)<0)
+ break;
+ if (evbuffer_getchr(&it2) == '\r') {
+ memcpy(&it, &it2, sizeof(it));
+ extra_drain = 2;
+ }
+ break;
+ }
+ case EVBUFFER_EOL_LF:
+ if (evbuffer_strchr(&it, '\n') < 0)
+ goto done;
+ extra_drain = 1;
+ break;
+ case EVBUFFER_EOL_NUL:
+ if (evbuffer_strchr(&it, '\0') < 0)
+ goto done;
+ extra_drain = 1;
+ break;
+ default:
+ goto done;
+ }
+
+ ok = 1;
+done:
+ EVBUFFER_UNLOCK(buffer);
+
+ if (!ok)
+ PTR_NOT_FOUND(&it);
+ if (eol_len_out)
+ *eol_len_out = extra_drain;
+
+ return it;
+}
+
+char *
+evbuffer_readln(struct evbuffer *buffer, size_t *n_read_out,
+ enum evbuffer_eol_style eol_style)
+{
+ struct evbuffer_ptr it;
+ char *line;
+ size_t n_to_copy=0, extra_drain=0;
+ char *result = NULL;
+
+ EVBUFFER_LOCK(buffer);
+
+ if (buffer->freeze_start) {
+ goto done;
+ }
+
+ it = evbuffer_search_eol(buffer, NULL, &extra_drain, eol_style);
+ if (it.pos < 0)
+ goto done;
+ n_to_copy = it.pos;
+
+ if ((line = mm_malloc(n_to_copy+1)) == NULL) {
+ event_warn("%s: out of memory", __func__);
+ goto done;
+ }
+
+ evbuffer_remove(buffer, line, n_to_copy);
+ line[n_to_copy] = '\0';
+
+ evbuffer_drain(buffer, extra_drain);
+ result = line;
+done:
+ EVBUFFER_UNLOCK(buffer);
+
+ if (n_read_out)
+ *n_read_out = result ? n_to_copy : 0;
+
+ return result;
+}
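+
+/*
+ * Line-reading sketch (illustrative, not from the original source):
+ * pull one CRLF-terminated line out of the buffer.  The returned string
+ * is heap-allocated and must be freed by the caller; NULL means no
+ * complete line is buffered yet.  handle_line() is a hypothetical
+ * caller-supplied handler.
+ *
+ *     size_t len = 0;
+ *     char *line = evbuffer_readln(buf, &len, EVBUFFER_EOL_CRLF);
+ *     if (line != NULL) {
+ *         handle_line(line, len);   // hypothetical handler
+ *         free(line);
+ *     }
+ */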
+
+#define EVBUFFER_CHAIN_MAX_AUTO_SIZE 4096
+
+/* Adds data to an event buffer */
+
+int
+evbuffer_add(struct evbuffer *buf, const void *data_in, size_t datlen)
+{
+ struct evbuffer_chain *chain, *tmp;
+ const unsigned char *data = data_in;
+ size_t remain, to_alloc;
+ int result = -1;
+
+ EVBUFFER_LOCK(buf);
+
+ if (buf->freeze_end) {
+ goto done;
+ }
+ /* Prevent buf->total_len overflow */
+ if (datlen > EV_SIZE_MAX - buf->total_len) {
+ goto done;
+ }
+
+ chain = buf->last;
+
+ /* If there are no chains allocated for this buffer, allocate one
+ * big enough to hold all the data. */
+ if (chain == NULL) {
+ chain = evbuffer_chain_new(datlen);
+ if (!chain)
+ goto done;
+ evbuffer_chain_insert(buf, chain);
+ }
+
+ if ((chain->flags & EVBUFFER_IMMUTABLE) == 0) {
+ /* Always true for mutable buffers */
+ EVUTIL_ASSERT(chain->misalign >= 0 &&
+ (ev_uint64_t)chain->misalign <= EVBUFFER_CHAIN_MAX);
+ remain = chain->buffer_len - (size_t)chain->misalign - chain->off;
+ if (remain >= datlen) {
+ /* there's enough space to hold all the data in the
+ * current last chain */
+ memcpy(chain->buffer + chain->misalign + chain->off,
+ data, datlen);
+ chain->off += datlen;
+ buf->total_len += datlen;
+ buf->n_add_for_cb += datlen;
+ goto out;
+ } else if (!CHAIN_PINNED(chain) &&
+ evbuffer_chain_should_realign(chain, datlen)) {
+ /* we can fit the data into the misalignment */
+ evbuffer_chain_align(chain);
+
+ memcpy(chain->buffer + chain->off, data, datlen);
+ chain->off += datlen;
+ buf->total_len += datlen;
+ buf->n_add_for_cb += datlen;
+ goto out;
+ }
+ } else {
+ /* we cannot write any data to the last chain */
+ remain = 0;
+ }
+
+ /* we need to add another chain */
+ to_alloc = chain->buffer_len;
+ if (to_alloc <= EVBUFFER_CHAIN_MAX_AUTO_SIZE/2)
+ to_alloc <<= 1;
+ if (datlen > to_alloc)
+ to_alloc = datlen;
+ tmp = evbuffer_chain_new(to_alloc);
+ if (tmp == NULL)
+ goto done;
+
+ if (remain) {
+ memcpy(chain->buffer + chain->misalign + chain->off,
+ data, remain);
+ chain->off += remain;
+ buf->total_len += remain;
+ buf->n_add_for_cb += remain;
+ }
+
+ data += remain;
+ datlen -= remain;
+
+ memcpy(tmp->buffer, data, datlen);
+ tmp->off = datlen;
+ evbuffer_chain_insert(buf, tmp);
+ buf->n_add_for_cb += datlen;
+
+out:
+ evbuffer_invoke_callbacks_(buf);
+ result = 0;
+done:
+ EVBUFFER_UNLOCK(buf);
+ return result;
+}
+
+int
+evbuffer_prepend(struct evbuffer *buf, const void *data, size_t datlen)
+{
+ struct evbuffer_chain *chain, *tmp;
+ int result = -1;
+
+ EVBUFFER_LOCK(buf);
+
+ if (buf->freeze_start) {
+ goto done;
+ }
+ if (datlen > EV_SIZE_MAX - buf->total_len) {
+ goto done;
+ }
+
+ chain = buf->first;
+
+ if (chain == NULL) {
+ chain = evbuffer_chain_new(datlen);
+ if (!chain)
+ goto done;
+ evbuffer_chain_insert(buf, chain);
+ }
+
+ /* we cannot touch immutable buffers */
+ if ((chain->flags & EVBUFFER_IMMUTABLE) == 0) {
+ /* Always true for mutable buffers */
+ EVUTIL_ASSERT(chain->misalign >= 0 &&
+ (ev_uint64_t)chain->misalign <= EVBUFFER_CHAIN_MAX);
+
+ /* If this chain is empty, we can treat it as
+ * 'empty at the beginning' rather than 'empty at the end' */
+ if (chain->off == 0)
+ chain->misalign = chain->buffer_len;
+
+ if ((size_t)chain->misalign >= datlen) {
+ /* we have enough space to fit everything */
+ memcpy(chain->buffer + chain->misalign - datlen,
+ data, datlen);
+ chain->off += datlen;
+ chain->misalign -= datlen;
+ buf->total_len += datlen;
+ buf->n_add_for_cb += datlen;
+ goto out;
+ } else if (chain->misalign) {
+ /* we can only fit some of the data. */
+ memcpy(chain->buffer,
+ (char*)data + datlen - chain->misalign,
+ (size_t)chain->misalign);
+ chain->off += (size_t)chain->misalign;
+ buf->total_len += (size_t)chain->misalign;
+ buf->n_add_for_cb += (size_t)chain->misalign;
+ datlen -= (size_t)chain->misalign;
+ chain->misalign = 0;
+ }
+ }
+
+ /* we need to add another chain */
+ if ((tmp = evbuffer_chain_new(datlen)) == NULL)
+ goto done;
+ buf->first = tmp;
+ if (buf->last_with_datap == &buf->first)
+ buf->last_with_datap = &tmp->next;
+
+ tmp->next = chain;
+
+ tmp->off = datlen;
+ EVUTIL_ASSERT(datlen <= tmp->buffer_len);
+ tmp->misalign = tmp->buffer_len - datlen;
+
+ memcpy(tmp->buffer + tmp->misalign, data, datlen);
+ buf->total_len += datlen;
+ buf->n_add_for_cb += datlen;
+
+out:
+ evbuffer_invoke_callbacks_(buf);
+ result = 0;
+done:
+ EVBUFFER_UNLOCK(buf);
+ return result;
+}
+
+/** Helper: realigns the memory in chain->buffer so that misalign is 0. */
+static void
+evbuffer_chain_align(struct evbuffer_chain *chain)
+{
+ EVUTIL_ASSERT(!(chain->flags & EVBUFFER_IMMUTABLE));
+ EVUTIL_ASSERT(!(chain->flags & EVBUFFER_MEM_PINNED_ANY));
+ memmove(chain->buffer, chain->buffer + chain->misalign, chain->off);
+ chain->misalign = 0;
+}
+
+#define MAX_TO_COPY_IN_EXPAND 4096
+#define MAX_TO_REALIGN_IN_EXPAND 2048
+
+/** Helper: return true iff we should realign chain to fit datalen bytes of
+ data in it. */
+static int
+evbuffer_chain_should_realign(struct evbuffer_chain *chain,
+ size_t datlen)
+{
+ return chain->buffer_len - chain->off >= datlen &&
+ (chain->off < chain->buffer_len / 2) &&
+ (chain->off <= MAX_TO_REALIGN_IN_EXPAND);
+}
+
+/* Expands the available space in the event buffer to at least datlen, all in
+ * a single chunk. Return that chunk. */
+static struct evbuffer_chain *
+evbuffer_expand_singlechain(struct evbuffer *buf, size_t datlen)
+{
+ struct evbuffer_chain *chain, **chainp;
+ struct evbuffer_chain *result = NULL;
+ ASSERT_EVBUFFER_LOCKED(buf);
+
+ chainp = buf->last_with_datap;
+
+ /* XXX If *chainp is no longer writeable, but has enough space in its
+ * misalign, this might be a bad idea: we could still use *chainp, not
+ * (*chainp)->next. */
+ if (*chainp && CHAIN_SPACE_LEN(*chainp) == 0)
+ chainp = &(*chainp)->next;
+
+ /* 'chain' now points to the first chain with writable space (if any)
+ * We will either use it, realign it, replace it, or resize it. */
+ chain = *chainp;
+
+ if (chain == NULL ||
+ (chain->flags & (EVBUFFER_IMMUTABLE|EVBUFFER_MEM_PINNED_ANY))) {
+ /* We can't use the last_with_data chain at all. Just add a
+ * new one that's big enough. */
+ goto insert_new;
+ }
+
+ /* If we can fit all the data, then we don't have to do anything */
+ if (CHAIN_SPACE_LEN(chain) >= datlen) {
+ result = chain;
+ goto ok;
+ }
+
+ /* If the chain is completely empty, just replace it by adding a new
+ * empty chain. */
+ if (chain->off == 0) {
+ goto insert_new;
+ }
+
+ /* If the misalignment plus the remaining space fulfills our data
+ * needs, we could just force an alignment to happen. Afterwards, we
+ * have enough space. But only do this if we're saving a lot of space
+ * and not moving too much data. Otherwise the space savings are
+ * probably offset by the time lost in copying.
+ */
+ if (evbuffer_chain_should_realign(chain, datlen)) {
+ evbuffer_chain_align(chain);
+ result = chain;
+ goto ok;
+ }
+
+ /* At this point, we can either resize the last chunk with space in
+ * it, use the next chunk after it, or add a new chunk. If we add
+ * a new chunk, we waste CHAIN_SPACE_LEN(chain) bytes in the former
+ * last chunk. If we resize, we have to copy chain->off bytes.
+ */
+
+ /* Would expanding this chunk be affordable and worthwhile? */
+ if (CHAIN_SPACE_LEN(chain) < chain->buffer_len / 8 ||
+ chain->off > MAX_TO_COPY_IN_EXPAND ||
+ datlen >= EVBUFFER_CHAIN_MAX - chain->off) {
+ /* It's not worth resizing this chain. Can the next one be
+ * used? */
+ if (chain->next && CHAIN_SPACE_LEN(chain->next) >= datlen) {
+ /* Yes, we can just use the next chain (which should
+ * be empty). */
+ result = chain->next;
+ goto ok;
+ } else {
+ /* No; append a new chain (which will free all
+ * terminal empty chains.) */
+ goto insert_new;
+ }
+ } else {
+ /* Okay, we're going to try to resize this chain: Not doing so
+ * would waste at least 1/8 of its current allocation, and we
+ * can do so without having to copy more than
+ * MAX_TO_COPY_IN_EXPAND bytes. */
+ /* figure out how much space we need */
+ size_t length = chain->off + datlen;
+ struct evbuffer_chain *tmp = evbuffer_chain_new(length);
+ if (tmp == NULL)
+ goto err;
+
+ /* copy the data over that we had so far */
+ tmp->off = chain->off;
+ memcpy(tmp->buffer, chain->buffer + chain->misalign,
+ chain->off);
+ /* fix up the list */
+ EVUTIL_ASSERT(*chainp == chain);
+ result = *chainp = tmp;
+
+ if (buf->last == chain)
+ buf->last = tmp;
+
+ tmp->next = chain->next;
+ evbuffer_chain_free(chain);
+ goto ok;
+ }
+
+insert_new:
+ result = evbuffer_chain_insert_new(buf, datlen);
+ if (!result)
+ goto err;
+ok:
+ EVUTIL_ASSERT(result);
+ EVUTIL_ASSERT(CHAIN_SPACE_LEN(result) >= datlen);
+err:
+ return result;
+}
+
+/* Make sure that datlen bytes are available for writing in the last n
+ * chains. Never copies or moves data. */
+int
+evbuffer_expand_fast_(struct evbuffer *buf, size_t datlen, int n)
+{
+ struct evbuffer_chain *chain = buf->last, *tmp, *next;
+ size_t avail;
+ int used;
+
+ ASSERT_EVBUFFER_LOCKED(buf);
+ EVUTIL_ASSERT(n >= 2);
+
+ if (chain == NULL || (chain->flags & EVBUFFER_IMMUTABLE)) {
+ /* There is no last chunk, or we can't touch the last chunk.
+ * Just add a new chunk. */
+ chain = evbuffer_chain_new(datlen);
+ if (chain == NULL)
+ return (-1);
+
+ evbuffer_chain_insert(buf, chain);
+ return (0);
+ }
+
+ used = 0; /* number of chains we're using space in. */
+ avail = 0; /* how much space they have. */
+ /* How many bytes can we stick at the end of buffer as it is? Iterate
+ * over the chains at the end of the buffer, trying to see how much
+ * space we have in the first n. */
+ for (chain = *buf->last_with_datap; chain; chain = chain->next) {
+ if (chain->off) {
+ size_t space = (size_t) CHAIN_SPACE_LEN(chain);
+ EVUTIL_ASSERT(chain == *buf->last_with_datap);
+ if (space) {
+ avail += space;
+ ++used;
+ }
+ } else {
+ /* No data in chain; realign it. */
+ chain->misalign = 0;
+ avail += chain->buffer_len;
+ ++used;
+ }
+ if (avail >= datlen) {
+ /* There is already enough space. Just return */
+ return (0);
+ }
+ if (used == n)
+ break;
+ }
+
+ /* There wasn't enough space in the first n chains with space in
+ * them. Either add a new chain with enough space, or replace all
+ * empty chains with one that has enough space, depending on n. */
+ if (used < n) {
+ /* The loop ran off the end of the chains before it hit n
+ * chains; we can add another. */
+ EVUTIL_ASSERT(chain == NULL);
+
+ tmp = evbuffer_chain_new(datlen - avail);
+ if (tmp == NULL)
+ return (-1);
+
+ buf->last->next = tmp;
+ buf->last = tmp;
+ /* (we would only set last_with_data if we added the first
+ * chain. But if the buffer had no chains, we would have
+ * just allocated a new chain earlier) */
+ return (0);
+ } else {
+ /* Nuke _all_ the empty chains. */
+ int rmv_all = 0; /* True iff we removed last_with_data. */
+ chain = *buf->last_with_datap;
+ if (!chain->off) {
+ EVUTIL_ASSERT(chain == buf->first);
+ rmv_all = 1;
+ avail = 0;
+ } else {
+ /* can't overflow, since only mutable chains have
+ * huge misaligns. */
+ avail = (size_t) CHAIN_SPACE_LEN(chain);
+ chain = chain->next;
+ }
+
+
+ for (; chain; chain = next) {
+ next = chain->next;
+ EVUTIL_ASSERT(chain->off == 0);
+ evbuffer_chain_free(chain);
+ }
+ EVUTIL_ASSERT(datlen >= avail);
+ tmp = evbuffer_chain_new(datlen - avail);
+ if (tmp == NULL) {
+ if (rmv_all) {
+ ZERO_CHAIN(buf);
+ } else {
+ buf->last = *buf->last_with_datap;
+ (*buf->last_with_datap)->next = NULL;
+ }
+ return (-1);
+ }
+
+ if (rmv_all) {
+ buf->first = buf->last = tmp;
+ buf->last_with_datap = &buf->first;
+ } else {
+ (*buf->last_with_datap)->next = tmp;
+ buf->last = tmp;
+ }
+ return (0);
+ }
+}
+
+int
+evbuffer_expand(struct evbuffer *buf, size_t datlen)
+{
+ struct evbuffer_chain *chain;
+
+ EVBUFFER_LOCK(buf);
+ chain = evbuffer_expand_singlechain(buf, datlen);
+ EVBUFFER_UNLOCK(buf);
+ return chain ? 0 : -1;
+}
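+
+/* Illustrative usage sketch (minimal, error handling elided): pre-sizing a
+ * buffer before a series of small adds keeps the data in a single chain.
+ *
+ *	struct evbuffer *out = evbuffer_new();
+ *	if (out) {
+ *		evbuffer_expand(out, 4096);
+ *		evbuffer_add(out, "header: ", 8);
+ *		evbuffer_add(out, "value\r\n", 7);
+ *		evbuffer_free(out);
+ *	}
+ */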
+
+/*
+ * Reads data from a file descriptor into a buffer.
+ */
+
+#if defined(EVENT__HAVE_SYS_UIO_H) || defined(_WIN32)
+#define USE_IOVEC_IMPL
+#endif
+
+#ifdef USE_IOVEC_IMPL
+
+#ifdef EVENT__HAVE_SYS_UIO_H
+/* Number of iovecs we use for writev; fragmentation determines how
+ * much we end up writing. */
+
+#define DEFAULT_WRITE_IOVEC 128
+
+#if defined(UIO_MAXIOV) && UIO_MAXIOV < DEFAULT_WRITE_IOVEC
+#define NUM_WRITE_IOVEC UIO_MAXIOV
+#elif defined(IOV_MAX) && IOV_MAX < DEFAULT_WRITE_IOVEC
+#define NUM_WRITE_IOVEC IOV_MAX
+#else
+#define NUM_WRITE_IOVEC DEFAULT_WRITE_IOVEC
+#endif
+
+#define IOV_TYPE struct iovec
+#define IOV_PTR_FIELD iov_base
+#define IOV_LEN_FIELD iov_len
+#define IOV_LEN_TYPE size_t
+#else
+#define NUM_WRITE_IOVEC 16
+#define IOV_TYPE WSABUF
+#define IOV_PTR_FIELD buf
+#define IOV_LEN_FIELD len
+#define IOV_LEN_TYPE unsigned long
+#endif
+#endif
+#define NUM_READ_IOVEC 4
+
+#define EVBUFFER_MAX_READ 4096
+
+/** Helper function to figure out which space to use for reading data into
+ an evbuffer. Internal use only.
+
+ @param buf The buffer to read into
+ @param howmuch How much we want to read.
+ @param vecs An array of two or more iovecs or WSABUFs.
+ @param n_vecs_avail The length of vecs
+ @param chainp A pointer to a variable to hold the first chain we're
+ reading into.
+ @param exact Boolean: if true, we do not provide more than 'howmuch'
+ space in the vectors, even if more space is available.
+ @return The number of buffers we're using.
+ */
+int
+evbuffer_read_setup_vecs_(struct evbuffer *buf, ev_ssize_t howmuch,
+ struct evbuffer_iovec *vecs, int n_vecs_avail,
+ struct evbuffer_chain ***chainp, int exact)
+{
+ struct evbuffer_chain *chain;
+ struct evbuffer_chain **firstchainp;
+ size_t so_far;
+ int i;
+ ASSERT_EVBUFFER_LOCKED(buf);
+
+ if (howmuch < 0)
+ return -1;
+
+ so_far = 0;
+ /* Let firstchain be the first chain with any space on it */
+ firstchainp = buf->last_with_datap;
+ if (CHAIN_SPACE_LEN(*firstchainp) == 0) {
+ firstchainp = &(*firstchainp)->next;
+ }
+
+ chain = *firstchainp;
+ for (i = 0; i < n_vecs_avail && so_far < (size_t)howmuch; ++i) {
+ size_t avail = (size_t) CHAIN_SPACE_LEN(chain);
+ if (avail > (howmuch - so_far) && exact)
+ avail = howmuch - so_far;
+ vecs[i].iov_base = CHAIN_SPACE_PTR(chain);
+ vecs[i].iov_len = avail;
+ so_far += avail;
+ chain = chain->next;
+ }
+
+ *chainp = firstchainp;
+ return i;
+}
+
+static int
+get_n_bytes_readable_on_socket(evutil_socket_t fd)
+{
+#if defined(FIONREAD) && defined(_WIN32)
+ unsigned long lng = EVBUFFER_MAX_READ;
+ if (ioctlsocket(fd, FIONREAD, &lng) < 0)
+ return -1;
+ /* Can overflow, but mostly harmlessly. XXXX */
+ return (int)lng;
+#elif defined(FIONREAD)
+ int n = EVBUFFER_MAX_READ;
+ if (ioctl(fd, FIONREAD, &n) < 0)
+ return -1;
+ return n;
+#else
+ return EVBUFFER_MAX_READ;
+#endif
+}
+
+/* TODO(niels): should this function return ev_ssize_t and take ev_ssize_t
+ * as howmuch? */
+int
+evbuffer_read(struct evbuffer *buf, evutil_socket_t fd, int howmuch)
+{
+ struct evbuffer_chain **chainp;
+ int n;
+ int result;
+
+#ifdef USE_IOVEC_IMPL
+ int nvecs, i, remaining;
+#else
+ struct evbuffer_chain *chain;
+ unsigned char *p;
+#endif
+
+ EVBUFFER_LOCK(buf);
+
+ if (buf->freeze_end) {
+ result = -1;
+ goto done;
+ }
+
+ n = get_n_bytes_readable_on_socket(fd);
+ if (n <= 0 || n > EVBUFFER_MAX_READ)
+ n = EVBUFFER_MAX_READ;
+ if (howmuch < 0 || howmuch > n)
+ howmuch = n;
+
+#ifdef USE_IOVEC_IMPL
+ /* Since we can use iovecs, we're willing to use the last
+ * NUM_READ_IOVEC chains. */
+ if (evbuffer_expand_fast_(buf, howmuch, NUM_READ_IOVEC) == -1) {
+ result = -1;
+ goto done;
+ } else {
+ IOV_TYPE vecs[NUM_READ_IOVEC];
+#ifdef EVBUFFER_IOVEC_IS_NATIVE_
+ nvecs = evbuffer_read_setup_vecs_(buf, howmuch, vecs,
+ NUM_READ_IOVEC, &chainp, 1);
+#else
+ /* We aren't using the native struct iovec. Therefore,
+ we are on win32. */
+ struct evbuffer_iovec ev_vecs[NUM_READ_IOVEC];
+ nvecs = evbuffer_read_setup_vecs_(buf, howmuch, ev_vecs, 2,
+ &chainp, 1);
+
+ for (i=0; i < nvecs; ++i)
+ WSABUF_FROM_EVBUFFER_IOV(&vecs[i], &ev_vecs[i]);
+#endif
+
+#ifdef _WIN32
+ {
+ DWORD bytesRead;
+ DWORD flags=0;
+ if (WSARecv(fd, vecs, nvecs, &bytesRead, &flags, NULL, NULL)) {
+ /* The read failed. It might be a close,
+ * or it might be an error. */
+ if (WSAGetLastError() == WSAECONNABORTED)
+ n = 0;
+ else
+ n = -1;
+ } else
+ n = bytesRead;
+ }
+#else
+ n = readv(fd, vecs, nvecs);
+#endif
+ }
+
+#else /*!USE_IOVEC_IMPL*/
+ /* If we don't have FIONREAD, we might waste some space here */
+ /* XXX we _will_ waste some space here if there is any space left
+ * over on buf->last. */
+ if ((chain = evbuffer_expand_singlechain(buf, howmuch)) == NULL) {
+ result = -1;
+ goto done;
+ }
+
+ /* We can append new data at this point */
+ p = chain->buffer + chain->misalign + chain->off;
+
+#ifndef _WIN32
+ n = read(fd, p, howmuch);
+#else
+ n = recv(fd, p, howmuch, 0);
+#endif
+#endif /* USE_IOVEC_IMPL */
+
+ if (n == -1) {
+ result = -1;
+ goto done;
+ }
+ if (n == 0) {
+ result = 0;
+ goto done;
+ }
+
+#ifdef USE_IOVEC_IMPL
+ remaining = n;
+ for (i=0; i < nvecs; ++i) {
+ /* can't overflow, since only mutable chains have
+ * huge misaligns. */
+ size_t space = (size_t) CHAIN_SPACE_LEN(*chainp);
+ /* XXXX This is a kludge that can waste space in perverse
+ * situations. */
+ if (space > EVBUFFER_CHAIN_MAX)
+ space = EVBUFFER_CHAIN_MAX;
+ if ((ev_ssize_t)space < remaining) {
+ (*chainp)->off += space;
+ remaining -= (int)space;
+ } else {
+ (*chainp)->off += remaining;
+ buf->last_with_datap = chainp;
+ break;
+ }
+ chainp = &(*chainp)->next;
+ }
+#else
+ chain->off += n;
+ advance_last_with_data(buf);
+#endif
+ buf->total_len += n;
+ buf->n_add_for_cb += n;
+
+ /* Tell someone about changes in this buffer */
+ evbuffer_invoke_callbacks_(buf);
+ result = n;
+done:
+ EVBUFFER_UNLOCK(buf);
+ return result;
+}
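+
+/* Illustrative usage sketch (assumptions: 'fd' is a connected, readable
+ * socket; 'do_something_with' is a hypothetical consumer; error handling
+ * elided): drain whatever the socket has ready into an evbuffer, letting
+ * the function pick the read size.
+ *
+ *	struct evbuffer *in = evbuffer_new();
+ *	if (in) {
+ *		int n = evbuffer_read(in, fd, -1);    with -1, read up to the cap
+ *		if (n > 0)
+ *			do_something_with(in);
+ *		evbuffer_free(in);
+ *	}
+ */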
+
+#ifdef USE_IOVEC_IMPL
+static inline int
+evbuffer_write_iovec(struct evbuffer *buffer, evutil_socket_t fd,
+ ev_ssize_t howmuch)
+{
+ IOV_TYPE iov[NUM_WRITE_IOVEC];
+ struct evbuffer_chain *chain = buffer->first;
+ int n, i = 0;
+
+ if (howmuch < 0)
+ return -1;
+
+ ASSERT_EVBUFFER_LOCKED(buffer);
+ /* XXX make this top out at some maximal data length? if the
+ * buffer has (say) 1MB in it, split over 128 chains, there's
+ * no way it all gets written in one go. */
+ while (chain != NULL && i < NUM_WRITE_IOVEC && howmuch) {
+#ifdef USE_SENDFILE
+ /* we cannot write the file info via writev */
+ if (chain->flags & EVBUFFER_SENDFILE)
+ break;
+#endif
+ iov[i].IOV_PTR_FIELD = (void *) (chain->buffer + chain->misalign);
+ if ((size_t)howmuch >= chain->off) {
+			/* XXX could be problematic when Windows supports mmap */
+ iov[i++].IOV_LEN_FIELD = (IOV_LEN_TYPE)chain->off;
+ howmuch -= chain->off;
+ } else {
+			/* XXX could be problematic when Windows supports mmap */
+ iov[i++].IOV_LEN_FIELD = (IOV_LEN_TYPE)howmuch;
+ break;
+ }
+ chain = chain->next;
+ }
+ if (! i)
+ return 0;
+
+#ifdef _WIN32
+ {
+ DWORD bytesSent;
+ if (WSASend(fd, iov, i, &bytesSent, 0, NULL, NULL))
+ n = -1;
+ else
+ n = bytesSent;
+ }
+#else
+ n = writev(fd, iov, i);
+#endif
+ return (n);
+}
+#endif
+
+#ifdef USE_SENDFILE
+static inline int
+evbuffer_write_sendfile(struct evbuffer *buffer, evutil_socket_t dest_fd,
+ ev_ssize_t howmuch)
+{
+ struct evbuffer_chain *chain = buffer->first;
+ struct evbuffer_chain_file_segment *info =
+ EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_file_segment,
+ chain);
+ const int source_fd = info->segment->fd;
+#if defined(SENDFILE_IS_MACOSX) || defined(SENDFILE_IS_FREEBSD)
+ int res;
+ ev_off_t len = chain->off;
+#elif defined(SENDFILE_IS_LINUX) || defined(SENDFILE_IS_SOLARIS)
+ ev_ssize_t res;
+ ev_off_t offset = chain->misalign;
+#endif
+
+ ASSERT_EVBUFFER_LOCKED(buffer);
+
+#if defined(SENDFILE_IS_MACOSX)
+ res = sendfile(source_fd, dest_fd, chain->misalign, &len, NULL, 0);
+ if (res == -1 && !EVUTIL_ERR_RW_RETRIABLE(errno))
+ return (-1);
+
+ return (len);
+#elif defined(SENDFILE_IS_FREEBSD)
+ res = sendfile(source_fd, dest_fd, chain->misalign, chain->off, NULL, &len, 0);
+ if (res == -1 && !EVUTIL_ERR_RW_RETRIABLE(errno))
+ return (-1);
+
+ return (len);
+#elif defined(SENDFILE_IS_LINUX)
+ /* TODO(niels): implement splice */
+ res = sendfile(dest_fd, source_fd, &offset, chain->off);
+ if (res == -1 && EVUTIL_ERR_RW_RETRIABLE(errno)) {
+ /* if this is EAGAIN or EINTR return 0; otherwise, -1 */
+ return (0);
+ }
+ return (res);
+#elif defined(SENDFILE_IS_SOLARIS)
+ {
+ const off_t offset_orig = offset;
+ res = sendfile(dest_fd, source_fd, &offset, chain->off);
+ if (res == -1 && EVUTIL_ERR_RW_RETRIABLE(errno)) {
+ if (offset - offset_orig)
+ return offset - offset_orig;
+ /* if this is EAGAIN or EINTR and no bytes were
+ * written, return 0 */
+ return (0);
+ }
+ return (res);
+ }
+#endif
+}
+#endif
+
+int
+evbuffer_write_atmost(struct evbuffer *buffer, evutil_socket_t fd,
+ ev_ssize_t howmuch)
+{
+ int n = -1;
+
+ EVBUFFER_LOCK(buffer);
+
+ if (buffer->freeze_start) {
+ goto done;
+ }
+
+ if (howmuch < 0 || (size_t)howmuch > buffer->total_len)
+ howmuch = buffer->total_len;
+
+ if (howmuch > 0) {
+#ifdef USE_SENDFILE
+ struct evbuffer_chain *chain = buffer->first;
+ if (chain != NULL && (chain->flags & EVBUFFER_SENDFILE))
+ n = evbuffer_write_sendfile(buffer, fd, howmuch);
+ else {
+#endif
+#ifdef USE_IOVEC_IMPL
+ n = evbuffer_write_iovec(buffer, fd, howmuch);
+#elif defined(_WIN32)
+ /* XXX(nickm) Don't disable this code until we know if
+ * the WSARecv code above works. */
+ void *p = evbuffer_pullup(buffer, howmuch);
+ EVUTIL_ASSERT(p || !howmuch);
+ n = send(fd, p, howmuch, 0);
+#else
+ void *p = evbuffer_pullup(buffer, howmuch);
+ EVUTIL_ASSERT(p || !howmuch);
+ n = write(fd, p, howmuch);
+#endif
+#ifdef USE_SENDFILE
+ }
+#endif
+ }
+
+ if (n > 0)
+ evbuffer_drain(buffer, n);
+
+done:
+ EVBUFFER_UNLOCK(buffer);
+ return (n);
+}
+
+int
+evbuffer_write(struct evbuffer *buffer, evutil_socket_t fd)
+{
+ return evbuffer_write_atmost(buffer, fd, -1);
+}
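+
+/* Illustrative usage sketch (assumptions: 'out' holds pending data and 'fd'
+ * is a writable socket): flush in bounded slices so that no single call
+ * writes more than 4096 bytes.
+ *
+ *	while (evbuffer_get_length(out) > 0) {
+ *		int n = evbuffer_write_atmost(out, fd, 4096);
+ *		if (n <= 0)
+ *			break;      real code would check for EAGAIN here
+ *	}
+ */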
+
+unsigned char *
+evbuffer_find(struct evbuffer *buffer, const unsigned char *what, size_t len)
+{
+ unsigned char *search;
+ struct evbuffer_ptr ptr;
+
+ EVBUFFER_LOCK(buffer);
+
+ ptr = evbuffer_search(buffer, (const char *)what, len, NULL);
+ if (ptr.pos < 0) {
+ search = NULL;
+ } else {
+ search = evbuffer_pullup(buffer, ptr.pos + len);
+ if (search)
+ search += ptr.pos;
+ }
+ EVBUFFER_UNLOCK(buffer);
+ return search;
+}
+
+/* Subtract <b>howfar</b> from the position of <b>pos</b> within
+ * <b>buf</b>. Returns 0 on success, -1 on failure.
+ *
+ * This isn't exposed yet, because of potential inefficiency issues.
+ * Maybe it should be. */
+static int
+evbuffer_ptr_subtract(struct evbuffer *buf, struct evbuffer_ptr *pos,
+ size_t howfar)
+{
+ if (pos->pos < 0)
+ return -1;
+ if (howfar > (size_t)pos->pos)
+ return -1;
+ if (pos->internal_.chain && howfar <= pos->internal_.pos_in_chain) {
+ pos->internal_.pos_in_chain -= howfar;
+ pos->pos -= howfar;
+ return 0;
+ } else {
+ const size_t newpos = pos->pos - howfar;
+ /* Here's the inefficient part: it walks over the
+ * chains until we hit newpos. */
+ return evbuffer_ptr_set(buf, pos, newpos, EVBUFFER_PTR_SET);
+ }
+}
+
+int
+evbuffer_ptr_set(struct evbuffer *buf, struct evbuffer_ptr *pos,
+ size_t position, enum evbuffer_ptr_how how)
+{
+ size_t left = position;
+ struct evbuffer_chain *chain = NULL;
+ int result = 0;
+
+ EVBUFFER_LOCK(buf);
+
+ switch (how) {
+ case EVBUFFER_PTR_SET:
+ chain = buf->first;
+ pos->pos = position;
+ position = 0;
+ break;
+ case EVBUFFER_PTR_ADD:
+ /* this avoids iterating over all previous chains if
+ we just want to advance the position */
+ if (pos->pos < 0 || EV_SIZE_MAX - position < (size_t)pos->pos) {
+ EVBUFFER_UNLOCK(buf);
+ return -1;
+ }
+ chain = pos->internal_.chain;
+ pos->pos += position;
+ position = pos->internal_.pos_in_chain;
+ break;
+ }
+
+ EVUTIL_ASSERT(EV_SIZE_MAX - left >= position);
+ while (chain && position + left >= chain->off) {
+ left -= chain->off - position;
+ chain = chain->next;
+ position = 0;
+ }
+ if (chain) {
+ pos->internal_.chain = chain;
+ pos->internal_.pos_in_chain = position + left;
+ } else if (left == 0) {
+ /* The first byte in the (nonexistent) chain after the last chain */
+ pos->internal_.chain = NULL;
+ pos->internal_.pos_in_chain = 0;
+ } else {
+ PTR_NOT_FOUND(pos);
+ result = -1;
+ }
+
+ EVBUFFER_UNLOCK(buf);
+
+ return result;
+}
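+
+/* Illustrative usage sketch (assumption: 'buf' holds at least 15 bytes):
+ * seek a pointer to an absolute offset, then advance it relatively.
+ *
+ *	struct evbuffer_ptr p;
+ *	evbuffer_ptr_set(buf, &p, 10, EVBUFFER_PTR_SET);    absolute offset 10
+ *	evbuffer_ptr_set(buf, &p, 5, EVBUFFER_PTR_ADD);      now at offset 15
+ */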
+
+/**
+ Compare the bytes in buf at position pos to the len bytes in mem. Return
+ less than 0, 0, or greater than 0, as memcmp does.
+ */
+static int
+evbuffer_ptr_memcmp(const struct evbuffer *buf, const struct evbuffer_ptr *pos,
+ const char *mem, size_t len)
+{
+ struct evbuffer_chain *chain;
+ size_t position;
+ int r;
+
+ ASSERT_EVBUFFER_LOCKED(buf);
+
+ if (pos->pos < 0 ||
+ EV_SIZE_MAX - len < (size_t)pos->pos ||
+ pos->pos + len > buf->total_len)
+ return -1;
+
+ chain = pos->internal_.chain;
+ position = pos->internal_.pos_in_chain;
+ while (len && chain) {
+ size_t n_comparable;
+ if (len + position > chain->off)
+ n_comparable = chain->off - position;
+ else
+ n_comparable = len;
+ r = memcmp(chain->buffer + chain->misalign + position, mem,
+ n_comparable);
+ if (r)
+ return r;
+ mem += n_comparable;
+ len -= n_comparable;
+ position = 0;
+ chain = chain->next;
+ }
+
+ return 0;
+}
+
+struct evbuffer_ptr
+evbuffer_search(struct evbuffer *buffer, const char *what, size_t len, const struct evbuffer_ptr *start)
+{
+ return evbuffer_search_range(buffer, what, len, start, NULL);
+}
+
+struct evbuffer_ptr
+evbuffer_search_range(struct evbuffer *buffer, const char *what, size_t len, const struct evbuffer_ptr *start, const struct evbuffer_ptr *end)
+{
+ struct evbuffer_ptr pos;
+ struct evbuffer_chain *chain, *last_chain = NULL;
+ const unsigned char *p;
+ char first;
+
+ EVBUFFER_LOCK(buffer);
+
+ if (start) {
+ memcpy(&pos, start, sizeof(pos));
+ chain = pos.internal_.chain;
+ } else {
+ pos.pos = 0;
+ chain = pos.internal_.chain = buffer->first;
+ pos.internal_.pos_in_chain = 0;
+ }
+
+ if (end)
+ last_chain = end->internal_.chain;
+
+ if (!len || len > EV_SSIZE_MAX)
+ goto done;
+
+ first = what[0];
+
+ while (chain) {
+ const unsigned char *start_at =
+ chain->buffer + chain->misalign +
+ pos.internal_.pos_in_chain;
+ p = memchr(start_at, first,
+ chain->off - pos.internal_.pos_in_chain);
+ if (p) {
+ pos.pos += p - start_at;
+ pos.internal_.pos_in_chain += p - start_at;
+ if (!evbuffer_ptr_memcmp(buffer, &pos, what, len)) {
+ if (end && pos.pos + (ev_ssize_t)len > end->pos)
+ goto not_found;
+ else
+ goto done;
+ }
+ ++pos.pos;
+ ++pos.internal_.pos_in_chain;
+ if (pos.internal_.pos_in_chain == chain->off) {
+ chain = pos.internal_.chain = chain->next;
+ pos.internal_.pos_in_chain = 0;
+ }
+ } else {
+ if (chain == last_chain)
+ goto not_found;
+ pos.pos += chain->off - pos.internal_.pos_in_chain;
+ chain = pos.internal_.chain = chain->next;
+ pos.internal_.pos_in_chain = 0;
+ }
+ }
+
+not_found:
+ PTR_NOT_FOUND(&pos);
+done:
+ EVBUFFER_UNLOCK(buffer);
+ return pos;
+}
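+
+/* Illustrative usage sketch (assumption: 'buf' contains an HTTP-style
+ * header block): find a delimiter; a negative pos means "not found".
+ *
+ *	struct evbuffer_ptr hit = evbuffer_search(buf, "\r\n\r\n", 4, NULL);
+ *	if (hit.pos >= 0) {
+ *		... headers end at offset hit.pos ...
+ *	}
+ */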
+
+int
+evbuffer_peek(struct evbuffer *buffer, ev_ssize_t len,
+ struct evbuffer_ptr *start_at,
+ struct evbuffer_iovec *vec, int n_vec)
+{
+ struct evbuffer_chain *chain;
+ int idx = 0;
+ ev_ssize_t len_so_far = 0;
+
+ /* Avoid locking in trivial edge cases */
+ if (start_at && start_at->internal_.chain == NULL)
+ return 0;
+
+ EVBUFFER_LOCK(buffer);
+
+ if (start_at) {
+ chain = start_at->internal_.chain;
+ len_so_far = chain->off
+ - start_at->internal_.pos_in_chain;
+ idx = 1;
+ if (n_vec > 0) {
+ vec[0].iov_base = chain->buffer + chain->misalign
+ + start_at->internal_.pos_in_chain;
+ vec[0].iov_len = len_so_far;
+ }
+ chain = chain->next;
+ } else {
+ chain = buffer->first;
+ }
+
+ if (n_vec == 0 && len < 0) {
+ /* If no vectors are provided and they asked for "everything",
+ * pretend they asked for the actual available amount. */
+ len = buffer->total_len;
+ if (start_at) {
+ len -= start_at->pos;
+ }
+ }
+
+ while (chain) {
+ if (len >= 0 && len_so_far >= len)
+ break;
+ if (idx<n_vec) {
+ vec[idx].iov_base = chain->buffer + chain->misalign;
+ vec[idx].iov_len = chain->off;
+ } else if (len<0) {
+ break;
+ }
+ ++idx;
+ len_so_far += chain->off;
+ chain = chain->next;
+ }
+
+ EVBUFFER_UNLOCK(buffer);
+
+ return idx;
+}
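+
+/* Illustrative usage sketch: inspect up to 256 buffered bytes without
+ * copying or draining them.  Note that the return value may exceed the
+ * number of vectors supplied, so both bounds are checked below.
+ *
+ *	struct evbuffer_iovec v[2];
+ *	int n = evbuffer_peek(buf, 256, NULL, v, 2);
+ *	int i;
+ *	for (i = 0; i < n && i < 2; ++i) {
+ *		... v[i].iov_base / v[i].iov_len describe readable data ...
+ *	}
+ */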
+
+
+int
+evbuffer_add_vprintf(struct evbuffer *buf, const char *fmt, va_list ap)
+{
+ char *buffer;
+ size_t space;
+ int sz, result = -1;
+ va_list aq;
+ struct evbuffer_chain *chain;
+
+
+ EVBUFFER_LOCK(buf);
+
+ if (buf->freeze_end) {
+ goto done;
+ }
+
+ /* make sure that at least some space is available */
+ if ((chain = evbuffer_expand_singlechain(buf, 64)) == NULL)
+ goto done;
+
+ for (;;) {
+#if 0
+ size_t used = chain->misalign + chain->off;
+ buffer = (char *)chain->buffer + chain->misalign + chain->off;
+ EVUTIL_ASSERT(chain->buffer_len >= used);
+ space = chain->buffer_len - used;
+#endif
+ buffer = (char*) CHAIN_SPACE_PTR(chain);
+ space = (size_t) CHAIN_SPACE_LEN(chain);
+
+#ifndef va_copy
+#define va_copy(dst, src) memcpy(&(dst), &(src), sizeof(va_list))
+#endif
+ va_copy(aq, ap);
+
+ sz = evutil_vsnprintf(buffer, space, fmt, aq);
+
+ va_end(aq);
+
+ if (sz < 0)
+ goto done;
+ if (INT_MAX >= EVBUFFER_CHAIN_MAX &&
+ (size_t)sz >= EVBUFFER_CHAIN_MAX)
+ goto done;
+ if ((size_t)sz < space) {
+ chain->off += sz;
+ buf->total_len += sz;
+ buf->n_add_for_cb += sz;
+
+ advance_last_with_data(buf);
+ evbuffer_invoke_callbacks_(buf);
+ result = sz;
+ goto done;
+ }
+ if ((chain = evbuffer_expand_singlechain(buf, sz + 1)) == NULL)
+ goto done;
+ }
+ /* NOTREACHED */
+
+done:
+ EVBUFFER_UNLOCK(buf);
+ return result;
+}
+
+int
+evbuffer_add_printf(struct evbuffer *buf, const char *fmt, ...)
+{
+ int res = -1;
+ va_list ap;
+
+ va_start(ap, fmt);
+ res = evbuffer_add_vprintf(buf, fmt, ap);
+ va_end(ap);
+
+ return (res);
+}
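+
+/* Illustrative usage sketch (assumptions: 'out' and 'body' are evbuffers
+ * built elsewhere): append formatted text, then splice in another buffer.
+ *
+ *	evbuffer_add_printf(out, "Content-Length: %d\r\n\r\n",
+ *	    (int)evbuffer_get_length(body));
+ *	evbuffer_add_buffer(out, body);      moves 'body' contents into 'out'
+ */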
+
+int
+evbuffer_add_reference(struct evbuffer *outbuf,
+ const void *data, size_t datlen,
+ evbuffer_ref_cleanup_cb cleanupfn, void *extra)
+{
+ struct evbuffer_chain *chain;
+ struct evbuffer_chain_reference *info;
+ int result = -1;
+
+ chain = evbuffer_chain_new(sizeof(struct evbuffer_chain_reference));
+ if (!chain)
+ return (-1);
+ chain->flags |= EVBUFFER_REFERENCE | EVBUFFER_IMMUTABLE;
+ chain->buffer = (unsigned char *)data;
+ chain->buffer_len = datlen;
+ chain->off = datlen;
+
+ info = EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_reference, chain);
+ info->cleanupfn = cleanupfn;
+ info->extra = extra;
+
+ EVBUFFER_LOCK(outbuf);
+ if (outbuf->freeze_end) {
+ /* don't call chain_free; we do not want to actually invoke
+ * the cleanup function */
+ mm_free(chain);
+ goto done;
+ }
+ evbuffer_chain_insert(outbuf, chain);
+ outbuf->n_add_for_cb += datlen;
+
+ evbuffer_invoke_callbacks_(outbuf);
+
+ result = 0;
+done:
+ EVBUFFER_UNLOCK(outbuf);
+
+ return result;
+}
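+
+/* Illustrative usage sketch: add constant data by reference so it is never
+ * copied; the cleanup callback runs when the referencing chain is released.
+ * ('out' and the names below are hypothetical.)
+ *
+ *	static const char banner[] = "220 ready\r\n";
+ *	static void banner_cleanup(const void *data, size_t len, void *arg) {}
+ *	...
+ *	evbuffer_add_reference(out, banner, sizeof(banner) - 1,
+ *	    banner_cleanup, NULL);
+ */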
+
+/* TODO(niels): we may want to automagically convert to mmap, in
+ * case evbuffer_remove() or evbuffer_pullup() are being used.
+ */
+struct evbuffer_file_segment *
+evbuffer_file_segment_new(
+ int fd, ev_off_t offset, ev_off_t length, unsigned flags)
+{
+ struct evbuffer_file_segment *seg =
+ mm_calloc(sizeof(struct evbuffer_file_segment), 1);
+ if (!seg)
+ return NULL;
+ seg->refcnt = 1;
+ seg->fd = fd;
+ seg->flags = flags;
+ seg->file_offset = offset;
+ seg->cleanup_cb = NULL;
+ seg->cleanup_cb_arg = NULL;
+#ifdef _WIN32
+#ifndef lseek
+#define lseek _lseeki64
+#endif
+#ifndef fstat
+#define fstat _fstat
+#endif
+#ifndef stat
+#define stat _stat
+#endif
+#endif
+ if (length == -1) {
+ struct stat st;
+ if (fstat(fd, &st) < 0)
+ goto err;
+ length = st.st_size;
+ }
+ seg->length = length;
+
+ if (offset < 0 || length < 0 ||
+ ((ev_uint64_t)length > EVBUFFER_CHAIN_MAX) ||
+ (ev_uint64_t)offset > (ev_uint64_t)(EVBUFFER_CHAIN_MAX - length))
+ goto err;
+
+#if defined(USE_SENDFILE)
+ if (!(flags & EVBUF_FS_DISABLE_SENDFILE)) {
+ seg->can_sendfile = 1;
+ goto done;
+ }
+#endif
+
+ if (evbuffer_file_segment_materialize(seg)<0)
+ goto err;
+
+#if defined(USE_SENDFILE)
+done:
+#endif
+ if (!(flags & EVBUF_FS_DISABLE_LOCKING)) {
+ EVTHREAD_ALLOC_LOCK(seg->lock, 0);
+ }
+ return seg;
+err:
+ mm_free(seg);
+ return NULL;
+}
+
+#ifdef EVENT__HAVE_MMAP
+static long
+get_page_size(void)
+{
+#ifdef SC_PAGE_SIZE
+ return sysconf(SC_PAGE_SIZE);
+#elif defined(_SC_PAGE_SIZE)
+ return sysconf(_SC_PAGE_SIZE);
+#else
+ return 1;
+#endif
+}
+#endif
+
+/* DOCDOC */
+/* Requires lock */
+static int
+evbuffer_file_segment_materialize(struct evbuffer_file_segment *seg)
+{
+ const unsigned flags = seg->flags;
+ const int fd = seg->fd;
+ const ev_off_t length = seg->length;
+ const ev_off_t offset = seg->file_offset;
+
+ if (seg->contents)
+ return 0; /* already materialized */
+
+#if defined(EVENT__HAVE_MMAP)
+ if (!(flags & EVBUF_FS_DISABLE_MMAP)) {
+ off_t offset_rounded = 0, offset_leftover = 0;
+ void *mapped;
+ if (offset) {
+			/* mmap implementations don't generally like us
+			 * to have an offset that isn't a round multiple
+			 * of the page size. */
+ long page_size = get_page_size();
+ if (page_size == -1)
+ goto err;
+ offset_leftover = offset % page_size;
+ offset_rounded = offset - offset_leftover;
+ }
+ mapped = mmap(NULL, length + offset_leftover,
+ PROT_READ,
+#ifdef MAP_NOCACHE
+ MAP_NOCACHE | /* ??? */
+#endif
+#ifdef MAP_FILE
+ MAP_FILE |
+#endif
+ MAP_PRIVATE,
+ fd, offset_rounded);
+ if (mapped == MAP_FAILED) {
+ event_warn("%s: mmap(%d, %d, %zu) failed",
+ __func__, fd, 0, (size_t)(offset + length));
+ } else {
+ seg->mapping = mapped;
+ seg->contents = (char*)mapped+offset_leftover;
+ seg->mmap_offset = 0;
+ seg->is_mapping = 1;
+ goto done;
+ }
+ }
+#endif
+#ifdef _WIN32
+ if (!(flags & EVBUF_FS_DISABLE_MMAP)) {
+ intptr_t h = _get_osfhandle(fd);
+ HANDLE m;
+ ev_uint64_t total_size = length+offset;
+ if ((HANDLE)h == INVALID_HANDLE_VALUE)
+ goto err;
+ m = CreateFileMapping((HANDLE)h, NULL, PAGE_READONLY,
+ (total_size >> 32), total_size & 0xfffffffful,
+ NULL);
+ if (m != INVALID_HANDLE_VALUE) { /* Does h leak? */
+ seg->mapping_handle = m;
+ seg->mmap_offset = offset;
+ seg->is_mapping = 1;
+ goto done;
+ }
+ }
+#endif
+ {
+ ev_off_t start_pos = lseek(fd, 0, SEEK_CUR), pos;
+ ev_off_t read_so_far = 0;
+ char *mem;
+ int e;
+ ev_ssize_t n = 0;
+ if (!(mem = mm_malloc(length)))
+ goto err;
+ if (start_pos < 0) {
+ mm_free(mem);
+ goto err;
+ }
+ if (lseek(fd, offset, SEEK_SET) < 0) {
+ mm_free(mem);
+ goto err;
+ }
+ while (read_so_far < length) {
+ n = read(fd, mem+read_so_far, length-read_so_far);
+ if (n <= 0)
+ break;
+ read_so_far += n;
+ }
+
+ e = errno;
+ pos = lseek(fd, start_pos, SEEK_SET);
+ if (n < 0 || (n == 0 && length > read_so_far)) {
+ mm_free(mem);
+ errno = e;
+ goto err;
+ } else if (pos < 0) {
+ mm_free(mem);
+ goto err;
+ }
+
+ seg->contents = mem;
+ }
+
+done:
+ return 0;
+err:
+ return -1;
+}
+
+void evbuffer_file_segment_add_cleanup_cb(struct evbuffer_file_segment *seg,
+ evbuffer_file_segment_cleanup_cb cb, void* arg)
+{
+ EVUTIL_ASSERT(seg->refcnt > 0);
+ seg->cleanup_cb = cb;
+ seg->cleanup_cb_arg = arg;
+}
+
+void
+evbuffer_file_segment_free(struct evbuffer_file_segment *seg)
+{
+ int refcnt;
+ EVLOCK_LOCK(seg->lock, 0);
+ refcnt = --seg->refcnt;
+ EVLOCK_UNLOCK(seg->lock, 0);
+ if (refcnt > 0)
+ return;
+ EVUTIL_ASSERT(refcnt == 0);
+
+ if (seg->is_mapping) {
+#ifdef _WIN32
+ CloseHandle(seg->mapping_handle);
+#elif defined (EVENT__HAVE_MMAP)
+ off_t offset_leftover;
+ offset_leftover = seg->file_offset % get_page_size();
+ if (munmap(seg->mapping, seg->length + offset_leftover) == -1)
+ event_warn("%s: munmap failed", __func__);
+#endif
+ } else if (seg->contents) {
+ mm_free(seg->contents);
+ }
+
+ if ((seg->flags & EVBUF_FS_CLOSE_ON_FREE) && seg->fd >= 0) {
+ close(seg->fd);
+ }
+
+ if (seg->cleanup_cb) {
+ (*seg->cleanup_cb)((struct evbuffer_file_segment const*)seg,
+ seg->flags, seg->cleanup_cb_arg);
+ seg->cleanup_cb = NULL;
+ seg->cleanup_cb_arg = NULL;
+ }
+
+ EVTHREAD_FREE_LOCK(seg->lock, 0);
+ mm_free(seg);
+}
+
+int
+evbuffer_add_file_segment(struct evbuffer *buf,
+ struct evbuffer_file_segment *seg, ev_off_t offset, ev_off_t length)
+{
+ struct evbuffer_chain *chain;
+ struct evbuffer_chain_file_segment *extra;
+ int can_use_sendfile = 0;
+
+ EVBUFFER_LOCK(buf);
+ EVLOCK_LOCK(seg->lock, 0);
+ if (buf->flags & EVBUFFER_FLAG_DRAINS_TO_FD) {
+ can_use_sendfile = 1;
+ } else {
+ if (!seg->contents) {
+ if (evbuffer_file_segment_materialize(seg)<0) {
+ EVLOCK_UNLOCK(seg->lock, 0);
+ EVBUFFER_UNLOCK(buf);
+ return -1;
+ }
+ }
+ }
+ ++seg->refcnt;
+ EVLOCK_UNLOCK(seg->lock, 0);
+
+ if (buf->freeze_end)
+ goto err;
+
+ if (length < 0) {
+ if (offset > seg->length)
+ goto err;
+ length = seg->length - offset;
+ }
+
+ /* Can we actually add this? */
+ if (offset+length > seg->length)
+ goto err;
+
+ chain = evbuffer_chain_new(sizeof(struct evbuffer_chain_file_segment));
+ if (!chain)
+ goto err;
+ extra = EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_file_segment, chain);
+
+ chain->flags |= EVBUFFER_IMMUTABLE|EVBUFFER_FILESEGMENT;
+ if (can_use_sendfile && seg->can_sendfile) {
+ chain->flags |= EVBUFFER_SENDFILE;
+ chain->misalign = seg->file_offset + offset;
+ chain->off = length;
+ chain->buffer_len = chain->misalign + length;
+ } else if (seg->is_mapping) {
+#ifdef _WIN32
+ ev_uint64_t total_offset = seg->mmap_offset+offset;
+ ev_uint64_t offset_rounded=0, offset_remaining=0;
+ LPVOID data;
+ if (total_offset) {
+ SYSTEM_INFO si;
+ memset(&si, 0, sizeof(si)); /* cargo cult */
+ GetSystemInfo(&si);
+ offset_remaining = total_offset % si.dwAllocationGranularity;
+ offset_rounded = total_offset - offset_remaining;
+ }
+ data = MapViewOfFile(
+ seg->mapping_handle,
+ FILE_MAP_READ,
+ offset_rounded >> 32,
+ offset_rounded & 0xfffffffful,
+ length + offset_remaining);
+ if (data == NULL) {
+ mm_free(chain);
+ goto err;
+ }
+ chain->buffer = (unsigned char*) data;
+ chain->buffer_len = length+offset_remaining;
+ chain->misalign = offset_remaining;
+ chain->off = length;
+#else
+ chain->buffer = (unsigned char*)(seg->contents + offset);
+ chain->buffer_len = length;
+ chain->off = length;
+#endif
+ } else {
+ chain->buffer = (unsigned char*)(seg->contents + offset);
+ chain->buffer_len = length;
+ chain->off = length;
+ }
+
+ extra->segment = seg;
+ buf->n_add_for_cb += length;
+ evbuffer_chain_insert(buf, chain);
+
+ evbuffer_invoke_callbacks_(buf);
+
+ EVBUFFER_UNLOCK(buf);
+
+ return 0;
+err:
+ EVBUFFER_UNLOCK(buf);
+ evbuffer_file_segment_free(seg); /* Lowers the refcount */
+ return -1;
+}
+
+int
+evbuffer_add_file(struct evbuffer *buf, int fd, ev_off_t offset, ev_off_t length)
+{
+ struct evbuffer_file_segment *seg;
+ unsigned flags = EVBUF_FS_CLOSE_ON_FREE;
+ int r;
+
+ seg = evbuffer_file_segment_new(fd, offset, length, flags);
+ if (!seg)
+ return -1;
+ r = evbuffer_add_file_segment(buf, seg, 0, length);
+ if (r == 0)
+ evbuffer_file_segment_free(seg);
+ return r;
+}
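+
+/* Illustrative usage sketch (error handling elided): queue a whole file for
+ * output; on success the buffer takes ownership of the descriptor and will
+ * close it once the data has been freed.
+ *
+ *	int fd = open("reply.html", O_RDONLY);
+ *	if (fd >= 0)
+ *		evbuffer_add_file(out, fd, 0, -1);    length -1 means "to EOF"
+ */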
+
+void
+evbuffer_setcb(struct evbuffer *buffer, evbuffer_cb cb, void *cbarg)
+{
+ EVBUFFER_LOCK(buffer);
+
+ if (!LIST_EMPTY(&buffer->callbacks))
+ evbuffer_remove_all_callbacks(buffer);
+
+ if (cb) {
+ struct evbuffer_cb_entry *ent =
+ evbuffer_add_cb(buffer, NULL, cbarg);
+ ent->cb.cb_obsolete = cb;
+ ent->flags |= EVBUFFER_CB_OBSOLETE;
+ }
+ EVBUFFER_UNLOCK(buffer);
+}
+
+struct evbuffer_cb_entry *
+evbuffer_add_cb(struct evbuffer *buffer, evbuffer_cb_func cb, void *cbarg)
+{
+ struct evbuffer_cb_entry *e;
+ if (! (e = mm_calloc(1, sizeof(struct evbuffer_cb_entry))))
+ return NULL;
+ EVBUFFER_LOCK(buffer);
+ e->cb.cb_func = cb;
+ e->cbarg = cbarg;
+ e->flags = EVBUFFER_CB_ENABLED;
+ LIST_INSERT_HEAD(&buffer->callbacks, e, next);
+ EVBUFFER_UNLOCK(buffer);
+ return e;
+}
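+
+/* Illustrative usage sketch (names are hypothetical): register a callback
+ * that observes how a buffer's length changes after each operation.
+ *
+ *	static void on_change(struct evbuffer *b,
+ *	    const struct evbuffer_cb_info *info, void *arg)
+ *	{
+ *		if (info->n_added || info->n_deleted)
+ *			... length went from info->orig_size to the current value ...
+ *	}
+ *	...
+ *	evbuffer_add_cb(buf, on_change, NULL);
+ */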
+
+int
+evbuffer_remove_cb_entry(struct evbuffer *buffer,
+ struct evbuffer_cb_entry *ent)
+{
+ EVBUFFER_LOCK(buffer);
+ LIST_REMOVE(ent, next);
+ EVBUFFER_UNLOCK(buffer);
+ mm_free(ent);
+ return 0;
+}
+
+int
+evbuffer_remove_cb(struct evbuffer *buffer, evbuffer_cb_func cb, void *cbarg)
+{
+ struct evbuffer_cb_entry *cbent;
+ int result = -1;
+ EVBUFFER_LOCK(buffer);
+ LIST_FOREACH(cbent, &buffer->callbacks, next) {
+ if (cb == cbent->cb.cb_func && cbarg == cbent->cbarg) {
+ result = evbuffer_remove_cb_entry(buffer, cbent);
+ goto done;
+ }
+ }
+done:
+ EVBUFFER_UNLOCK(buffer);
+ return result;
+}
+
+int
+evbuffer_cb_set_flags(struct evbuffer *buffer,
+ struct evbuffer_cb_entry *cb, ev_uint32_t flags)
+{
+ /* the user isn't allowed to mess with these. */
+ flags &= ~EVBUFFER_CB_INTERNAL_FLAGS;
+ EVBUFFER_LOCK(buffer);
+ cb->flags |= flags;
+ EVBUFFER_UNLOCK(buffer);
+ return 0;
+}
+
+int
+evbuffer_cb_clear_flags(struct evbuffer *buffer,
+ struct evbuffer_cb_entry *cb, ev_uint32_t flags)
+{
+ /* the user isn't allowed to mess with these. */
+ flags &= ~EVBUFFER_CB_INTERNAL_FLAGS;
+ EVBUFFER_LOCK(buffer);
+ cb->flags &= ~flags;
+ EVBUFFER_UNLOCK(buffer);
+ return 0;
+}
+
+int
+evbuffer_freeze(struct evbuffer *buffer, int start)
+{
+ EVBUFFER_LOCK(buffer);
+ if (start)
+ buffer->freeze_start = 1;
+ else
+ buffer->freeze_end = 1;
+ EVBUFFER_UNLOCK(buffer);
+ return 0;
+}
+
+int
+evbuffer_unfreeze(struct evbuffer *buffer, int start)
+{
+ EVBUFFER_LOCK(buffer);
+ if (start)
+ buffer->freeze_start = 0;
+ else
+ buffer->freeze_end = 0;
+ EVBUFFER_UNLOCK(buffer);
+ return 0;
+}
+
+#if 0
+void
+evbuffer_cb_suspend(struct evbuffer *buffer, struct evbuffer_cb_entry *cb)
+{
+ if (!(cb->flags & EVBUFFER_CB_SUSPENDED)) {
+ cb->size_before_suspend = evbuffer_get_length(buffer);
+ cb->flags |= EVBUFFER_CB_SUSPENDED;
+ }
+}
+
+void
+evbuffer_cb_unsuspend(struct evbuffer *buffer, struct evbuffer_cb_entry *cb)
+{
+ if ((cb->flags & EVBUFFER_CB_SUSPENDED)) {
+ unsigned call = (cb->flags & EVBUFFER_CB_CALL_ON_UNSUSPEND);
+ size_t sz = cb->size_before_suspend;
+ cb->flags &= ~(EVBUFFER_CB_SUSPENDED|
+ EVBUFFER_CB_CALL_ON_UNSUSPEND);
+ cb->size_before_suspend = 0;
+ if (call && (cb->flags & EVBUFFER_CB_ENABLED)) {
+ cb->cb(buffer, sz, evbuffer_get_length(buffer), cb->cbarg);
+ }
+ }
+}
+#endif
+
+int
+evbuffer_get_callbacks_(struct evbuffer *buffer, struct event_callback **cbs,
+ int max_cbs)
+{
+ int r = 0;
+ EVBUFFER_LOCK(buffer);
+ if (buffer->deferred_cbs) {
+ if (max_cbs < 1) {
+ r = -1;
+ goto done;
+ }
+ cbs[0] = &buffer->deferred;
+ r = 1;
+ }
+done:
+ EVBUFFER_UNLOCK(buffer);
+ return r;
+}
diff --git a/libs/libevent/src/buffer_iocp.c b/libs/libevent/src/buffer_iocp.c
new file mode 100644
index 0000000000..2d76a90e77
--- /dev/null
+++ b/libs/libevent/src/buffer_iocp.c
@@ -0,0 +1,326 @@
+/*
+ * Copyright (c) 2009-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ @file buffer_iocp.c
+
+ This module implements overlapped read and write functions for evbuffer
+ objects on Windows.
+*/
+#include "event2/event-config.h"
+#include "evconfig-private.h"
+
+#include "event2/buffer.h"
+#include "event2/buffer_compat.h"
+#include "event2/util.h"
+#include "event2/thread.h"
+#include "util-internal.h"
+#include "evthread-internal.h"
+#include "evbuffer-internal.h"
+#include "iocp-internal.h"
+#include "mm-internal.h"
+
+#include <winsock2.h>
+#include <windows.h>
+#include <stdio.h>
+
+#define MAX_WSABUFS 16
+
+/** An evbuffer that can handle overlapped IO. */
+struct evbuffer_overlapped {
+ struct evbuffer buffer;
+ /** The socket that we're doing overlapped IO on. */
+ evutil_socket_t fd;
+
+ /** pending I/O type */
+ unsigned read_in_progress : 1;
+ unsigned write_in_progress : 1;
+
+ /** The first pinned chain in the buffer. */
+ struct evbuffer_chain *first_pinned;
+
+	/** How many chains are pinned; that is, how many of the entries in
+	 * 'buffers' are we using. */
+ int n_buffers;
+ WSABUF buffers[MAX_WSABUFS];
+};
+
+/** Given an evbuffer, return the corresponding evbuffer_overlapped
+ * structure, or NULL if the evbuffer isn't overlapped. */
+static inline struct evbuffer_overlapped *
+upcast_evbuffer(struct evbuffer *buf)
+{
+ if (!buf || !buf->is_overlapped)
+ return NULL;
+ return EVUTIL_UPCAST(buf, struct evbuffer_overlapped, buffer);
+}
+
+/** Unpin all the chains noted as pinned in 'eo'. */
+static void
+pin_release(struct evbuffer_overlapped *eo, unsigned flag)
+{
+ int i;
+ struct evbuffer_chain *next, *chain = eo->first_pinned;
+
+ for (i = 0; i < eo->n_buffers; ++i) {
+ EVUTIL_ASSERT(chain);
+ next = chain->next;
+ evbuffer_chain_unpin_(chain, flag);
+ chain = next;
+ }
+}
+
+void
+evbuffer_commit_read_(struct evbuffer *evbuf, ev_ssize_t nBytes)
+{
+ struct evbuffer_overlapped *buf = upcast_evbuffer(evbuf);
+ struct evbuffer_chain **chainp;
+ size_t remaining, len;
+ unsigned i;
+
+ EVBUFFER_LOCK(evbuf);
+ EVUTIL_ASSERT(buf->read_in_progress && !buf->write_in_progress);
+ EVUTIL_ASSERT(nBytes >= 0); /* XXXX Can this be false? */
+
+ evbuffer_unfreeze(evbuf, 0);
+
+ chainp = evbuf->last_with_datap;
+ if (!((*chainp)->flags & EVBUFFER_MEM_PINNED_R))
+ chainp = &(*chainp)->next;
+ remaining = nBytes;
+ for (i = 0; remaining > 0 && i < (unsigned)buf->n_buffers; ++i) {
+ EVUTIL_ASSERT(*chainp);
+ len = buf->buffers[i].len;
+ if (remaining < len)
+ len = remaining;
+ (*chainp)->off += len;
+ evbuf->last_with_datap = chainp;
+ remaining -= len;
+ chainp = &(*chainp)->next;
+ }
+
+ pin_release(buf, EVBUFFER_MEM_PINNED_R);
+
+ buf->read_in_progress = 0;
+
+ evbuf->total_len += nBytes;
+ evbuf->n_add_for_cb += nBytes;
+
+ evbuffer_invoke_callbacks_(evbuf);
+
+ evbuffer_decref_and_unlock_(evbuf);
+}
+
+void
+evbuffer_commit_write_(struct evbuffer *evbuf, ev_ssize_t nBytes)
+{
+ struct evbuffer_overlapped *buf = upcast_evbuffer(evbuf);
+
+ EVBUFFER_LOCK(evbuf);
+ EVUTIL_ASSERT(buf->write_in_progress && !buf->read_in_progress);
+ evbuffer_unfreeze(evbuf, 1);
+ evbuffer_drain(evbuf, nBytes);
+ pin_release(buf,EVBUFFER_MEM_PINNED_W);
+ buf->write_in_progress = 0;
+ evbuffer_decref_and_unlock_(evbuf);
+}
+
+struct evbuffer *
+evbuffer_overlapped_new_(evutil_socket_t fd)
+{
+ struct evbuffer_overlapped *evo;
+
+ evo = mm_calloc(1, sizeof(struct evbuffer_overlapped));
+ if (!evo)
+ return NULL;
+
+ LIST_INIT(&evo->buffer.callbacks);
+ evo->buffer.refcnt = 1;
+ evo->buffer.last_with_datap = &evo->buffer.first;
+
+ evo->buffer.is_overlapped = 1;
+ evo->fd = fd;
+
+ return &evo->buffer;
+}
+
+int
+evbuffer_launch_write_(struct evbuffer *buf, ev_ssize_t at_most,
+ struct event_overlapped *ol)
+{
+ struct evbuffer_overlapped *buf_o = upcast_evbuffer(buf);
+ int r = -1;
+ int i;
+ struct evbuffer_chain *chain;
+ DWORD bytesSent;
+
+ if (!buf) {
+ /* No buffer, or it isn't overlapped */
+ return -1;
+ }
+
+ EVBUFFER_LOCK(buf);
+ EVUTIL_ASSERT(!buf_o->read_in_progress);
+ if (buf->freeze_start || buf_o->write_in_progress)
+ goto done;
+ if (!buf->total_len) {
+ /* Nothing to write */
+ r = 0;
+ goto done;
+ } else if (at_most < 0 || (size_t)at_most > buf->total_len) {
+ at_most = buf->total_len;
+ }
+ evbuffer_freeze(buf, 1);
+
+ buf_o->first_pinned = NULL;
+ buf_o->n_buffers = 0;
+ memset(buf_o->buffers, 0, sizeof(buf_o->buffers));
+
+ chain = buf_o->first_pinned = buf->first;
+
+ for (i=0; i < MAX_WSABUFS && chain; ++i, chain=chain->next) {
+ WSABUF *b = &buf_o->buffers[i];
+ b->buf = (char*)( chain->buffer + chain->misalign );
+ evbuffer_chain_pin_(chain, EVBUFFER_MEM_PINNED_W);
+
+ if ((size_t)at_most > chain->off) {
+ /* XXXX Cast is safe for now, since win32 has no
+ mmaped chains. But later, we need to have this
+ add more WSAbufs if chain->off is greater than
+ ULONG_MAX */
+ b->len = (unsigned long)chain->off;
+ at_most -= chain->off;
+ } else {
+ b->len = (unsigned long)at_most;
+ ++i;
+ break;
+ }
+ }
+
+ buf_o->n_buffers = i;
+ evbuffer_incref_(buf);
+ if (WSASend(buf_o->fd, buf_o->buffers, i, &bytesSent, 0,
+ &ol->overlapped, NULL)) {
+ int error = WSAGetLastError();
+ if (error != WSA_IO_PENDING) {
+ /* An actual error. */
+ pin_release(buf_o, EVBUFFER_MEM_PINNED_W);
+ evbuffer_unfreeze(buf, 1);
+ evbuffer_free(buf); /* decref */
+ goto done;
+ }
+ }
+
+ buf_o->write_in_progress = 1;
+ r = 0;
+done:
+ EVBUFFER_UNLOCK(buf);
+ return r;
+}
+
+int
+evbuffer_launch_read_(struct evbuffer *buf, size_t at_most,
+ struct event_overlapped *ol)
+{
+ struct evbuffer_overlapped *buf_o = upcast_evbuffer(buf);
+ int r = -1, i;
+ int nvecs;
+ int npin=0;
+ struct evbuffer_chain *chain=NULL, **chainp;
+ DWORD bytesRead;
+ DWORD flags = 0;
+ struct evbuffer_iovec vecs[MAX_WSABUFS];
+
+ if (!buf_o)
+ return -1;
+ EVBUFFER_LOCK(buf);
+ EVUTIL_ASSERT(!buf_o->write_in_progress);
+ if (buf->freeze_end || buf_o->read_in_progress)
+ goto done;
+
+ buf_o->first_pinned = NULL;
+ buf_o->n_buffers = 0;
+ memset(buf_o->buffers, 0, sizeof(buf_o->buffers));
+
+ if (evbuffer_expand_fast_(buf, at_most, MAX_WSABUFS) == -1)
+ goto done;
+ evbuffer_freeze(buf, 0);
+
+ nvecs = evbuffer_read_setup_vecs_(buf, at_most,
+ vecs, MAX_WSABUFS, &chainp, 1);
+ for (i=0;i<nvecs;++i) {
+ WSABUF_FROM_EVBUFFER_IOV(
+ &buf_o->buffers[i],
+ &vecs[i]);
+ }
+
+ buf_o->n_buffers = nvecs;
+ buf_o->first_pinned = chain = *chainp;
+
+ npin=0;
+ for ( ; chain; chain = chain->next) {
+ evbuffer_chain_pin_(chain, EVBUFFER_MEM_PINNED_R);
+ ++npin;
+ }
+ EVUTIL_ASSERT(npin == nvecs);
+
+ evbuffer_incref_(buf);
+ if (WSARecv(buf_o->fd, buf_o->buffers, nvecs, &bytesRead, &flags,
+ &ol->overlapped, NULL)) {
+ int error = WSAGetLastError();
+ if (error != WSA_IO_PENDING) {
+ /* An actual error. */
+ pin_release(buf_o, EVBUFFER_MEM_PINNED_R);
+ evbuffer_unfreeze(buf, 0);
+ evbuffer_free(buf); /* decref */
+ goto done;
+ }
+ }
+
+ buf_o->read_in_progress = 1;
+ r = 0;
+done:
+ EVBUFFER_UNLOCK(buf);
+ return r;
+}
+
+evutil_socket_t
+evbuffer_overlapped_get_fd_(struct evbuffer *buf)
+{
+ struct evbuffer_overlapped *buf_o = upcast_evbuffer(buf);
+ return buf_o ? buf_o->fd : -1;
+}
+
+void
+evbuffer_overlapped_set_fd_(struct evbuffer *buf, evutil_socket_t fd)
+{
+ struct evbuffer_overlapped *buf_o = upcast_evbuffer(buf);
+ EVBUFFER_LOCK(buf);
+	/* XXX is this right? Should it cancel current I/O operations? */
+ if (buf_o)
+ buf_o->fd = fd;
+ EVBUFFER_UNLOCK(buf);
+}
diff --git a/libs/libevent/src/bufferevent-internal.h b/libs/libevent/src/bufferevent-internal.h
new file mode 100644
index 0000000000..d9d9e66640
--- /dev/null
+++ b/libs/libevent/src/bufferevent-internal.h
@@ -0,0 +1,480 @@
+/*
+ * Copyright (c) 2008-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef BUFFEREVENT_INTERNAL_H_INCLUDED_
+#define BUFFEREVENT_INTERNAL_H_INCLUDED_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "event2/event-config.h"
+#include "event2/event_struct.h"
+#include "evconfig-private.h"
+#include "event2/util.h"
+#include "defer-internal.h"
+#include "evthread-internal.h"
+#include "event2/thread.h"
+#include "ratelim-internal.h"
+#include "event2/bufferevent_struct.h"
+
+#include "ipv6-internal.h"
+#ifdef _WIN32
+#include <ws2tcpip.h>
+#endif
+#ifdef EVENT__HAVE_NETINET_IN_H
+#include <netinet/in.h>
+#endif
+#ifdef EVENT__HAVE_NETINET_IN6_H
+#include <netinet/in6.h>
+#endif
+
+/* These flags are reasons that we might be declining to actually enable
+ reading or writing on a bufferevent.
+ */
+
+/* On all bufferevents, for reading: used when we have read up to the
+ watermark value.
+
+ On a filtering bufferevent, for writing: used when the underlying
+ bufferevent's write buffer has been filled up to its watermark
+ value.
+*/
+#define BEV_SUSPEND_WM 0x01
+/* On a base bufferevent: when we have emptied our bandwidth bucket. */
+#define BEV_SUSPEND_BW 0x02
+/* On a base bufferevent: when we have emptied the group's bandwidth bucket. */
+#define BEV_SUSPEND_BW_GROUP 0x04
+/* On a socket bufferevent: can't do any operations while we're waiting for
+ * name lookup to finish. */
+#define BEV_SUSPEND_LOOKUP 0x08
+/* On a base bufferevent, for reading: used when a filter has choked this
+ * (underlying) bufferevent because it has stopped reading from it. */
+#define BEV_SUSPEND_FILT_READ 0x10
+
+typedef ev_uint16_t bufferevent_suspend_flags;
+
+struct bufferevent_rate_limit_group {
+ /** List of all members in the group */
+ LIST_HEAD(rlim_group_member_list, bufferevent_private) members;
+ /** Current limits for the group. */
+ struct ev_token_bucket rate_limit;
+ struct ev_token_bucket_cfg rate_limit_cfg;
+
+	/** True iff we don't want to read from any member of the group
+	 * until the token bucket refills. */
+ unsigned read_suspended : 1;
+	/** True iff we don't want to write from any member of the group
+	 * until the token bucket refills. */
+ unsigned write_suspended : 1;
+ /** True iff we were unable to suspend one of the bufferevents in the
+ * group for reading the last time we tried, and we should try
+ * again. */
+ unsigned pending_unsuspend_read : 1;
+ /** True iff we were unable to suspend one of the bufferevents in the
+ * group for writing the last time we tried, and we should try
+ * again. */
+ unsigned pending_unsuspend_write : 1;
+
+ /*@{*/
+ /** Total number of bytes read or written in this group since last
+ * reset. */
+ ev_uint64_t total_read;
+ ev_uint64_t total_written;
+ /*@}*/
+
+ /** The number of bufferevents in the group. */
+ int n_members;
+
+ /** The smallest number of bytes that any member of the group should
+ * be limited to read or write at a time. */
+ ev_ssize_t min_share;
+ ev_ssize_t configured_min_share;
+
+ /** Timeout event that goes off once a tick, when the bucket is ready
+ * to refill. */
+ struct event master_refill_event;
+
+ /** Seed for weak random number generator. Protected by 'lock' */
+ struct evutil_weakrand_state weakrand_seed;
+
+ /** Lock to protect the members of this group. This lock should nest
+ * within every bufferevent lock: if you are holding this lock, do
+ * not assume you can lock another bufferevent. */
+ void *lock;
+};
+
+/** Fields for rate-limiting a single bufferevent. */
+struct bufferevent_rate_limit {
+ /* Linked-list elements for storing this bufferevent_private in a
+ * group.
+ *
+ * Note that this field is supposed to be protected by the group
+ * lock */
+ LIST_ENTRY(bufferevent_private) next_in_group;
+ /** The rate-limiting group for this bufferevent, or NULL if it is
+ * only rate-limited on its own. */
+ struct bufferevent_rate_limit_group *group;
+
+ /* This bufferevent's current limits. */
+ struct ev_token_bucket limit;
+ /* Pointer to the rate-limit configuration for this bufferevent.
+ * Can be shared. XXX reference-count this? */
+ struct ev_token_bucket_cfg *cfg;
+
+	/* Timeout event used when one of this bufferevent's buckets is
+	 * empty. */
+ struct event refill_bucket_event;
+};
+
+/** Parts of the bufferevent structure that are shared among all bufferevent
+ * types, but not exposed in bufferevent_struct.h. */
+struct bufferevent_private {
+ /** The underlying bufferevent structure. */
+ struct bufferevent bev;
+
+ /** Evbuffer callback to enforce watermarks on input. */
+ struct evbuffer_cb_entry *read_watermarks_cb;
+
+ /** If set, we should free the lock when we free the bufferevent. */
+ unsigned own_lock : 1;
+
+ /** Flag: set if we have deferred callbacks and a read callback is
+ * pending. */
+ unsigned readcb_pending : 1;
+ /** Flag: set if we have deferred callbacks and a write callback is
+ * pending. */
+ unsigned writecb_pending : 1;
+ /** Flag: set if we are currently busy connecting. */
+ unsigned connecting : 1;
+ /** Flag: set if a connect failed prematurely; this is a hack for
+ * getting around the bufferevent abstraction. */
+ unsigned connection_refused : 1;
+ /** Set to the events pending if we have deferred callbacks and
+ * an events callback is pending. */
+ short eventcb_pending;
+
+ /** If set, read is suspended until one or more conditions are over.
+ * The actual value here is a bitfield of those conditions; see the
+ * BEV_SUSPEND_* flags above. */
+ bufferevent_suspend_flags read_suspended;
+
+ /** If set, writing is suspended until one or more conditions are over.
+ * The actual value here is a bitfield of those conditions; see the
+ * BEV_SUSPEND_* flags above. */
+ bufferevent_suspend_flags write_suspended;
+
+ /** Set to the current socket errno if we have deferred callbacks and
+ * an events callback is pending. */
+ int errno_pending;
+
+ /** The DNS error code for bufferevent_socket_connect_hostname */
+ int dns_error;
+
+ /** Used to implement deferred callbacks */
+ struct event_callback deferred;
+
+ /** The options this bufferevent was constructed with */
+ enum bufferevent_options options;
+
+ /** Current reference count for this bufferevent. */
+ int refcnt;
+
+ /** Lock for this bufferevent. Shared by the inbuf and the outbuf.
+ * If NULL, locking is disabled. */
+ void *lock;
+
+ /** No matter how big our bucket gets, don't try to read more than this
+ * much in a single read operation. */
+ ev_ssize_t max_single_read;
+
+ /** No matter how big our bucket gets, don't try to write more than this
+ * much in a single write operation. */
+ ev_ssize_t max_single_write;
+
+ /** Rate-limiting information for this bufferevent */
+ struct bufferevent_rate_limit *rate_limiting;
+
+	/* Saved conn_addr, so that we can extract the IP address from it.
+	 *
+	 * Some servers may reset/close the connection without waiting for the
+	 * client, in which case we can't extract the IP address even in
+	 * close_cb. So we save it just after connecting to the remote server,
+	 * or after resolving (to avoid extra DNS requests during retries,
+	 * since UDP is slow). */
+ union {
+ struct sockaddr_in6 in6;
+ struct sockaddr_in in;
+ } conn_address;
+};
+
+/** Possible operations for a control callback. */
+enum bufferevent_ctrl_op {
+ BEV_CTRL_SET_FD,
+ BEV_CTRL_GET_FD,
+ BEV_CTRL_GET_UNDERLYING,
+ BEV_CTRL_CANCEL_ALL
+};
+
+/** Possible data types for a control callback */
+union bufferevent_ctrl_data {
+ void *ptr;
+ evutil_socket_t fd;
+};
+
+/**
+ Implementation table for a bufferevent: holds function pointers and other
+ information to make the various bufferevent types work.
+*/
+struct bufferevent_ops {
+ /** The name of the bufferevent's type. */
+ const char *type;
+ /** At what offset into the implementation type will we find a
+ bufferevent structure?
+
+ Example: if the type is implemented as
+ struct bufferevent_x {
+ int extra_data;
+ struct bufferevent bev;
+ }
+ then mem_offset should be offsetof(struct bufferevent_x, bev)
+ */
+ off_t mem_offset;
+
+ /** Enables one or more of EV_READ|EV_WRITE on a bufferevent. Does
+ not need to adjust the 'enabled' field. Returns 0 on success, -1
+ on failure.
+ */
+ int (*enable)(struct bufferevent *, short);
+
+ /** Disables one or more of EV_READ|EV_WRITE on a bufferevent. Does
+ not need to adjust the 'enabled' field. Returns 0 on success, -1
+ on failure.
+ */
+ int (*disable)(struct bufferevent *, short);
+
+	/** Detaches the bufferevent from related data structures. Called as
+ * soon as its reference count reaches 0. */
+ void (*unlink)(struct bufferevent *);
+
+ /** Free any storage and deallocate any extra data or structures used
+ in this implementation. Called when the bufferevent is
+ finalized.
+ */
+ void (*destruct)(struct bufferevent *);
+
+ /** Called when the timeouts on the bufferevent have changed.*/
+ int (*adj_timeouts)(struct bufferevent *);
+
+ /** Called to flush data. */
+ int (*flush)(struct bufferevent *, short, enum bufferevent_flush_mode);
+
+ /** Called to access miscellaneous fields. */
+ int (*ctrl)(struct bufferevent *, enum bufferevent_ctrl_op, union bufferevent_ctrl_data *);
+
+};
+
+extern const struct bufferevent_ops bufferevent_ops_socket;
+extern const struct bufferevent_ops bufferevent_ops_filter;
+extern const struct bufferevent_ops bufferevent_ops_pair;
+
+#define BEV_IS_SOCKET(bevp) ((bevp)->be_ops == &bufferevent_ops_socket)
+#define BEV_IS_FILTER(bevp) ((bevp)->be_ops == &bufferevent_ops_filter)
+#define BEV_IS_PAIR(bevp) ((bevp)->be_ops == &bufferevent_ops_pair)
+
+#ifdef _WIN32
+extern const struct bufferevent_ops bufferevent_ops_async;
+#define BEV_IS_ASYNC(bevp) ((bevp)->be_ops == &bufferevent_ops_async)
+#else
+#define BEV_IS_ASYNC(bevp) 0
+#endif
+
+/** Initialize the shared parts of a bufferevent. */
+int bufferevent_init_common_(struct bufferevent_private *, struct event_base *, const struct bufferevent_ops *, enum bufferevent_options options);
+
+/** For internal use: temporarily stop all reads on bufev, until the conditions
+ * in 'what' are over. */
+void bufferevent_suspend_read_(struct bufferevent *bufev, bufferevent_suspend_flags what);
+/** For internal use: clear the conditions 'what' on bufev, and re-enable
+ * reading if there are no conditions left. */
+void bufferevent_unsuspend_read_(struct bufferevent *bufev, bufferevent_suspend_flags what);
+
+/** For internal use: temporarily stop all writes on bufev, until the conditions
+ * in 'what' are over. */
+void bufferevent_suspend_write_(struct bufferevent *bufev, bufferevent_suspend_flags what);
+/** For internal use: clear the conditions 'what' on bufev, and re-enable
+ * writing if there are no conditions left. */
+void bufferevent_unsuspend_write_(struct bufferevent *bufev, bufferevent_suspend_flags what);
+
+#define bufferevent_wm_suspend_read(b) \
+ bufferevent_suspend_read_((b), BEV_SUSPEND_WM)
+#define bufferevent_wm_unsuspend_read(b) \
+ bufferevent_unsuspend_read_((b), BEV_SUSPEND_WM)
+
+/*
+ Disable a bufferevent. Equivalent to bufferevent_disable(), but
+ first resets 'connecting' flag to force EV_WRITE down for sure.
+
+ XXXX this method will go away in the future; try not to add new users.
+ See comment in evhttp_connection_reset_() for discussion.
+
+ @param bufev the bufferevent to be disabled
+ @param event any combination of EV_READ | EV_WRITE.
+ @return 0 if successful, or -1 if an error occurred
+ @see bufferevent_disable()
+ */
+int bufferevent_disable_hard_(struct bufferevent *bufev, short event);
+
+/** Internal: Set up locking on a bufferevent. If lock is set, use it.
+ * Otherwise, use a new lock. */
+int bufferevent_enable_locking_(struct bufferevent *bufev, void *lock);
+/** Internal: backwards compat macro for the now public function
+ * Increment the reference count on bufev. */
+#define bufferevent_incref_(bufev) bufferevent_incref(bufev)
+/** Internal: Lock bufev and increase its reference count. */
+void bufferevent_incref_and_lock_(struct bufferevent *bufev);
+/** Internal: backwards compat macro for the now public function
+ * Decrement the reference count on bufev. Returns 1 if it freed
+ * the bufferevent.*/
+#define bufferevent_decref_(bufev) bufferevent_decref(bufev)
+
+/** Internal: Drop the reference count on bufev, freeing as necessary, and
+ * unlocking it otherwise. Returns 1 if it freed the bufferevent. */
+int bufferevent_decref_and_unlock_(struct bufferevent *bufev);
+
+/** Internal: If callbacks are deferred and we have a read callback, schedule
+ * a readcb. Otherwise just run the readcb. Ignores watermarks. */
+void bufferevent_run_readcb_(struct bufferevent *bufev, int options);
+/** Internal: If callbacks are deferred and we have a write callback, schedule
+ * a writecb. Otherwise just run the writecb. Ignores watermarks. */
+void bufferevent_run_writecb_(struct bufferevent *bufev, int options);
+/** Internal: If callbacks are deferred and we have an eventcb, schedule
+ * it to run with events "what". Otherwise just run the eventcb.
+ * See bufferevent_trigger_event for meaning of "options". */
+void bufferevent_run_eventcb_(struct bufferevent *bufev, short what, int options);
+
+/** Internal: Run or schedule (if deferred or options contain
+ * BEV_TRIG_DEFER_CALLBACKS) I/O callbacks specified in iotype.
+ * Must already hold the bufev lock. Honors watermarks unless
+ * BEV_TRIG_IGNORE_WATERMARKS is in options. */
+static inline void bufferevent_trigger_nolock_(struct bufferevent *bufev, short iotype, int options);
+
+/* Making this inline since all of the common-case calls to this function in
+ * libevent use constant arguments. */
+static inline void
+bufferevent_trigger_nolock_(struct bufferevent *bufev, short iotype, int options)
+{
+ if ((iotype & EV_READ) && ((options & BEV_TRIG_IGNORE_WATERMARKS) ||
+ evbuffer_get_length(bufev->input) >= bufev->wm_read.low))
+ bufferevent_run_readcb_(bufev, options);
+ if ((iotype & EV_WRITE) && ((options & BEV_TRIG_IGNORE_WATERMARKS) ||
+ evbuffer_get_length(bufev->output) <= bufev->wm_write.low))
+ bufferevent_run_writecb_(bufev, options);
+}
+
+/** Internal: Add the event 'ev' with timeout tv, unless tv is set to 0, in
+ * which case add ev with no timeout. */
+int bufferevent_add_event_(struct event *ev, const struct timeval *tv);
+
+/* =========
+ * These next functions implement timeouts for bufferevents that aren't using
+ * ev_read and ev_write for anything other than timeouts.
+ * ========= */
+/** Internal use: Set up the ev_read and ev_write callbacks so that
+ * the other "generic_timeout" functions will work on it. Call this from
+ * the constructor function. */
+void bufferevent_init_generic_timeout_cbs_(struct bufferevent *bev);
+/** Internal use: Add or delete the generic timeout events as appropriate.
+ * (If an event is enabled and a timeout is set, we add the event. Otherwise
+ * we delete it.) Call this from anything that changes the timeout values,
+ * that enables EV_READ or EV_WRITE, or that disables EV_READ or EV_WRITE. */
+int bufferevent_generic_adj_timeouts_(struct bufferevent *bev);
+int bufferevent_generic_adj_existing_timeouts_(struct bufferevent *bev);
+
+enum bufferevent_options bufferevent_get_options_(struct bufferevent *bev);
+
+const struct sockaddr*
+bufferevent_socket_get_conn_address_(struct bufferevent *bev);
+
+/** Internal use: We have just successfully read data into an inbuf, so
+ * reset the read timeout (if any). */
+#define BEV_RESET_GENERIC_READ_TIMEOUT(bev) \
+ do { \
+ if (evutil_timerisset(&(bev)->timeout_read)) \
+ event_add(&(bev)->ev_read, &(bev)->timeout_read); \
+ } while (0)
+/** Internal use: We have just successfully written data from an outbuf, so
+ * reset the write timeout (if any). */
+#define BEV_RESET_GENERIC_WRITE_TIMEOUT(bev) \
+ do { \
+ if (evutil_timerisset(&(bev)->timeout_write)) \
+ event_add(&(bev)->ev_write, &(bev)->timeout_write); \
+ } while (0)
+#define BEV_DEL_GENERIC_READ_TIMEOUT(bev) \
+ event_del(&(bev)->ev_read)
+#define BEV_DEL_GENERIC_WRITE_TIMEOUT(bev) \
+ event_del(&(bev)->ev_write)
+
+
+/** Internal: Given a bufferevent, return its corresponding
+ * bufferevent_private. */
+#define BEV_UPCAST(b) EVUTIL_UPCAST((b), struct bufferevent_private, bev)
+
+#ifdef EVENT__DISABLE_THREAD_SUPPORT
+#define BEV_LOCK(b) EVUTIL_NIL_STMT_
+#define BEV_UNLOCK(b) EVUTIL_NIL_STMT_
+#else
+/** Internal: Grab the lock (if any) on a bufferevent */
+#define BEV_LOCK(b) do { \
+ struct bufferevent_private *locking = BEV_UPCAST(b); \
+ EVLOCK_LOCK(locking->lock, 0); \
+ } while (0)
+
+/** Internal: Release the lock (if any) on a bufferevent */
+#define BEV_UNLOCK(b) do { \
+ struct bufferevent_private *locking = BEV_UPCAST(b); \
+ EVLOCK_UNLOCK(locking->lock, 0); \
+ } while (0)
+#endif
+
+
+/* ==== For rate-limiting. */
+
+int bufferevent_decrement_write_buckets_(struct bufferevent_private *bev,
+ ev_ssize_t bytes);
+int bufferevent_decrement_read_buckets_(struct bufferevent_private *bev,
+ ev_ssize_t bytes);
+ev_ssize_t bufferevent_get_read_max_(struct bufferevent_private *bev);
+ev_ssize_t bufferevent_get_write_max_(struct bufferevent_private *bev);
+
+int bufferevent_ratelim_init_(struct bufferevent_private *bev);
+
+#ifdef __cplusplus
+}
+#endif
+
+
+#endif /* BUFFEREVENT_INTERNAL_H_INCLUDED_ */
diff --git a/libs/libevent/src/bufferevent.c b/libs/libevent/src/bufferevent.c
new file mode 100644
index 0000000000..59ae24f143
--- /dev/null
+++ b/libs/libevent/src/bufferevent.c
@@ -0,0 +1,1016 @@
+/*
+ * Copyright (c) 2002-2007 Niels Provos <provos@citi.umich.edu>
+ * Copyright (c) 2007-2012 Niels Provos, Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "event2/event-config.h"
+#include "evconfig-private.h"
+
+#include <sys/types.h>
+
+#ifdef EVENT__HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#ifdef EVENT__HAVE_STDARG_H
+#include <stdarg.h>
+#endif
+
+#ifdef _WIN32
+#include <winsock2.h>
+#endif
+#include <errno.h>
+
+#include "event2/util.h"
+#include "event2/buffer.h"
+#include "event2/buffer_compat.h"
+#include "event2/bufferevent.h"
+#include "event2/bufferevent_struct.h"
+#include "event2/bufferevent_compat.h"
+#include "event2/event.h"
+#include "event-internal.h"
+#include "log-internal.h"
+#include "mm-internal.h"
+#include "bufferevent-internal.h"
+#include "evbuffer-internal.h"
+#include "util-internal.h"
+
+static void bufferevent_cancel_all_(struct bufferevent *bev);
+static void bufferevent_finalize_cb_(struct event_callback *evcb, void *arg_);
+
+void
+bufferevent_suspend_read_(struct bufferevent *bufev, bufferevent_suspend_flags what)
+{
+ struct bufferevent_private *bufev_private =
+ EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);
+ BEV_LOCK(bufev);
+ if (!bufev_private->read_suspended)
+ bufev->be_ops->disable(bufev, EV_READ);
+ bufev_private->read_suspended |= what;
+ BEV_UNLOCK(bufev);
+}
+
+void
+bufferevent_unsuspend_read_(struct bufferevent *bufev, bufferevent_suspend_flags what)
+{
+ struct bufferevent_private *bufev_private =
+ EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);
+ BEV_LOCK(bufev);
+ bufev_private->read_suspended &= ~what;
+ if (!bufev_private->read_suspended && (bufev->enabled & EV_READ))
+ bufev->be_ops->enable(bufev, EV_READ);
+ BEV_UNLOCK(bufev);
+}
+
+void
+bufferevent_suspend_write_(struct bufferevent *bufev, bufferevent_suspend_flags what)
+{
+ struct bufferevent_private *bufev_private =
+ EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);
+ BEV_LOCK(bufev);
+ if (!bufev_private->write_suspended)
+ bufev->be_ops->disable(bufev, EV_WRITE);
+ bufev_private->write_suspended |= what;
+ BEV_UNLOCK(bufev);
+}
+
+void
+bufferevent_unsuspend_write_(struct bufferevent *bufev, bufferevent_suspend_flags what)
+{
+ struct bufferevent_private *bufev_private =
+ EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);
+ BEV_LOCK(bufev);
+ bufev_private->write_suspended &= ~what;
+ if (!bufev_private->write_suspended && (bufev->enabled & EV_WRITE))
+ bufev->be_ops->enable(bufev, EV_WRITE);
+ BEV_UNLOCK(bufev);
+}
+
+
+/* Callback to implement watermarks on the input buffer. Only enabled
+ * if the watermark is set. */
+static void
+bufferevent_inbuf_wm_cb(struct evbuffer *buf,
+ const struct evbuffer_cb_info *cbinfo,
+ void *arg)
+{
+ struct bufferevent *bufev = arg;
+ size_t size;
+
+ size = evbuffer_get_length(buf);
+
+ if (size >= bufev->wm_read.high)
+ bufferevent_wm_suspend_read(bufev);
+ else
+ bufferevent_wm_unsuspend_read(bufev);
+}
+
+static void
+bufferevent_run_deferred_callbacks_locked(struct event_callback *cb, void *arg)
+{
+ struct bufferevent_private *bufev_private = arg;
+ struct bufferevent *bufev = &bufev_private->bev;
+
+ BEV_LOCK(bufev);
+ if ((bufev_private->eventcb_pending & BEV_EVENT_CONNECTED) &&
+ bufev->errorcb) {
+ /* The "connected" happened before any reads or writes, so
+ send it first. */
+ bufev_private->eventcb_pending &= ~BEV_EVENT_CONNECTED;
+ bufev->errorcb(bufev, BEV_EVENT_CONNECTED, bufev->cbarg);
+ }
+ if (bufev_private->readcb_pending && bufev->readcb) {
+ bufev_private->readcb_pending = 0;
+ bufev->readcb(bufev, bufev->cbarg);
+ }
+ if (bufev_private->writecb_pending && bufev->writecb) {
+ bufev_private->writecb_pending = 0;
+ bufev->writecb(bufev, bufev->cbarg);
+ }
+ if (bufev_private->eventcb_pending && bufev->errorcb) {
+ short what = bufev_private->eventcb_pending;
+ int err = bufev_private->errno_pending;
+ bufev_private->eventcb_pending = 0;
+ bufev_private->errno_pending = 0;
+ EVUTIL_SET_SOCKET_ERROR(err);
+ bufev->errorcb(bufev, what, bufev->cbarg);
+ }
+ bufferevent_decref_and_unlock_(bufev);
+}
+
+static void
+bufferevent_run_deferred_callbacks_unlocked(struct event_callback *cb, void *arg)
+{
+ struct bufferevent_private *bufev_private = arg;
+ struct bufferevent *bufev = &bufev_private->bev;
+
+ BEV_LOCK(bufev);
+#define UNLOCKED(stmt) \
+ do { BEV_UNLOCK(bufev); stmt; BEV_LOCK(bufev); } while(0)
+
+ if ((bufev_private->eventcb_pending & BEV_EVENT_CONNECTED) &&
+ bufev->errorcb) {
+ /* The "connected" happened before any reads or writes, so
+ send it first. */
+ bufferevent_event_cb errorcb = bufev->errorcb;
+ void *cbarg = bufev->cbarg;
+ bufev_private->eventcb_pending &= ~BEV_EVENT_CONNECTED;
+ UNLOCKED(errorcb(bufev, BEV_EVENT_CONNECTED, cbarg));
+ }
+ if (bufev_private->readcb_pending && bufev->readcb) {
+ bufferevent_data_cb readcb = bufev->readcb;
+ void *cbarg = bufev->cbarg;
+ bufev_private->readcb_pending = 0;
+ UNLOCKED(readcb(bufev, cbarg));
+ }
+ if (bufev_private->writecb_pending && bufev->writecb) {
+ bufferevent_data_cb writecb = bufev->writecb;
+ void *cbarg = bufev->cbarg;
+ bufev_private->writecb_pending = 0;
+ UNLOCKED(writecb(bufev, cbarg));
+ }
+ if (bufev_private->eventcb_pending && bufev->errorcb) {
+ bufferevent_event_cb errorcb = bufev->errorcb;
+ void *cbarg = bufev->cbarg;
+ short what = bufev_private->eventcb_pending;
+ int err = bufev_private->errno_pending;
+ bufev_private->eventcb_pending = 0;
+ bufev_private->errno_pending = 0;
+ EVUTIL_SET_SOCKET_ERROR(err);
+ UNLOCKED(errorcb(bufev,what,cbarg));
+ }
+ bufferevent_decref_and_unlock_(bufev);
+#undef UNLOCKED
+}
+
+#define SCHEDULE_DEFERRED(bevp) \
+ do { \
+ if (event_deferred_cb_schedule_( \
+ (bevp)->bev.ev_base, \
+ &(bevp)->deferred)) \
+ bufferevent_incref_(&(bevp)->bev); \
+ } while (0)
+
+
+void
+bufferevent_run_readcb_(struct bufferevent *bufev, int options)
+{
+ /* Requires that we hold the lock and a reference */
+ struct bufferevent_private *p =
+ EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);
+ if (bufev->readcb == NULL)
+ return;
+ if ((p->options|options) & BEV_OPT_DEFER_CALLBACKS) {
+ p->readcb_pending = 1;
+ SCHEDULE_DEFERRED(p);
+ } else {
+ bufev->readcb(bufev, bufev->cbarg);
+ }
+}
+
+void
+bufferevent_run_writecb_(struct bufferevent *bufev, int options)
+{
+ /* Requires that we hold the lock and a reference */
+ struct bufferevent_private *p =
+ EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);
+ if (bufev->writecb == NULL)
+ return;
+ if ((p->options|options) & BEV_OPT_DEFER_CALLBACKS) {
+ p->writecb_pending = 1;
+ SCHEDULE_DEFERRED(p);
+ } else {
+ bufev->writecb(bufev, bufev->cbarg);
+ }
+}
+
+#define BEV_TRIG_ALL_OPTS ( \
+ BEV_TRIG_IGNORE_WATERMARKS| \
+ BEV_TRIG_DEFER_CALLBACKS \
+ )
+
+void
+bufferevent_trigger(struct bufferevent *bufev, short iotype, int options)
+{
+ bufferevent_incref_and_lock_(bufev);
+ bufferevent_trigger_nolock_(bufev, iotype, options&BEV_TRIG_ALL_OPTS);
+ bufferevent_decref_and_unlock_(bufev);
+}
+
+void
+bufferevent_run_eventcb_(struct bufferevent *bufev, short what, int options)
+{
+ /* Requires that we hold the lock and a reference */
+ struct bufferevent_private *p =
+ EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);
+ if (bufev->errorcb == NULL)
+ return;
+ if ((p->options|options) & BEV_OPT_DEFER_CALLBACKS) {
+ p->eventcb_pending |= what;
+ p->errno_pending = EVUTIL_SOCKET_ERROR();
+ SCHEDULE_DEFERRED(p);
+ } else {
+ bufev->errorcb(bufev, what, bufev->cbarg);
+ }
+}
+
+void
+bufferevent_trigger_event(struct bufferevent *bufev, short what, int options)
+{
+ bufferevent_incref_and_lock_(bufev);
+ bufferevent_run_eventcb_(bufev, what, options&BEV_TRIG_ALL_OPTS);
+ bufferevent_decref_and_unlock_(bufev);
+}
+
+int
+bufferevent_init_common_(struct bufferevent_private *bufev_private,
+ struct event_base *base,
+ const struct bufferevent_ops *ops,
+ enum bufferevent_options options)
+{
+ struct bufferevent *bufev = &bufev_private->bev;
+
+ if (!bufev->input) {
+ if ((bufev->input = evbuffer_new()) == NULL)
+ return -1;
+ }
+
+ if (!bufev->output) {
+ if ((bufev->output = evbuffer_new()) == NULL) {
+ evbuffer_free(bufev->input);
+ return -1;
+ }
+ }
+
+ bufev_private->refcnt = 1;
+ bufev->ev_base = base;
+
+ /* Disable timeouts. */
+ evutil_timerclear(&bufev->timeout_read);
+ evutil_timerclear(&bufev->timeout_write);
+
+ bufev->be_ops = ops;
+
+ bufferevent_ratelim_init_(bufev_private);
+
+ /*
+ * Set to EV_WRITE so that using bufferevent_write is going to
+ * trigger a callback. Reading needs to be explicitly enabled
+ * because otherwise no data will be available.
+ */
+ bufev->enabled = EV_WRITE;
+
+#ifndef EVENT__DISABLE_THREAD_SUPPORT
+ if (options & BEV_OPT_THREADSAFE) {
+ if (bufferevent_enable_locking_(bufev, NULL) < 0) {
+ /* cleanup */
+ evbuffer_free(bufev->input);
+ evbuffer_free(bufev->output);
+ bufev->input = NULL;
+ bufev->output = NULL;
+ return -1;
+ }
+ }
+#endif
+ if ((options & (BEV_OPT_DEFER_CALLBACKS|BEV_OPT_UNLOCK_CALLBACKS))
+ == BEV_OPT_UNLOCK_CALLBACKS) {
+ event_warnx("UNLOCK_CALLBACKS requires DEFER_CALLBACKS");
+ return -1;
+ }
+ if (options & BEV_OPT_UNLOCK_CALLBACKS)
+ event_deferred_cb_init_(
+ &bufev_private->deferred,
+ event_base_get_npriorities(base) / 2,
+ bufferevent_run_deferred_callbacks_unlocked,
+ bufev_private);
+ else
+ event_deferred_cb_init_(
+ &bufev_private->deferred,
+ event_base_get_npriorities(base) / 2,
+ bufferevent_run_deferred_callbacks_locked,
+ bufev_private);
+
+ bufev_private->options = options;
+
+ evbuffer_set_parent_(bufev->input, bufev);
+ evbuffer_set_parent_(bufev->output, bufev);
+
+ return 0;
+}
+
+void
+bufferevent_setcb(struct bufferevent *bufev,
+ bufferevent_data_cb readcb, bufferevent_data_cb writecb,
+ bufferevent_event_cb eventcb, void *cbarg)
+{
+ BEV_LOCK(bufev);
+
+ bufev->readcb = readcb;
+ bufev->writecb = writecb;
+ bufev->errorcb = eventcb;
+
+ bufev->cbarg = cbarg;
+ BEV_UNLOCK(bufev);
+}
+
+void
+bufferevent_getcb(struct bufferevent *bufev,
+ bufferevent_data_cb *readcb_ptr,
+ bufferevent_data_cb *writecb_ptr,
+ bufferevent_event_cb *eventcb_ptr,
+ void **cbarg_ptr)
+{
+ BEV_LOCK(bufev);
+ if (readcb_ptr)
+ *readcb_ptr = bufev->readcb;
+ if (writecb_ptr)
+ *writecb_ptr = bufev->writecb;
+ if (eventcb_ptr)
+ *eventcb_ptr = bufev->errorcb;
+ if (cbarg_ptr)
+ *cbarg_ptr = bufev->cbarg;
+
+ BEV_UNLOCK(bufev);
+}
+
+struct evbuffer *
+bufferevent_get_input(struct bufferevent *bufev)
+{
+ return bufev->input;
+}
+
+struct evbuffer *
+bufferevent_get_output(struct bufferevent *bufev)
+{
+ return bufev->output;
+}
+
+struct event_base *
+bufferevent_get_base(struct bufferevent *bufev)
+{
+ return bufev->ev_base;
+}
+
+int
+bufferevent_get_priority(const struct bufferevent *bufev)
+{
+ if (event_initialized(&bufev->ev_read)) {
+ return event_get_priority(&bufev->ev_read);
+ } else {
+ return event_base_get_npriorities(bufev->ev_base) / 2;
+ }
+}
+
+int
+bufferevent_write(struct bufferevent *bufev, const void *data, size_t size)
+{
+ if (evbuffer_add(bufev->output, data, size) == -1)
+ return (-1);
+
+ return 0;
+}
+
+int
+bufferevent_write_buffer(struct bufferevent *bufev, struct evbuffer *buf)
+{
+ if (evbuffer_add_buffer(bufev->output, buf) == -1)
+ return (-1);
+
+ return 0;
+}
+
+size_t
+bufferevent_read(struct bufferevent *bufev, void *data, size_t size)
+{
+ return (evbuffer_remove(bufev->input, data, size));
+}
+
+int
+bufferevent_read_buffer(struct bufferevent *bufev, struct evbuffer *buf)
+{
+ return (evbuffer_add_buffer(buf, bufev->input));
+}
+
+int
+bufferevent_enable(struct bufferevent *bufev, short event)
+{
+ struct bufferevent_private *bufev_private =
+ EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);
+ short impl_events = event;
+ int r = 0;
+
+ bufferevent_incref_and_lock_(bufev);
+ if (bufev_private->read_suspended)
+ impl_events &= ~EV_READ;
+ if (bufev_private->write_suspended)
+ impl_events &= ~EV_WRITE;
+
+ bufev->enabled |= event;
+
+ if (impl_events && bufev->be_ops->enable(bufev, impl_events) < 0)
+ r = -1;
+
+ bufferevent_decref_and_unlock_(bufev);
+ return r;
+}
+
+int
+bufferevent_set_timeouts(struct bufferevent *bufev,
+ const struct timeval *tv_read,
+ const struct timeval *tv_write)
+{
+ int r = 0;
+ BEV_LOCK(bufev);
+ if (tv_read) {
+ bufev->timeout_read = *tv_read;
+ } else {
+ evutil_timerclear(&bufev->timeout_read);
+ }
+ if (tv_write) {
+ bufev->timeout_write = *tv_write;
+ } else {
+ evutil_timerclear(&bufev->timeout_write);
+ }
+
+ if (bufev->be_ops->adj_timeouts)
+ r = bufev->be_ops->adj_timeouts(bufev);
+ BEV_UNLOCK(bufev);
+
+ return r;
+}
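
As a brief illustrative sketch of the public API just defined (not part of this patch; set_read_timeout is an invented name): give a bufferevent a 30-second read timeout while clearing any write timeout.

void
set_read_timeout(struct bufferevent *bev)
{
	struct timeval tv = { 30, 0 };	/* 30 seconds, 0 microseconds */
	bufferevent_set_timeouts(bev, &tv, NULL);	/* NULL clears the write timeout */
}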
+
+
+/* Obsolete; use bufferevent_set_timeouts */
+void
+bufferevent_settimeout(struct bufferevent *bufev,
+ int timeout_read, int timeout_write)
+{
+ struct timeval tv_read, tv_write;
+ struct timeval *ptv_read = NULL, *ptv_write = NULL;
+
+ memset(&tv_read, 0, sizeof(tv_read));
+ memset(&tv_write, 0, sizeof(tv_write));
+
+ if (timeout_read) {
+ tv_read.tv_sec = timeout_read;
+ ptv_read = &tv_read;
+ }
+ if (timeout_write) {
+ tv_write.tv_sec = timeout_write;
+ ptv_write = &tv_write;
+ }
+
+ bufferevent_set_timeouts(bufev, ptv_read, ptv_write);
+}
+
+
+int
+bufferevent_disable_hard_(struct bufferevent *bufev, short event)
+{
+ int r = 0;
+ struct bufferevent_private *bufev_private =
+ EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);
+
+ BEV_LOCK(bufev);
+ bufev->enabled &= ~event;
+
+ bufev_private->connecting = 0;
+ if (bufev->be_ops->disable(bufev, event) < 0)
+ r = -1;
+
+ BEV_UNLOCK(bufev);
+ return r;
+}
+
+int
+bufferevent_disable(struct bufferevent *bufev, short event)
+{
+ int r = 0;
+
+ BEV_LOCK(bufev);
+ bufev->enabled &= ~event;
+
+ if (bufev->be_ops->disable(bufev, event) < 0)
+ r = -1;
+
+ BEV_UNLOCK(bufev);
+ return r;
+}
+
+/*
+ * Sets the water marks
+ */
+
+void
+bufferevent_setwatermark(struct bufferevent *bufev, short events,
+ size_t lowmark, size_t highmark)
+{
+ struct bufferevent_private *bufev_private =
+ EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);
+
+ BEV_LOCK(bufev);
+ if (events & EV_WRITE) {
+ bufev->wm_write.low = lowmark;
+ bufev->wm_write.high = highmark;
+ }
+
+ if (events & EV_READ) {
+ bufev->wm_read.low = lowmark;
+ bufev->wm_read.high = highmark;
+
+ if (highmark) {
+			/* There is now a new high-water mark for read.
+			   Enable the callback if needed, and see if we should
+			   suspend or unsuspend reading. */
+
+ if (bufev_private->read_watermarks_cb == NULL) {
+ bufev_private->read_watermarks_cb =
+ evbuffer_add_cb(bufev->input,
+ bufferevent_inbuf_wm_cb,
+ bufev);
+ }
+ evbuffer_cb_set_flags(bufev->input,
+ bufev_private->read_watermarks_cb,
+ EVBUFFER_CB_ENABLED|EVBUFFER_CB_NODEFER);
+
+ if (evbuffer_get_length(bufev->input) >= highmark)
+ bufferevent_wm_suspend_read(bufev);
+ else if (evbuffer_get_length(bufev->input) < highmark)
+ bufferevent_wm_unsuspend_read(bufev);
+ } else {
+ /* There is now no high-water mark for read. */
+ if (bufev_private->read_watermarks_cb)
+ evbuffer_cb_clear_flags(bufev->input,
+ bufev_private->read_watermarks_cb,
+ EVBUFFER_CB_ENABLED);
+ bufferevent_wm_unsuspend_read(bufev);
+ }
+ }
+ BEV_UNLOCK(bufev);
+}
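
A minimal application-level sketch of the watermark API above (illustrative only; configure_watermarks is an invented name): with these settings the read callback fires only once at least 4 bytes are buffered, and reading from the transport is suspended whenever the input buffer holds 16384 bytes or more.

void
configure_watermarks(struct bufferevent *bev)
{
	/* low = 4: wait for at least 4 buffered bytes before readcb runs;
	 * high = 16384: suspend reading once this much input is queued. */
	bufferevent_setwatermark(bev, EV_READ, 4, 16384);
}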
+
+int
+bufferevent_getwatermark(struct bufferevent *bufev, short events,
+ size_t *lowmark, size_t *highmark)
+{
+ if (events == EV_WRITE) {
+ BEV_LOCK(bufev);
+ if (lowmark)
+ *lowmark = bufev->wm_write.low;
+ if (highmark)
+ *highmark = bufev->wm_write.high;
+ BEV_UNLOCK(bufev);
+ return 0;
+ }
+
+ if (events == EV_READ) {
+ BEV_LOCK(bufev);
+ if (lowmark)
+ *lowmark = bufev->wm_read.low;
+ if (highmark)
+ *highmark = bufev->wm_read.high;
+ BEV_UNLOCK(bufev);
+ return 0;
+ }
+ return -1;
+}
+
+int
+bufferevent_flush(struct bufferevent *bufev,
+ short iotype,
+ enum bufferevent_flush_mode mode)
+{
+ int r = -1;
+ BEV_LOCK(bufev);
+ if (bufev->be_ops->flush)
+ r = bufev->be_ops->flush(bufev, iotype, mode);
+ BEV_UNLOCK(bufev);
+ return r;
+}
+
+void
+bufferevent_incref_and_lock_(struct bufferevent *bufev)
+{
+ struct bufferevent_private *bufev_private =
+ BEV_UPCAST(bufev);
+ BEV_LOCK(bufev);
+ ++bufev_private->refcnt;
+}
+
+#if 0
+static void
+bufferevent_transfer_lock_ownership_(struct bufferevent *donor,
+ struct bufferevent *recipient)
+{
+ struct bufferevent_private *d = BEV_UPCAST(donor);
+ struct bufferevent_private *r = BEV_UPCAST(recipient);
+ if (d->lock != r->lock)
+ return;
+ if (r->own_lock)
+ return;
+ if (d->own_lock) {
+ d->own_lock = 0;
+ r->own_lock = 1;
+ }
+}
+#endif
+
+int
+bufferevent_decref_and_unlock_(struct bufferevent *bufev)
+{
+ struct bufferevent_private *bufev_private =
+ EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);
+ int n_cbs = 0;
+#define MAX_CBS 16
+ struct event_callback *cbs[MAX_CBS];
+
+ EVUTIL_ASSERT(bufev_private->refcnt > 0);
+
+ if (--bufev_private->refcnt) {
+ BEV_UNLOCK(bufev);
+ return 0;
+ }
+
+ if (bufev->be_ops->unlink)
+ bufev->be_ops->unlink(bufev);
+
+ /* Okay, we're out of references. Let's finalize this once all the
+ * callbacks are done running. */
+ cbs[0] = &bufev->ev_read.ev_evcallback;
+ cbs[1] = &bufev->ev_write.ev_evcallback;
+ cbs[2] = &bufev_private->deferred;
+ n_cbs = 3;
+ if (bufev_private->rate_limiting) {
+ struct event *e = &bufev_private->rate_limiting->refill_bucket_event;
+ if (event_initialized(e))
+ cbs[n_cbs++] = &e->ev_evcallback;
+ }
+ n_cbs += evbuffer_get_callbacks_(bufev->input, cbs+n_cbs, MAX_CBS-n_cbs);
+ n_cbs += evbuffer_get_callbacks_(bufev->output, cbs+n_cbs, MAX_CBS-n_cbs);
+
+ event_callback_finalize_many_(bufev->ev_base, n_cbs, cbs,
+ bufferevent_finalize_cb_);
+
+#undef MAX_CBS
+ BEV_UNLOCK(bufev);
+
+ return 1;
+}
+
+static void
+bufferevent_finalize_cb_(struct event_callback *evcb, void *arg_)
+{
+ struct bufferevent *bufev = arg_;
+ struct bufferevent *underlying;
+ struct bufferevent_private *bufev_private =
+ EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);
+
+ BEV_LOCK(bufev);
+ underlying = bufferevent_get_underlying(bufev);
+
+ /* Clean up the shared info */
+ if (bufev->be_ops->destruct)
+ bufev->be_ops->destruct(bufev);
+
+ /* XXX what happens if refcnt for these buffers is > 1?
+ * The buffers can share a lock with this bufferevent object,
+ * but the lock might be destroyed below. */
+ /* evbuffer will free the callbacks */
+ evbuffer_free(bufev->input);
+ evbuffer_free(bufev->output);
+
+ if (bufev_private->rate_limiting) {
+ if (bufev_private->rate_limiting->group)
+ bufferevent_remove_from_rate_limit_group_internal_(bufev,0);
+ mm_free(bufev_private->rate_limiting);
+ bufev_private->rate_limiting = NULL;
+ }
+
+
+ BEV_UNLOCK(bufev);
+
+ if (bufev_private->own_lock)
+ EVTHREAD_FREE_LOCK(bufev_private->lock,
+ EVTHREAD_LOCKTYPE_RECURSIVE);
+
+ /* Free the actual allocated memory. */
+ mm_free(((char*)bufev) - bufev->be_ops->mem_offset);
+
+ /* Release the reference to underlying now that we no longer need the
+ * reference to it. We wait this long mainly in case our lock is
+ * shared with underlying.
+ *
+ * The 'destruct' function will also drop a reference to underlying
+ * if BEV_OPT_CLOSE_ON_FREE is set.
+ *
+ * XXX Should we/can we just refcount evbuffer/bufferevent locks?
+ * It would probably save us some headaches.
+ */
+ if (underlying)
+ bufferevent_decref_(underlying);
+}
+
+int
+bufferevent_decref(struct bufferevent *bufev)
+{
+ BEV_LOCK(bufev);
+ return bufferevent_decref_and_unlock_(bufev);
+}
+
+void
+bufferevent_free(struct bufferevent *bufev)
+{
+ BEV_LOCK(bufev);
+ bufferevent_setcb(bufev, NULL, NULL, NULL, NULL);
+ bufferevent_cancel_all_(bufev);
+ bufferevent_decref_and_unlock_(bufev);
+}
+
+void
+bufferevent_incref(struct bufferevent *bufev)
+{
+ struct bufferevent_private *bufev_private =
+ EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);
+
+ /* XXX: now that this function is public, we might want to
+ * - return the count from this function
+ * - create a new function to atomically grab the current refcount
+ */
+ BEV_LOCK(bufev);
+ ++bufev_private->refcnt;
+ BEV_UNLOCK(bufev);
+}
+
+int
+bufferevent_enable_locking_(struct bufferevent *bufev, void *lock)
+{
+#ifdef EVENT__DISABLE_THREAD_SUPPORT
+ return -1;
+#else
+ struct bufferevent *underlying;
+
+ if (BEV_UPCAST(bufev)->lock)
+ return -1;
+ underlying = bufferevent_get_underlying(bufev);
+
+ if (!lock && underlying && BEV_UPCAST(underlying)->lock) {
+ lock = BEV_UPCAST(underlying)->lock;
+ BEV_UPCAST(bufev)->lock = lock;
+ BEV_UPCAST(bufev)->own_lock = 0;
+ } else if (!lock) {
+ EVTHREAD_ALLOC_LOCK(lock, EVTHREAD_LOCKTYPE_RECURSIVE);
+ if (!lock)
+ return -1;
+ BEV_UPCAST(bufev)->lock = lock;
+ BEV_UPCAST(bufev)->own_lock = 1;
+ } else {
+ BEV_UPCAST(bufev)->lock = lock;
+ BEV_UPCAST(bufev)->own_lock = 0;
+ }
+ evbuffer_enable_locking(bufev->input, lock);
+ evbuffer_enable_locking(bufev->output, lock);
+
+ if (underlying && !BEV_UPCAST(underlying)->lock)
+ bufferevent_enable_locking_(underlying, lock);
+
+ return 0;
+#endif
+}
+
+int
+bufferevent_setfd(struct bufferevent *bev, evutil_socket_t fd)
+{
+ union bufferevent_ctrl_data d;
+ int res = -1;
+ d.fd = fd;
+ BEV_LOCK(bev);
+ if (bev->be_ops->ctrl)
+ res = bev->be_ops->ctrl(bev, BEV_CTRL_SET_FD, &d);
+ BEV_UNLOCK(bev);
+ return res;
+}
+
+evutil_socket_t
+bufferevent_getfd(struct bufferevent *bev)
+{
+ union bufferevent_ctrl_data d;
+ int res = -1;
+ d.fd = -1;
+ BEV_LOCK(bev);
+ if (bev->be_ops->ctrl)
+ res = bev->be_ops->ctrl(bev, BEV_CTRL_GET_FD, &d);
+ BEV_UNLOCK(bev);
+ return (res<0) ? -1 : d.fd;
+}
+
+enum bufferevent_options
+bufferevent_get_options_(struct bufferevent *bev)
+{
+ struct bufferevent_private *bev_p =
+ EVUTIL_UPCAST(bev, struct bufferevent_private, bev);
+ enum bufferevent_options options;
+
+ BEV_LOCK(bev);
+ options = bev_p->options;
+ BEV_UNLOCK(bev);
+ return options;
+}
+
+
+static void
+bufferevent_cancel_all_(struct bufferevent *bev)
+{
+ union bufferevent_ctrl_data d;
+ memset(&d, 0, sizeof(d));
+ BEV_LOCK(bev);
+ if (bev->be_ops->ctrl)
+ bev->be_ops->ctrl(bev, BEV_CTRL_CANCEL_ALL, &d);
+ BEV_UNLOCK(bev);
+}
+
+short
+bufferevent_get_enabled(struct bufferevent *bufev)
+{
+ short r;
+ BEV_LOCK(bufev);
+ r = bufev->enabled;
+ BEV_UNLOCK(bufev);
+ return r;
+}
+
+struct bufferevent *
+bufferevent_get_underlying(struct bufferevent *bev)
+{
+ union bufferevent_ctrl_data d;
+ int res = -1;
+ d.ptr = NULL;
+ BEV_LOCK(bev);
+ if (bev->be_ops->ctrl)
+ res = bev->be_ops->ctrl(bev, BEV_CTRL_GET_UNDERLYING, &d);
+ BEV_UNLOCK(bev);
+ return (res<0) ? NULL : d.ptr;
+}
+
+static void
+bufferevent_generic_read_timeout_cb(evutil_socket_t fd, short event, void *ctx)
+{
+ struct bufferevent *bev = ctx;
+ bufferevent_incref_and_lock_(bev);
+ bufferevent_disable(bev, EV_READ);
+ bufferevent_run_eventcb_(bev, BEV_EVENT_TIMEOUT|BEV_EVENT_READING, 0);
+ bufferevent_decref_and_unlock_(bev);
+}
+static void
+bufferevent_generic_write_timeout_cb(evutil_socket_t fd, short event, void *ctx)
+{
+ struct bufferevent *bev = ctx;
+ bufferevent_incref_and_lock_(bev);
+ bufferevent_disable(bev, EV_WRITE);
+ bufferevent_run_eventcb_(bev, BEV_EVENT_TIMEOUT|BEV_EVENT_WRITING, 0);
+ bufferevent_decref_and_unlock_(bev);
+}
+
+void
+bufferevent_init_generic_timeout_cbs_(struct bufferevent *bev)
+{
+ event_assign(&bev->ev_read, bev->ev_base, -1, EV_FINALIZE,
+ bufferevent_generic_read_timeout_cb, bev);
+ event_assign(&bev->ev_write, bev->ev_base, -1, EV_FINALIZE,
+ bufferevent_generic_write_timeout_cb, bev);
+}
+
+int
+bufferevent_generic_adj_timeouts_(struct bufferevent *bev)
+{
+ const short enabled = bev->enabled;
+ struct bufferevent_private *bev_p =
+ EVUTIL_UPCAST(bev, struct bufferevent_private, bev);
+ int r1=0, r2=0;
+ if ((enabled & EV_READ) && !bev_p->read_suspended &&
+ evutil_timerisset(&bev->timeout_read))
+ r1 = event_add(&bev->ev_read, &bev->timeout_read);
+ else
+ r1 = event_del(&bev->ev_read);
+
+ if ((enabled & EV_WRITE) && !bev_p->write_suspended &&
+ evutil_timerisset(&bev->timeout_write) &&
+ evbuffer_get_length(bev->output))
+ r2 = event_add(&bev->ev_write, &bev->timeout_write);
+ else
+ r2 = event_del(&bev->ev_write);
+ if (r1 < 0 || r2 < 0)
+ return -1;
+ return 0;
+}
+
+int
+bufferevent_generic_adj_existing_timeouts_(struct bufferevent *bev)
+{
+ int r = 0;
+ if (event_pending(&bev->ev_read, EV_READ, NULL)) {
+ if (evutil_timerisset(&bev->timeout_read)) {
+ if (bufferevent_add_event_(&bev->ev_read, &bev->timeout_read) < 0)
+ r = -1;
+ } else {
+ event_remove_timer(&bev->ev_read);
+ }
+ }
+ if (event_pending(&bev->ev_write, EV_WRITE, NULL)) {
+ if (evutil_timerisset(&bev->timeout_write)) {
+ if (bufferevent_add_event_(&bev->ev_write, &bev->timeout_write) < 0)
+ r = -1;
+ } else {
+ event_remove_timer(&bev->ev_write);
+ }
+ }
+ return r;
+}
+
+int
+bufferevent_add_event_(struct event *ev, const struct timeval *tv)
+{
+ if (!evutil_timerisset(tv))
+ return event_add(ev, NULL);
+ else
+ return event_add(ev, tv);
+}
+
+/* For use by user programs only; internally, we should be calling
+ either bufferevent_incref_and_lock_(), or BEV_LOCK. */
+void
+bufferevent_lock(struct bufferevent *bev)
+{
+ bufferevent_incref_and_lock_(bev);
+}
+
+void
+bufferevent_unlock(struct bufferevent *bev)
+{
+ bufferevent_decref_and_unlock_(bev);
+}
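
To show how the pieces above fit together, here is a hedged sketch of typical application use (illustrative only; echo_readcb, echo_eventcb and make_echo_bev are invented names, and bufferevent_socket_new comes from bufferevent_sock.c elsewhere in this patch):

static void
echo_readcb(struct bufferevent *bev, void *ctx)
{
	/* Echo whatever arrived straight back to the peer. */
	bufferevent_write_buffer(bev, bufferevent_get_input(bev));
}

static void
echo_eventcb(struct bufferevent *bev, short what, void *ctx)
{
	if (what & (BEV_EVENT_EOF | BEV_EVENT_ERROR))
		bufferevent_free(bev);
}

static struct bufferevent *
make_echo_bev(struct event_base *base, evutil_socket_t fd)
{
	struct bufferevent *bev =
	    bufferevent_socket_new(base, fd, BEV_OPT_CLOSE_ON_FREE);
	if (!bev)
		return NULL;
	bufferevent_setcb(bev, echo_readcb, NULL, echo_eventcb, NULL);
	bufferevent_enable(bev, EV_READ);	/* EV_WRITE is enabled by default */
	return bev;
}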
diff --git a/libs/libevent/src/bufferevent_async.c b/libs/libevent/src/bufferevent_async.c
new file mode 100644
index 0000000000..6395e57a9f
--- /dev/null
+++ b/libs/libevent/src/bufferevent_async.c
@@ -0,0 +1,686 @@
+/*
+ * Copyright (c) 2009-2012 Niels Provos and Nick Mathewson
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "event2/event-config.h"
+#include "evconfig-private.h"
+
+#ifdef EVENT__HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#ifdef EVENT__HAVE_STDARG_H
+#include <stdarg.h>
+#endif
+#ifdef EVENT__HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+
+#ifdef _WIN32
+#include <winsock2.h>
+#include <ws2tcpip.h>
+#endif
+
+#include <sys/queue.h>
+
+#include "event2/util.h"
+#include "event2/bufferevent.h"
+#include "event2/buffer.h"
+#include "event2/bufferevent_struct.h"
+#include "event2/event.h"
+#include "event2/util.h"
+#include "event-internal.h"
+#include "log-internal.h"
+#include "mm-internal.h"
+#include "bufferevent-internal.h"
+#include "util-internal.h"
+#include "iocp-internal.h"
+
+#ifndef SO_UPDATE_CONNECT_CONTEXT
+/* Mingw is sometimes missing this */
+#define SO_UPDATE_CONNECT_CONTEXT 0x7010
+#endif
+
+/* prototypes */
+static int be_async_enable(struct bufferevent *, short);
+static int be_async_disable(struct bufferevent *, short);
+static void be_async_destruct(struct bufferevent *);
+static int be_async_flush(struct bufferevent *, short, enum bufferevent_flush_mode);
+static int be_async_ctrl(struct bufferevent *, enum bufferevent_ctrl_op, union bufferevent_ctrl_data *);
+
+struct bufferevent_async {
+ struct bufferevent_private bev;
+ struct event_overlapped connect_overlapped;
+ struct event_overlapped read_overlapped;
+ struct event_overlapped write_overlapped;
+ size_t read_in_progress;
+ size_t write_in_progress;
+ unsigned ok : 1;
+ unsigned read_added : 1;
+ unsigned write_added : 1;
+};
+
+const struct bufferevent_ops bufferevent_ops_async = {
+ "socket_async",
+ evutil_offsetof(struct bufferevent_async, bev.bev),
+ be_async_enable,
+ be_async_disable,
+ NULL, /* Unlink */
+ be_async_destruct,
+ bufferevent_generic_adj_timeouts_,
+ be_async_flush,
+ be_async_ctrl,
+};
+
+static inline struct bufferevent_async *
+upcast(struct bufferevent *bev)
+{
+ struct bufferevent_async *bev_a;
+ if (bev->be_ops != &bufferevent_ops_async)
+ return NULL;
+ bev_a = EVUTIL_UPCAST(bev, struct bufferevent_async, bev.bev);
+ return bev_a;
+}
+
+static inline struct bufferevent_async *
+upcast_connect(struct event_overlapped *eo)
+{
+ struct bufferevent_async *bev_a;
+ bev_a = EVUTIL_UPCAST(eo, struct bufferevent_async, connect_overlapped);
+ EVUTIL_ASSERT(BEV_IS_ASYNC(&bev_a->bev.bev));
+ return bev_a;
+}
+
+static inline struct bufferevent_async *
+upcast_read(struct event_overlapped *eo)
+{
+ struct bufferevent_async *bev_a;
+ bev_a = EVUTIL_UPCAST(eo, struct bufferevent_async, read_overlapped);
+ EVUTIL_ASSERT(BEV_IS_ASYNC(&bev_a->bev.bev));
+ return bev_a;
+}
+
+static inline struct bufferevent_async *
+upcast_write(struct event_overlapped *eo)
+{
+ struct bufferevent_async *bev_a;
+ bev_a = EVUTIL_UPCAST(eo, struct bufferevent_async, write_overlapped);
+ EVUTIL_ASSERT(BEV_IS_ASYNC(&bev_a->bev.bev));
+ return bev_a;
+}
+
+static void
+bev_async_del_write(struct bufferevent_async *beva)
+{
+ struct bufferevent *bev = &beva->bev.bev;
+
+ if (beva->write_added) {
+ beva->write_added = 0;
+ event_base_del_virtual_(bev->ev_base);
+ }
+}
+
+static void
+bev_async_del_read(struct bufferevent_async *beva)
+{
+ struct bufferevent *bev = &beva->bev.bev;
+
+ if (beva->read_added) {
+ beva->read_added = 0;
+ event_base_del_virtual_(bev->ev_base);
+ }
+}
+
+static void
+bev_async_add_write(struct bufferevent_async *beva)
+{
+ struct bufferevent *bev = &beva->bev.bev;
+
+ if (!beva->write_added) {
+ beva->write_added = 1;
+ event_base_add_virtual_(bev->ev_base);
+ }
+}
+
+static void
+bev_async_add_read(struct bufferevent_async *beva)
+{
+ struct bufferevent *bev = &beva->bev.bev;
+
+ if (!beva->read_added) {
+ beva->read_added = 1;
+ event_base_add_virtual_(bev->ev_base);
+ }
+}
+
+static void
+bev_async_consider_writing(struct bufferevent_async *beva)
+{
+ size_t at_most;
+ int limit;
+ struct bufferevent *bev = &beva->bev.bev;
+
+ /* Don't write if there's a write in progress, or we do not
+ * want to write, or when there's nothing left to write. */
+ if (beva->write_in_progress || beva->bev.connecting)
+ return;
+ if (!beva->ok || !(bev->enabled&EV_WRITE) ||
+ !evbuffer_get_length(bev->output)) {
+ bev_async_del_write(beva);
+ return;
+ }
+
+ at_most = evbuffer_get_length(bev->output);
+
+	/* This is safe so long as bufferevent_get_write_max_ never returns
+	 * more than INT_MAX.  That's true for now. XXXX */
+ limit = (int)bufferevent_get_write_max_(&beva->bev);
+ if (at_most >= (size_t)limit && limit >= 0)
+ at_most = limit;
+
+ if (beva->bev.write_suspended) {
+ bev_async_del_write(beva);
+ return;
+ }
+
+ /* XXXX doesn't respect low-water mark very well. */
+ bufferevent_incref_(bev);
+ if (evbuffer_launch_write_(bev->output, at_most,
+ &beva->write_overlapped)) {
+ bufferevent_decref_(bev);
+ beva->ok = 0;
+ bufferevent_run_eventcb_(bev, BEV_EVENT_ERROR, 0);
+ } else {
+ beva->write_in_progress = at_most;
+ bufferevent_decrement_write_buckets_(&beva->bev, at_most);
+ bev_async_add_write(beva);
+ }
+}
+
+static void
+bev_async_consider_reading(struct bufferevent_async *beva)
+{
+ size_t cur_size;
+ size_t read_high;
+ size_t at_most;
+ int limit;
+ struct bufferevent *bev = &beva->bev.bev;
+
+ /* Don't read if there is a read in progress, or we do not
+ * want to read. */
+ if (beva->read_in_progress || beva->bev.connecting)
+ return;
+ if (!beva->ok || !(bev->enabled&EV_READ)) {
+ bev_async_del_read(beva);
+ return;
+ }
+
+ /* Don't read if we're full */
+ cur_size = evbuffer_get_length(bev->input);
+ read_high = bev->wm_read.high;
+ if (read_high) {
+ if (cur_size >= read_high) {
+ bev_async_del_read(beva);
+ return;
+ }
+ at_most = read_high - cur_size;
+ } else {
+ at_most = 16384; /* FIXME totally magic. */
+ }
+
+ /* XXXX This over-commits. */
+	/* XXXX see also the note above on the cast of bufferevent_get_write_max_() */
+ limit = (int)bufferevent_get_read_max_(&beva->bev);
+ if (at_most >= (size_t)limit && limit >= 0)
+ at_most = limit;
+
+ if (beva->bev.read_suspended) {
+ bev_async_del_read(beva);
+ return;
+ }
+
+ bufferevent_incref_(bev);
+ if (evbuffer_launch_read_(bev->input, at_most, &beva->read_overlapped)) {
+ beva->ok = 0;
+ bufferevent_run_eventcb_(bev, BEV_EVENT_ERROR, 0);
+ bufferevent_decref_(bev);
+ } else {
+ beva->read_in_progress = at_most;
+ bufferevent_decrement_read_buckets_(&beva->bev, at_most);
+ bev_async_add_read(beva);
+ }
+
+ return;
+}
+
+static void
+be_async_outbuf_callback(struct evbuffer *buf,
+ const struct evbuffer_cb_info *cbinfo,
+ void *arg)
+{
+ struct bufferevent *bev = arg;
+ struct bufferevent_async *bev_async = upcast(bev);
+
+ /* If we added data to the outbuf and were not writing before,
+ * we may want to write now. */
+
+ bufferevent_incref_and_lock_(bev);
+
+ if (cbinfo->n_added)
+ bev_async_consider_writing(bev_async);
+
+ bufferevent_decref_and_unlock_(bev);
+}
+
+static void
+be_async_inbuf_callback(struct evbuffer *buf,
+ const struct evbuffer_cb_info *cbinfo,
+ void *arg)
+{
+ struct bufferevent *bev = arg;
+ struct bufferevent_async *bev_async = upcast(bev);
+
+ /* If we drained data from the inbuf and were not reading before,
+ * we may want to read now */
+
+ bufferevent_incref_and_lock_(bev);
+
+ if (cbinfo->n_deleted)
+ bev_async_consider_reading(bev_async);
+
+ bufferevent_decref_and_unlock_(bev);
+}
+
+static int
+be_async_enable(struct bufferevent *buf, short what)
+{
+ struct bufferevent_async *bev_async = upcast(buf);
+
+ if (!bev_async->ok)
+ return -1;
+
+ if (bev_async->bev.connecting) {
+ /* Don't launch anything during connection attempts. */
+ return 0;
+ }
+
+ if (what & EV_READ)
+ BEV_RESET_GENERIC_READ_TIMEOUT(buf);
+ if (what & EV_WRITE)
+ BEV_RESET_GENERIC_WRITE_TIMEOUT(buf);
+
+ /* If we newly enable reading or writing, and we aren't reading or
+ writing already, consider launching a new read or write. */
+
+ if (what & EV_READ)
+ bev_async_consider_reading(bev_async);
+ if (what & EV_WRITE)
+ bev_async_consider_writing(bev_async);
+ return 0;
+}
+
+static int
+be_async_disable(struct bufferevent *bev, short what)
+{
+ struct bufferevent_async *bev_async = upcast(bev);
+ /* XXXX If we disable reading or writing, we may want to consider
+ * canceling any in-progress read or write operation, though it might
+ * not work. */
+
+ if (what & EV_READ) {
+ BEV_DEL_GENERIC_READ_TIMEOUT(bev);
+ bev_async_del_read(bev_async);
+ }
+ if (what & EV_WRITE) {
+ BEV_DEL_GENERIC_WRITE_TIMEOUT(bev);
+ bev_async_del_write(bev_async);
+ }
+
+ return 0;
+}
+
+static void
+be_async_destruct(struct bufferevent *bev)
+{
+ struct bufferevent_async *bev_async = upcast(bev);
+ struct bufferevent_private *bev_p = BEV_UPCAST(bev);
+ evutil_socket_t fd;
+
+ EVUTIL_ASSERT(!upcast(bev)->write_in_progress &&
+ !upcast(bev)->read_in_progress);
+
+ bev_async_del_read(bev_async);
+ bev_async_del_write(bev_async);
+
+ fd = evbuffer_overlapped_get_fd_(bev->input);
+ if (fd != (evutil_socket_t)INVALID_SOCKET &&
+ (bev_p->options & BEV_OPT_CLOSE_ON_FREE)) {
+ evutil_closesocket(fd);
+ evbuffer_overlapped_set_fd_(bev->input, INVALID_SOCKET);
+ }
+}
+
+/* GetQueuedCompletionStatus doesn't reliably yield WSA error codes, so
+ * we use WSAGetOverlappedResult to translate. */
+static void
+bev_async_set_wsa_error(struct bufferevent *bev, struct event_overlapped *eo)
+{
+ DWORD bytes, flags;
+ evutil_socket_t fd;
+
+ fd = evbuffer_overlapped_get_fd_(bev->input);
+ WSAGetOverlappedResult(fd, &eo->overlapped, &bytes, FALSE, &flags);
+}
+
+static int
+be_async_flush(struct bufferevent *bev, short what,
+ enum bufferevent_flush_mode mode)
+{
+ return 0;
+}
+
+static void
+connect_complete(struct event_overlapped *eo, ev_uintptr_t key,
+ ev_ssize_t nbytes, int ok)
+{
+ struct bufferevent_async *bev_a = upcast_connect(eo);
+ struct bufferevent *bev = &bev_a->bev.bev;
+ evutil_socket_t sock;
+
+ BEV_LOCK(bev);
+
+ EVUTIL_ASSERT(bev_a->bev.connecting);
+ bev_a->bev.connecting = 0;
+ sock = evbuffer_overlapped_get_fd_(bev_a->bev.bev.input);
+ /* XXXX Handle error? */
+ setsockopt(sock, SOL_SOCKET, SO_UPDATE_CONNECT_CONTEXT, NULL, 0);
+
+ if (ok)
+ bufferevent_async_set_connected_(bev);
+ else
+ bev_async_set_wsa_error(bev, eo);
+
+ bufferevent_run_eventcb_(bev,
+ ok? BEV_EVENT_CONNECTED : BEV_EVENT_ERROR, 0);
+
+ event_base_del_virtual_(bev->ev_base);
+
+ bufferevent_decref_and_unlock_(bev);
+}
+
+static void
+read_complete(struct event_overlapped *eo, ev_uintptr_t key,
+ ev_ssize_t nbytes, int ok)
+{
+ struct bufferevent_async *bev_a = upcast_read(eo);
+ struct bufferevent *bev = &bev_a->bev.bev;
+ short what = BEV_EVENT_READING;
+ ev_ssize_t amount_unread;
+ BEV_LOCK(bev);
+ EVUTIL_ASSERT(bev_a->read_in_progress);
+
+ amount_unread = bev_a->read_in_progress - nbytes;
+ evbuffer_commit_read_(bev->input, nbytes);
+ bev_a->read_in_progress = 0;
+ if (amount_unread)
+ bufferevent_decrement_read_buckets_(&bev_a->bev, -amount_unread);
+
+ if (!ok)
+ bev_async_set_wsa_error(bev, eo);
+
+ if (bev_a->ok) {
+ if (ok && nbytes) {
+ BEV_RESET_GENERIC_READ_TIMEOUT(bev);
+ bufferevent_trigger_nolock_(bev, EV_READ, 0);
+ bev_async_consider_reading(bev_a);
+ } else if (!ok) {
+ what |= BEV_EVENT_ERROR;
+ bev_a->ok = 0;
+ bufferevent_run_eventcb_(bev, what, 0);
+ } else if (!nbytes) {
+ what |= BEV_EVENT_EOF;
+ bev_a->ok = 0;
+ bufferevent_run_eventcb_(bev, what, 0);
+ }
+ }
+
+ bufferevent_decref_and_unlock_(bev);
+}
+
+static void
+write_complete(struct event_overlapped *eo, ev_uintptr_t key,
+ ev_ssize_t nbytes, int ok)
+{
+ struct bufferevent_async *bev_a = upcast_write(eo);
+ struct bufferevent *bev = &bev_a->bev.bev;
+ short what = BEV_EVENT_WRITING;
+ ev_ssize_t amount_unwritten;
+
+ BEV_LOCK(bev);
+ EVUTIL_ASSERT(bev_a->write_in_progress);
+
+ amount_unwritten = bev_a->write_in_progress - nbytes;
+ evbuffer_commit_write_(bev->output, nbytes);
+ bev_a->write_in_progress = 0;
+
+ if (amount_unwritten)
+ bufferevent_decrement_write_buckets_(&bev_a->bev,
+ -amount_unwritten);
+
+
+ if (!ok)
+ bev_async_set_wsa_error(bev, eo);
+
+ if (bev_a->ok) {
+ if (ok && nbytes) {
+ BEV_RESET_GENERIC_WRITE_TIMEOUT(bev);
+ bufferevent_trigger_nolock_(bev, EV_WRITE, 0);
+ bev_async_consider_writing(bev_a);
+ } else if (!ok) {
+ what |= BEV_EVENT_ERROR;
+ bev_a->ok = 0;
+ bufferevent_run_eventcb_(bev, what, 0);
+ } else if (!nbytes) {
+ what |= BEV_EVENT_EOF;
+ bev_a->ok = 0;
+ bufferevent_run_eventcb_(bev, what, 0);
+ }
+ }
+
+ bufferevent_decref_and_unlock_(bev);
+}
+
+struct bufferevent *
+bufferevent_async_new_(struct event_base *base,
+ evutil_socket_t fd, int options)
+{
+ struct bufferevent_async *bev_a;
+ struct bufferevent *bev;
+ struct event_iocp_port *iocp;
+
+ options |= BEV_OPT_THREADSAFE;
+
+ if (!(iocp = event_base_get_iocp_(base)))
+ return NULL;
+
+ if (fd >= 0 && event_iocp_port_associate_(iocp, fd, 1)<0) {
+ int err = GetLastError();
+		/* We may have already associated this fd with a port.
+		 * Let's hope it's this port, and that the error code
+		 * for doing this never changes. */
+ if (err != ERROR_INVALID_PARAMETER)
+ return NULL;
+ }
+
+ if (!(bev_a = mm_calloc(1, sizeof(struct bufferevent_async))))
+ return NULL;
+
+ bev = &bev_a->bev.bev;
+ if (!(bev->input = evbuffer_overlapped_new_(fd))) {
+ mm_free(bev_a);
+ return NULL;
+ }
+ if (!(bev->output = evbuffer_overlapped_new_(fd))) {
+ evbuffer_free(bev->input);
+ mm_free(bev_a);
+ return NULL;
+ }
+
+ if (bufferevent_init_common_(&bev_a->bev, base, &bufferevent_ops_async,
+ options)<0)
+ goto err;
+
+ evbuffer_add_cb(bev->input, be_async_inbuf_callback, bev);
+ evbuffer_add_cb(bev->output, be_async_outbuf_callback, bev);
+
+ event_overlapped_init_(&bev_a->connect_overlapped, connect_complete);
+ event_overlapped_init_(&bev_a->read_overlapped, read_complete);
+ event_overlapped_init_(&bev_a->write_overlapped, write_complete);
+
+ bufferevent_init_generic_timeout_cbs_(bev);
+
+ bev_a->ok = fd >= 0;
+
+ return bev;
+err:
+ bufferevent_free(&bev_a->bev.bev);
+ return NULL;
+}
+
+void
+bufferevent_async_set_connected_(struct bufferevent *bev)
+{
+ struct bufferevent_async *bev_async = upcast(bev);
+ bev_async->ok = 1;
+ bufferevent_init_generic_timeout_cbs_(bev);
+ /* Now's a good time to consider reading/writing */
+ be_async_enable(bev, bev->enabled);
+}
+
+int
+bufferevent_async_can_connect_(struct bufferevent *bev)
+{
+ const struct win32_extension_fns *ext =
+ event_get_win32_extension_fns_();
+
+ if (BEV_IS_ASYNC(bev) &&
+ event_base_get_iocp_(bev->ev_base) &&
+ ext && ext->ConnectEx)
+ return 1;
+
+ return 0;
+}
+
+int
+bufferevent_async_connect_(struct bufferevent *bev, evutil_socket_t fd,
+ const struct sockaddr *sa, int socklen)
+{
+ BOOL rc;
+ struct bufferevent_async *bev_async = upcast(bev);
+ struct sockaddr_storage ss;
+ const struct win32_extension_fns *ext =
+ event_get_win32_extension_fns_();
+
+ EVUTIL_ASSERT(ext && ext->ConnectEx && fd >= 0 && sa != NULL);
+
+ /* ConnectEx() requires that the socket be bound to an address
+ * with bind() before using, otherwise it will fail. We attempt
+ * to issue a bind() here, taking into account that the error
+ * code is set to WSAEINVAL when the socket is already bound. */
+ memset(&ss, 0, sizeof(ss));
+ if (sa->sa_family == AF_INET) {
+ struct sockaddr_in *sin = (struct sockaddr_in *)&ss;
+ sin->sin_family = AF_INET;
+ sin->sin_addr.s_addr = INADDR_ANY;
+ } else if (sa->sa_family == AF_INET6) {
+ struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&ss;
+ sin6->sin6_family = AF_INET6;
+ sin6->sin6_addr = in6addr_any;
+ } else {
+ /* Well, the user will have to bind() */
+ return -1;
+ }
+ if (bind(fd, (struct sockaddr *)&ss, sizeof(ss)) < 0 &&
+ WSAGetLastError() != WSAEINVAL)
+ return -1;
+
+ event_base_add_virtual_(bev->ev_base);
+ bufferevent_incref_(bev);
+ rc = ext->ConnectEx(fd, sa, socklen, NULL, 0, NULL,
+ &bev_async->connect_overlapped.overlapped);
+ if (rc || WSAGetLastError() == ERROR_IO_PENDING)
+ return 0;
+
+ event_base_del_virtual_(bev->ev_base);
+ bufferevent_decref_(bev);
+
+ return -1;
+}
+
+static int
+be_async_ctrl(struct bufferevent *bev, enum bufferevent_ctrl_op op,
+ union bufferevent_ctrl_data *data)
+{
+ switch (op) {
+ case BEV_CTRL_GET_FD:
+ data->fd = evbuffer_overlapped_get_fd_(bev->input);
+ return 0;
+ case BEV_CTRL_SET_FD: {
+ struct event_iocp_port *iocp;
+
+ if (data->fd == evbuffer_overlapped_get_fd_(bev->input))
+ return 0;
+ if (!(iocp = event_base_get_iocp_(bev->ev_base)))
+ return -1;
+ if (event_iocp_port_associate_(iocp, data->fd, 1) < 0)
+ return -1;
+ evbuffer_overlapped_set_fd_(bev->input, data->fd);
+ evbuffer_overlapped_set_fd_(bev->output, data->fd);
+ return 0;
+ }
+ case BEV_CTRL_CANCEL_ALL: {
+ struct bufferevent_async *bev_a = upcast(bev);
+ evutil_socket_t fd = evbuffer_overlapped_get_fd_(bev->input);
+ if (fd != (evutil_socket_t)INVALID_SOCKET &&
+ (bev_a->bev.options & BEV_OPT_CLOSE_ON_FREE)) {
+ closesocket(fd);
+ evbuffer_overlapped_set_fd_(bev->input, INVALID_SOCKET);
+ }
+ bev_a->ok = 0;
+ return 0;
+ }
+ case BEV_CTRL_GET_UNDERLYING:
+ default:
+ return -1;
+ }
+}
+
+
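The IOCP-based bufferevents above are only selected when the event base was created with IOCP enabled. A sketch of the usual Windows-side setup, included here for illustration only (error handling omitted; make_iocp_base is an invented name):

struct event_base *
make_iocp_base(void)
{
	struct event_config *cfg = event_config_new();
	struct event_base *base;

	evthread_use_windows_threads();	/* IOCP completion handling needs threads */
	event_config_set_flag(cfg, EVENT_BASE_FLAG_STARTUP_IOCP);
	base = event_base_new_with_config(cfg);
	event_config_free(cfg);
	return base;
}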
diff --git a/libs/libevent/src/bufferevent_filter.c b/libs/libevent/src/bufferevent_filter.c
new file mode 100644
index 0000000000..6c3ffc4f2d
--- /dev/null
+++ b/libs/libevent/src/bufferevent_filter.c
@@ -0,0 +1,555 @@
+/*
+ * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
+ * Copyright (c) 2002-2006 Niels Provos <provos@citi.umich.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "evconfig-private.h"
+
+#include <sys/types.h>
+
+#include "event2/event-config.h"
+
+#ifdef EVENT__HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#ifdef EVENT__HAVE_STDARG_H
+#include <stdarg.h>
+#endif
+
+#ifdef _WIN32
+#include <winsock2.h>
+#endif
+
+#include "event2/util.h"
+#include "event2/bufferevent.h"
+#include "event2/buffer.h"
+#include "event2/bufferevent_struct.h"
+#include "event2/event.h"
+#include "log-internal.h"
+#include "mm-internal.h"
+#include "bufferevent-internal.h"
+#include "util-internal.h"
+
+/* prototypes */
+static int be_filter_enable(struct bufferevent *, short);
+static int be_filter_disable(struct bufferevent *, short);
+static void be_filter_unlink(struct bufferevent *);
+static void be_filter_destruct(struct bufferevent *);
+
+static void be_filter_readcb(struct bufferevent *, void *);
+static void be_filter_writecb(struct bufferevent *, void *);
+static void be_filter_eventcb(struct bufferevent *, short, void *);
+static int be_filter_flush(struct bufferevent *bufev,
+ short iotype, enum bufferevent_flush_mode mode);
+static int be_filter_ctrl(struct bufferevent *, enum bufferevent_ctrl_op, union bufferevent_ctrl_data *);
+
+static void bufferevent_filtered_outbuf_cb(struct evbuffer *buf,
+ const struct evbuffer_cb_info *info, void *arg);
+
+struct bufferevent_filtered {
+ struct bufferevent_private bev;
+
+ /** The bufferevent that we read/write filtered data from/to. */
+ struct bufferevent *underlying;
+ /** A callback on our outbuf to notice when somebody adds data */
+ struct evbuffer_cb_entry *outbuf_cb;
+ /** True iff we have received an EOF callback from the underlying
+ * bufferevent. */
+ unsigned got_eof;
+
+ /** Function to free context when we're done. */
+ void (*free_context)(void *);
+ /** Input filter */
+ bufferevent_filter_cb process_in;
+ /** Output filter */
+ bufferevent_filter_cb process_out;
+ /** User-supplied argument to the filters. */
+ void *context;
+};
+
+const struct bufferevent_ops bufferevent_ops_filter = {
+ "filter",
+ evutil_offsetof(struct bufferevent_filtered, bev.bev),
+ be_filter_enable,
+ be_filter_disable,
+ be_filter_unlink,
+ be_filter_destruct,
+ bufferevent_generic_adj_timeouts_,
+ be_filter_flush,
+ be_filter_ctrl,
+};
+
+/* Given a bufferevent that is really the embedded bev of a bufferevent_filtered,
+ * return that bufferevent_filtered; otherwise return NULL. */
+static inline struct bufferevent_filtered *
+upcast(struct bufferevent *bev)
+{
+ struct bufferevent_filtered *bev_f;
+ if (bev->be_ops != &bufferevent_ops_filter)
+ return NULL;
+ bev_f = (void*)( ((char*)bev) -
+ evutil_offsetof(struct bufferevent_filtered, bev.bev));
+ EVUTIL_ASSERT(bev_f->bev.bev.be_ops == &bufferevent_ops_filter);
+ return bev_f;
+}
+
+#define downcast(bev_f) (&(bev_f)->bev.bev)
+
+/** Return 1 iff bevf's underlying bufferevent's output buffer is at or
+ * over its high watermark such that we should not write to it in a given
+ * flush mode. */
+static int
+be_underlying_writebuf_full(struct bufferevent_filtered *bevf,
+ enum bufferevent_flush_mode state)
+{
+ struct bufferevent *u = bevf->underlying;
+ return state == BEV_NORMAL &&
+ u->wm_write.high &&
+ evbuffer_get_length(u->output) >= u->wm_write.high;
+}
+
+/** Return 1 if our input buffer is at or over its high watermark such that we
+ * should not write to it in a given flush mode. */
+static int
+be_readbuf_full(struct bufferevent_filtered *bevf,
+ enum bufferevent_flush_mode state)
+{
+ struct bufferevent *bufev = downcast(bevf);
+ return state == BEV_NORMAL &&
+ bufev->wm_read.high &&
+ evbuffer_get_length(bufev->input) >= bufev->wm_read.high;
+}
+
+
+/* Filter to use when we're created with a NULL filter. */
+static enum bufferevent_filter_result
+be_null_filter(struct evbuffer *src, struct evbuffer *dst, ev_ssize_t lim,
+ enum bufferevent_flush_mode state, void *ctx)
+{
+ (void)state;
+ if (evbuffer_remove_buffer(src, dst, lim) == 0)
+ return BEV_OK;
+ else
+ return BEV_ERROR;
+}
+
+struct bufferevent *
+bufferevent_filter_new(struct bufferevent *underlying,
+ bufferevent_filter_cb input_filter,
+ bufferevent_filter_cb output_filter,
+ int options,
+ void (*free_context)(void *),
+ void *ctx)
+{
+ struct bufferevent_filtered *bufev_f;
+ int tmp_options = options & ~BEV_OPT_THREADSAFE;
+
+ if (!underlying)
+ return NULL;
+
+ if (!input_filter)
+ input_filter = be_null_filter;
+ if (!output_filter)
+ output_filter = be_null_filter;
+
+ bufev_f = mm_calloc(1, sizeof(struct bufferevent_filtered));
+ if (!bufev_f)
+ return NULL;
+
+ if (bufferevent_init_common_(&bufev_f->bev, underlying->ev_base,
+ &bufferevent_ops_filter, tmp_options) < 0) {
+ mm_free(bufev_f);
+ return NULL;
+ }
+ if (options & BEV_OPT_THREADSAFE) {
+ bufferevent_enable_locking_(downcast(bufev_f), NULL);
+ }
+
+ bufev_f->underlying = underlying;
+
+ bufev_f->process_in = input_filter;
+ bufev_f->process_out = output_filter;
+ bufev_f->free_context = free_context;
+ bufev_f->context = ctx;
+
+ bufferevent_setcb(bufev_f->underlying,
+ be_filter_readcb, be_filter_writecb, be_filter_eventcb, bufev_f);
+
+ bufev_f->outbuf_cb = evbuffer_add_cb(downcast(bufev_f)->output,
+ bufferevent_filtered_outbuf_cb, bufev_f);
+
+ bufferevent_init_generic_timeout_cbs_(downcast(bufev_f));
+ bufferevent_incref_(underlying);
+
+ bufferevent_enable(underlying, EV_READ|EV_WRITE);
+ bufferevent_suspend_read_(underlying, BEV_SUSPEND_FILT_READ);
+
+ return downcast(bufev_f);
+}
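
A short illustrative sketch of using the constructor above (not part of this patch; wrap_passthrough is an invented name): passing NULL for both filters gives a transparent wrapper, since bufferevent_filter_new() substitutes be_null_filter for missing callbacks.

struct bufferevent *
wrap_passthrough(struct bufferevent *underlying)
{
	/* NULL filters fall back to be_null_filter, which just copies
	 * bytes through unchanged; CLOSE_ON_FREE ties the lifetime of
	 * `underlying` to the new filtering bufferevent. */
	return bufferevent_filter_new(underlying, NULL, NULL,
	    BEV_OPT_CLOSE_ON_FREE, NULL, NULL);
}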
+
+static void
+be_filter_unlink(struct bufferevent *bev)
+{
+ struct bufferevent_filtered *bevf = upcast(bev);
+ EVUTIL_ASSERT(bevf);
+
+ if (bevf->bev.options & BEV_OPT_CLOSE_ON_FREE) {
+ /* Yes, there is also a decref in bufferevent_decref_.
+ * That decref corresponds to the incref when we set
+ * underlying for the first time. This decref is an
+ * extra one to remove the last reference.
+ */
+ if (BEV_UPCAST(bevf->underlying)->refcnt < 2) {
+			event_warnx("BEV_OPT_CLOSE_ON_FREE set on a "
+			    "bufferevent with too few references");
+ } else {
+ bufferevent_free(bevf->underlying);
+ }
+ } else {
+ if (bevf->underlying) {
+ if (bevf->underlying->errorcb == be_filter_eventcb)
+ bufferevent_setcb(bevf->underlying,
+ NULL, NULL, NULL, NULL);
+ bufferevent_unsuspend_read_(bevf->underlying,
+ BEV_SUSPEND_FILT_READ);
+ }
+ }
+}
+
+static void
+be_filter_destruct(struct bufferevent *bev)
+{
+ struct bufferevent_filtered *bevf = upcast(bev);
+ EVUTIL_ASSERT(bevf);
+ if (bevf->free_context)
+ bevf->free_context(bevf->context);
+}
+
+static int
+be_filter_enable(struct bufferevent *bev, short event)
+{
+ struct bufferevent_filtered *bevf = upcast(bev);
+ if (event & EV_WRITE)
+ BEV_RESET_GENERIC_WRITE_TIMEOUT(bev);
+
+ if (event & EV_READ) {
+ BEV_RESET_GENERIC_READ_TIMEOUT(bev);
+ bufferevent_unsuspend_read_(bevf->underlying,
+ BEV_SUSPEND_FILT_READ);
+ }
+ return 0;
+}
+
+static int
+be_filter_disable(struct bufferevent *bev, short event)
+{
+ struct bufferevent_filtered *bevf = upcast(bev);
+ if (event & EV_WRITE)
+ BEV_DEL_GENERIC_WRITE_TIMEOUT(bev);
+ if (event & EV_READ) {
+ BEV_DEL_GENERIC_READ_TIMEOUT(bev);
+ bufferevent_suspend_read_(bevf->underlying,
+ BEV_SUSPEND_FILT_READ);
+ }
+ return 0;
+}
+
+static enum bufferevent_filter_result
+be_filter_process_input(struct bufferevent_filtered *bevf,
+ enum bufferevent_flush_mode state,
+ int *processed_out)
+{
+ enum bufferevent_filter_result res;
+ struct bufferevent *bev = downcast(bevf);
+
+ if (state == BEV_NORMAL) {
+ /* If we're in 'normal' mode, don't urge data on the filter
+ * unless we're reading data and under our high-water mark.*/
+ if (!(bev->enabled & EV_READ) ||
+ be_readbuf_full(bevf, state))
+ return BEV_OK;
+ }
+
+ do {
+ ev_ssize_t limit = -1;
+ if (state == BEV_NORMAL && bev->wm_read.high)
+ limit = bev->wm_read.high -
+ evbuffer_get_length(bev->input);
+
+ res = bevf->process_in(bevf->underlying->input,
+ bev->input, limit, state, bevf->context);
+
+ if (res == BEV_OK)
+ *processed_out = 1;
+ } while (res == BEV_OK &&
+ (bev->enabled & EV_READ) &&
+ evbuffer_get_length(bevf->underlying->input) &&
+ !be_readbuf_full(bevf, state));
+
+ if (*processed_out)
+ BEV_RESET_GENERIC_READ_TIMEOUT(bev);
+
+ return res;
+}
+
+
+static enum bufferevent_filter_result
+be_filter_process_output(struct bufferevent_filtered *bevf,
+ enum bufferevent_flush_mode state,
+ int *processed_out)
+{
+ /* Requires references and lock: might call writecb */
+ enum bufferevent_filter_result res = BEV_OK;
+ struct bufferevent *bufev = downcast(bevf);
+ int again = 0;
+
+ if (state == BEV_NORMAL) {
+ /* If we're in 'normal' mode, don't urge data on the
+ * filter unless we're writing data, and the underlying
+ * bufferevent is accepting data, and we have data to
+ * give the filter. If we're in 'flush' or 'finish',
+ * call the filter no matter what. */
+ if (!(bufev->enabled & EV_WRITE) ||
+ be_underlying_writebuf_full(bevf, state) ||
+ !evbuffer_get_length(bufev->output))
+ return BEV_OK;
+ }
+
+ /* disable the callback that calls this function
+ when the user adds to the output buffer. */
+ evbuffer_cb_set_flags(bufev->output, bevf->outbuf_cb, 0);
+
+ do {
+ int processed = 0;
+ again = 0;
+
+ do {
+ ev_ssize_t limit = -1;
+ if (state == BEV_NORMAL &&
+ bevf->underlying->wm_write.high)
+ limit = bevf->underlying->wm_write.high -
+ evbuffer_get_length(bevf->underlying->output);
+
+ res = bevf->process_out(downcast(bevf)->output,
+ bevf->underlying->output,
+ limit,
+ state,
+ bevf->context);
+
+ if (res == BEV_OK)
+ processed = *processed_out = 1;
+ } while (/* Stop if the filter wasn't successful...*/
+ res == BEV_OK &&
+ /* Or if we aren't writing any more. */
+ (bufev->enabled & EV_WRITE) &&
+ /* Or if we have nothing more to write and we are
+ * not flushing. */
+ evbuffer_get_length(bufev->output) &&
+ /* Or if we have filled the underlying output buffer. */
+ !be_underlying_writebuf_full(bevf,state));
+
+ if (processed) {
+ /* call the write callback.*/
+ bufferevent_trigger_nolock_(bufev, EV_WRITE, 0);
+
+ if (res == BEV_OK &&
+ (bufev->enabled & EV_WRITE) &&
+ evbuffer_get_length(bufev->output) &&
+ !be_underlying_writebuf_full(bevf, state)) {
+ again = 1;
+ }
+ }
+ } while (again);
+
+ /* reenable the outbuf_cb */
+ evbuffer_cb_set_flags(bufev->output,bevf->outbuf_cb,
+ EVBUFFER_CB_ENABLED);
+
+ if (*processed_out)
+ BEV_RESET_GENERIC_WRITE_TIMEOUT(bufev);
+
+ return res;
+}
+
+/* Called when the size of our outbuf changes. */
+static void
+bufferevent_filtered_outbuf_cb(struct evbuffer *buf,
+ const struct evbuffer_cb_info *cbinfo, void *arg)
+{
+ struct bufferevent_filtered *bevf = arg;
+ struct bufferevent *bev = downcast(bevf);
+
+ if (cbinfo->n_added) {
+ int processed_any = 0;
+ /* Somebody added more data to the output buffer. Try to
+ * process it, if we should. */
+ bufferevent_incref_and_lock_(bev);
+ be_filter_process_output(bevf, BEV_NORMAL, &processed_any);
+ bufferevent_decref_and_unlock_(bev);
+ }
+}
+
+/* Called when the underlying bufferevent has read data. */
+static void
+be_filter_readcb(struct bufferevent *underlying, void *me_)
+{
+ struct bufferevent_filtered *bevf = me_;
+ enum bufferevent_filter_result res;
+ enum bufferevent_flush_mode state;
+ struct bufferevent *bufev = downcast(bevf);
+ struct bufferevent_private *bufev_private = BEV_UPCAST(bufev);
+ int processed_any = 0;
+
+ BEV_LOCK(bufev);
+
+ // It's possible our refcount is 0 at this point if another thread freed our filter bufferevent
+ EVUTIL_ASSERT(bufev_private->refcnt >= 0);
+
+ // If our refcount is > 0
+ if (bufev_private->refcnt > 0) {
+
+ if (bevf->got_eof)
+ state = BEV_FINISHED;
+ else
+ state = BEV_NORMAL;
+
+ /* XXXX use return value */
+ res = be_filter_process_input(bevf, state, &processed_any);
+ (void)res;
+
+ /* XXX This should be in process_input, not here. There are
+ * other places that can call process-input, and they should
+ * force readcb calls as needed. */
+ if (processed_any)
+ bufferevent_trigger_nolock_(bufev, EV_READ, 0);
+ }
+
+ BEV_UNLOCK(bufev);
+}
+
+/* Called when the underlying socket has drained enough that we can write to
+ it. */
+static void
+be_filter_writecb(struct bufferevent *underlying, void *me_)
+{
+ struct bufferevent_filtered *bevf = me_;
+ struct bufferevent *bev = downcast(bevf);
+ struct bufferevent_private *bufev_private = BEV_UPCAST(bev);
+ int processed_any = 0;
+
+ BEV_LOCK(bev);
+
+ // It's possible our refcount is 0 at this point if another thread freed our filter bufferevent
+ EVUTIL_ASSERT(bufev_private->refcnt >= 0);
+
+ // If our refcount is > 0
+ if (bufev_private->refcnt > 0) {
+ be_filter_process_output(bevf, BEV_NORMAL, &processed_any);
+ }
+
+ BEV_UNLOCK(bev);
+}
+
+/* Called when the underlying socket has given us an error */
+static void
+be_filter_eventcb(struct bufferevent *underlying, short what, void *me_)
+{
+ struct bufferevent_filtered *bevf = me_;
+ struct bufferevent *bev = downcast(bevf);
+ struct bufferevent_private *bufev_private = BEV_UPCAST(bev);
+
+ BEV_LOCK(bev);
+
+ // It's possible our refcount is 0 at this point if another thread freed our filter bufferevent
+ EVUTIL_ASSERT(bufev_private->refcnt >= 0);
+
+ // If our refcount is > 0
+ if (bufev_private->refcnt > 0) {
+
+ /* All we can really do is tell our own eventcb. */
+ bufferevent_run_eventcb_(bev, what, 0);
+ }
+
+ BEV_UNLOCK(bev);
+}
+
+static int
+be_filter_flush(struct bufferevent *bufev,
+ short iotype, enum bufferevent_flush_mode mode)
+{
+ struct bufferevent_filtered *bevf = upcast(bufev);
+ int processed_any = 0;
+ EVUTIL_ASSERT(bevf);
+
+ bufferevent_incref_and_lock_(bufev);
+
+ if (iotype & EV_READ) {
+ be_filter_process_input(bevf, mode, &processed_any);
+ }
+ if (iotype & EV_WRITE) {
+ be_filter_process_output(bevf, mode, &processed_any);
+ }
+ /* XXX check the return value? */
+ /* XXX does this want to recursively call lower-level flushes? */
+ bufferevent_flush(bevf->underlying, iotype, mode);
+
+ bufferevent_decref_and_unlock_(bufev);
+
+ return processed_any;
+}
+
+static int
+be_filter_ctrl(struct bufferevent *bev, enum bufferevent_ctrl_op op,
+ union bufferevent_ctrl_data *data)
+{
+ struct bufferevent_filtered *bevf;
+ switch (op) {
+ case BEV_CTRL_GET_UNDERLYING:
+ bevf = upcast(bev);
+ data->ptr = bevf->underlying;
+ return 0;
+ case BEV_CTRL_SET_FD:
+ bevf = upcast(bev);
+
+ if (bevf->underlying &&
+ bevf->underlying->be_ops &&
+ bevf->underlying->be_ops->ctrl) {
+ return (bevf->underlying->be_ops->ctrl)(bevf->underlying, op, data);
+ }
+
+ case BEV_CTRL_GET_FD:
+ case BEV_CTRL_CANCEL_ALL:
+ default:
+ return -1;
+ }
+
+ return -1;
+}
diff --git a/libs/libevent/src/bufferevent_openssl.c b/libs/libevent/src/bufferevent_openssl.c
new file mode 100644
index 0000000000..37478b6a83
--- /dev/null
+++ b/libs/libevent/src/bufferevent_openssl.c
@@ -0,0 +1,1484 @@
+/*
+ * Copyright (c) 2009-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// Get rid of OSX 10.7 and greater deprecation warnings.
+#if defined(__APPLE__) && defined(__clang__)
+#pragma clang diagnostic ignored "-Wdeprecated-declarations"
+#endif
+
+#include "event2/event-config.h"
+#include "evconfig-private.h"
+
+#include <sys/types.h>
+
+#ifdef EVENT__HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#ifdef EVENT__HAVE_STDARG_H
+#include <stdarg.h>
+#endif
+#ifdef EVENT__HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+
+#ifdef _WIN32
+#include <winsock2.h>
+#endif
+
+#include "event2/bufferevent.h"
+#include "event2/bufferevent_struct.h"
+#include "event2/bufferevent_ssl.h"
+#include "event2/buffer.h"
+#include "event2/event.h"
+
+#include "mm-internal.h"
+#include "bufferevent-internal.h"
+#include "log-internal.h"
+
+#include <openssl/bio.h>
+#include <openssl/ssl.h>
+#include <openssl/err.h>
+
+/*
+ * Define an OpenSSL bio that targets a bufferevent.
+ */
+
+/* --------------------
+ A BIO is an OpenSSL abstraction that handles reading and writing data. The
+ library will happily speak SSL over anything that implements a BIO
+ interface.
+
+ Here we define a BIO implementation that directs its output to a
+ bufferevent. We'll want to use this only when none of OpenSSL's built-in
+ IO mechanisms work for us.
+ -------------------- */
+
+/* every BIO type needs its own integer type value. */
+#define BIO_TYPE_LIBEVENT 57
+/* ???? Arguably, we should set BIO_TYPE_FILTER or BIO_TYPE_SOURCE_SINK on
+ * this. */
+
+#if 0
+static void
+print_err(int val)
+{
+ int err;
+ printf("Error was %d\n", val);
+
+ while ((err = ERR_get_error())) {
+ const char *msg = (const char*)ERR_reason_error_string(err);
+ const char *lib = (const char*)ERR_lib_error_string(err);
+ const char *func = (const char*)ERR_func_error_string(err);
+
+ printf("%s in %s %s\n", msg, lib, func);
+ }
+}
+#else
+#define print_err(v) ((void)0)
+#endif
+
+/* Called to initialize a new BIO */
+static int
+bio_bufferevent_new(BIO *b)
+{
+ b->init = 0;
+ b->num = -1;
+ b->ptr = NULL; /* We'll be putting the bufferevent in this field.*/
+ b->flags = 0;
+ return 1;
+}
+
+/* Called to uninitialize the BIO. */
+static int
+bio_bufferevent_free(BIO *b)
+{
+ if (!b)
+ return 0;
+ if (b->shutdown) {
+ if (b->init && b->ptr)
+ bufferevent_free(b->ptr);
+ b->init = 0;
+ b->flags = 0;
+ b->ptr = NULL;
+ }
+ return 1;
+}
+
+/* Called to extract data from the BIO. */
+static int
+bio_bufferevent_read(BIO *b, char *out, int outlen)
+{
+ int r = 0;
+ struct evbuffer *input;
+
+ BIO_clear_retry_flags(b);
+
+ if (!out)
+ return 0;
+ if (!b->ptr)
+ return -1;
+
+ input = bufferevent_get_input(b->ptr);
+ if (evbuffer_get_length(input) == 0) {
+ /* If there's no data to read, say so. */
+ BIO_set_retry_read(b);
+ return -1;
+ } else {
+ r = evbuffer_remove(input, out, outlen);
+ }
+
+ return r;
+}
+
+/* Called to write data into the BIO */
+static int
+bio_bufferevent_write(BIO *b, const char *in, int inlen)
+{
+ struct bufferevent *bufev = b->ptr;
+ struct evbuffer *output;
+ size_t outlen;
+
+ BIO_clear_retry_flags(b);
+
+ if (!b->ptr)
+ return -1;
+
+ output = bufferevent_get_output(bufev);
+ outlen = evbuffer_get_length(output);
+
+ /* Copy only as much data onto the output buffer as can fit under the
+ * high-water mark. */
+ if (bufev->wm_write.high && bufev->wm_write.high <= (outlen+inlen)) {
+ if (bufev->wm_write.high <= outlen) {
+ /* If no data can fit, we'll need to retry later. */
+ BIO_set_retry_write(b);
+ return -1;
+ }
+ inlen = bufev->wm_write.high - outlen;
+ }
+
+ EVUTIL_ASSERT(inlen > 0);
+ evbuffer_add(output, in, inlen);
+ return inlen;
+}
+
+/* Called to handle various requests */
+static long
+bio_bufferevent_ctrl(BIO *b, int cmd, long num, void *ptr)
+{
+ struct bufferevent *bufev = b->ptr;
+ long ret = 1;
+
+ switch (cmd) {
+ case BIO_CTRL_GET_CLOSE:
+ ret = b->shutdown;
+ break;
+ case BIO_CTRL_SET_CLOSE:
+ b->shutdown = (int)num;
+ break;
+ case BIO_CTRL_PENDING:
+ ret = evbuffer_get_length(bufferevent_get_input(bufev)) != 0;
+ break;
+ case BIO_CTRL_WPENDING:
+ ret = evbuffer_get_length(bufferevent_get_output(bufev)) != 0;
+ break;
+ /* XXXX These two are given a special-case treatment because
+ * of cargo-cultism. I should come up with a better reason. */
+ case BIO_CTRL_DUP:
+ case BIO_CTRL_FLUSH:
+ ret = 1;
+ break;
+ default:
+ ret = 0;
+ break;
+ }
+ return ret;
+}
+
+/* Called to write a string to the BIO */
+static int
+bio_bufferevent_puts(BIO *b, const char *s)
+{
+ return bio_bufferevent_write(b, s, strlen(s));
+}
+
+/* Method table for the bufferevent BIO */
+static BIO_METHOD methods_bufferevent = {
+ BIO_TYPE_LIBEVENT, "bufferevent",
+ bio_bufferevent_write,
+ bio_bufferevent_read,
+ bio_bufferevent_puts,
+ NULL /* bio_bufferevent_gets */,
+ bio_bufferevent_ctrl,
+ bio_bufferevent_new,
+ bio_bufferevent_free,
+ NULL /* callback_ctrl */,
+};
+
+/* Return the method table for the bufferevent BIO */
+static BIO_METHOD *
+BIO_s_bufferevent(void)
+{
+ return &methods_bufferevent;
+}
+
+/* Create a new BIO to wrap communication around a bufferevent. If close_flag
+ * is true, the bufferevent will be freed when the BIO is closed. */
+static BIO *
+BIO_new_bufferevent(struct bufferevent *bufferevent, int close_flag)
+{
+ BIO *result;
+ if (!bufferevent)
+ return NULL;
+ if (!(result = BIO_new(BIO_s_bufferevent())))
+ return NULL;
+ result->init = 1;
+ result->ptr = bufferevent;
+ result->shutdown = close_flag ? 1 : 0;
+ return result;
+}
+
+/* --------------------
+ Now, here's the OpenSSL-based implementation of bufferevent.
+
+ The implementation comes in two flavors: one that connects its SSL object
+ to an underlying bufferevent using a BIO_bufferevent, and one that has the
+ SSL object connect to a socket directly. The latter should generally be
+ faster, except on Windows, where your best bet is using a
+ bufferevent_async.
+
+ (OpenSSL supports many other BIO types, too. But we can't use any unless
+ we have a good way to get notified when they become readable/writable.)
+ -------------------- */
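+
+/* Usage sketch (assumes an event_base "base", a connected socket "fd" or an
+ * existing bufferevent "underlying", and a configured SSL_CTX "ctx"; none of
+ * these are defined in this file):
+ *
+ *     SSL *ssl = SSL_new(ctx);
+ *
+ *     Flavor 1, as a filter over another bufferevent:
+ *         bev = bufferevent_openssl_filter_new(base, underlying, ssl,
+ *             BUFFEREVENT_SSL_CONNECTING, BEV_OPT_CLOSE_ON_FREE);
+ *
+ *     Flavor 2, directly over a socket:
+ *         bev = bufferevent_openssl_socket_new(base, fd, ssl,
+ *             BUFFEREVENT_SSL_CONNECTING, BEV_OPT_CLOSE_ON_FREE);
+ */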
+
+struct bio_data_counts {
+ unsigned long n_written;
+ unsigned long n_read;
+};
+
+struct bufferevent_openssl {
+ /* Shared fields with common bufferevent implementation code.
+ If we were set up with an underlying bufferevent, we use the
+ events here as timers only. If we have an SSL, then we use
+ the events as socket events.
+ */
+ struct bufferevent_private bev;
+ /* An underlying bufferevent that we're directing our output to.
+ If it's NULL, then we're connected to an fd, not an evbuffer. */
+ struct bufferevent *underlying;
+ /* The SSL object doing our encryption. */
+ SSL *ssl;
+
+ /* A callback that's invoked when data arrives on our outbuf so we
+ know to write data to the SSL. */
+ struct evbuffer_cb_entry *outbuf_cb;
+
+ /* A count of how much data the bios have read/written total. Used
+ for rate-limiting. */
+ struct bio_data_counts counts;
+
+ /* If this value is greater than 0, then the last SSL_write blocked,
+ * and we need to try it again with this many bytes. */
+ ev_ssize_t last_write;
+
+#define NUM_ERRORS 3
+ ev_uint32_t errors[NUM_ERRORS];
+
+ /* When we next get available space, we should say "read" instead of
+ "write". This can happen if there's a renegotiation during a read
+ operation. */
+ unsigned read_blocked_on_write : 1;
+ /* When we next get data, we should say "write" instead of "read". */
+ unsigned write_blocked_on_read : 1;
+ /* Treat TCP close before SSL close on SSL >= v3 as clean EOF. */
+ unsigned allow_dirty_shutdown : 1;
+ /* XXX */
+ unsigned n_errors : 2;
+
+ /* Are we currently connecting, accepting, or doing IO? */
+ unsigned state : 2;
+};
+
+static int be_openssl_enable(struct bufferevent *, short);
+static int be_openssl_disable(struct bufferevent *, short);
+static void be_openssl_unlink(struct bufferevent *);
+static void be_openssl_destruct(struct bufferevent *);
+static int be_openssl_adj_timeouts(struct bufferevent *);
+static int be_openssl_flush(struct bufferevent *bufev,
+ short iotype, enum bufferevent_flush_mode mode);
+static int be_openssl_ctrl(struct bufferevent *, enum bufferevent_ctrl_op, union bufferevent_ctrl_data *);
+
+const struct bufferevent_ops bufferevent_ops_openssl = {
+ "ssl",
+ evutil_offsetof(struct bufferevent_openssl, bev.bev),
+ be_openssl_enable,
+ be_openssl_disable,
+ be_openssl_unlink,
+ be_openssl_destruct,
+ be_openssl_adj_timeouts,
+ be_openssl_flush,
+ be_openssl_ctrl,
+};
+
+/* Given a bufferevent, return a pointer to the bufferevent_openssl that
+ * contains it, if any. */
+static inline struct bufferevent_openssl *
+upcast(struct bufferevent *bev)
+{
+ struct bufferevent_openssl *bev_o;
+ if (bev->be_ops != &bufferevent_ops_openssl)
+ return NULL;
+ bev_o = (void*)( ((char*)bev) -
+ evutil_offsetof(struct bufferevent_openssl, bev.bev));
+ EVUTIL_ASSERT(bev_o->bev.bev.be_ops == &bufferevent_ops_openssl);
+ return bev_o;
+}
+
+static inline void
+put_error(struct bufferevent_openssl *bev_ssl, unsigned long err)
+{
+ if (bev_ssl->n_errors == NUM_ERRORS)
+ return;
+ /* The error type according to openssl is "unsigned long", but
+ openssl never uses more than 32 bits of it. It _can't_ use more
+ than 32 bits of it, since it needs to report errors on systems
+ where long is only 32 bits.
+ */
+ bev_ssl->errors[bev_ssl->n_errors++] = (ev_uint32_t) err;
+}
+
+/* Have the base communications channel (either the underlying bufferevent or
+ * ev_read and ev_write) start reading. Take the read-blocked-on-write flag
+ * into account. */
+static int
+start_reading(struct bufferevent_openssl *bev_ssl)
+{
+ if (bev_ssl->underlying) {
+ bufferevent_unsuspend_read_(bev_ssl->underlying,
+ BEV_SUSPEND_FILT_READ);
+ return 0;
+ } else {
+ struct bufferevent *bev = &bev_ssl->bev.bev;
+ int r;
+ r = bufferevent_add_event_(&bev->ev_read, &bev->timeout_read);
+ if (r == 0 && bev_ssl->read_blocked_on_write)
+ r = bufferevent_add_event_(&bev->ev_write,
+ &bev->timeout_write);
+ return r;
+ }
+}
+
+/* Have the base communications channel (either the underlying bufferevent or
+ * ev_read and ev_write) start writing. Take the write-blocked-on-read flag
+ * into account. */
+static int
+start_writing(struct bufferevent_openssl *bev_ssl)
+{
+ int r = 0;
+ if (bev_ssl->underlying) {
+ ;
+ } else {
+ struct bufferevent *bev = &bev_ssl->bev.bev;
+ r = bufferevent_add_event_(&bev->ev_write, &bev->timeout_write);
+ if (!r && bev_ssl->write_blocked_on_read)
+ r = bufferevent_add_event_(&bev->ev_read,
+ &bev->timeout_read);
+ }
+ return r;
+}
+
+static void
+stop_reading(struct bufferevent_openssl *bev_ssl)
+{
+ if (bev_ssl->write_blocked_on_read)
+ return;
+ if (bev_ssl->underlying) {
+ bufferevent_suspend_read_(bev_ssl->underlying,
+ BEV_SUSPEND_FILT_READ);
+ } else {
+ struct bufferevent *bev = &bev_ssl->bev.bev;
+ event_del(&bev->ev_read);
+ }
+}
+
+static void
+stop_writing(struct bufferevent_openssl *bev_ssl)
+{
+ if (bev_ssl->read_blocked_on_write)
+ return;
+ if (bev_ssl->underlying) {
+ ;
+ } else {
+ struct bufferevent *bev = &bev_ssl->bev.bev;
+ event_del(&bev->ev_write);
+ }
+}
+
+static int
+set_rbow(struct bufferevent_openssl *bev_ssl)
+{
+ if (!bev_ssl->underlying)
+ stop_reading(bev_ssl);
+ bev_ssl->read_blocked_on_write = 1;
+ return start_writing(bev_ssl);
+}
+
+static int
+set_wbor(struct bufferevent_openssl *bev_ssl)
+{
+ if (!bev_ssl->underlying)
+ stop_writing(bev_ssl);
+ bev_ssl->write_blocked_on_read = 1;
+ return start_reading(bev_ssl);
+}
+
+static int
+clear_rbow(struct bufferevent_openssl *bev_ssl)
+{
+ struct bufferevent *bev = &bev_ssl->bev.bev;
+ int r = 0;
+ bev_ssl->read_blocked_on_write = 0;
+ if (!(bev->enabled & EV_WRITE))
+ stop_writing(bev_ssl);
+ if (bev->enabled & EV_READ)
+ r = start_reading(bev_ssl);
+ return r;
+}
+
+
+static int
+clear_wbor(struct bufferevent_openssl *bev_ssl)
+{
+ struct bufferevent *bev = &bev_ssl->bev.bev;
+ int r = 0;
+ bev_ssl->write_blocked_on_read = 0;
+ if (!(bev->enabled & EV_READ))
+ stop_reading(bev_ssl);
+ if (bev->enabled & EV_WRITE)
+ r = start_writing(bev_ssl);
+ return r;
+}
+
+static void
+conn_closed(struct bufferevent_openssl *bev_ssl, int when, int errcode, int ret)
+{
+ int event = BEV_EVENT_ERROR;
+ int dirty_shutdown = 0;
+ unsigned long err;
+
+ switch (errcode) {
+ case SSL_ERROR_ZERO_RETURN:
+ /* Possibly a clean shutdown. */
+ if (SSL_get_shutdown(bev_ssl->ssl) & SSL_RECEIVED_SHUTDOWN)
+ event = BEV_EVENT_EOF;
+ else
+ dirty_shutdown = 1;
+ break;
+ case SSL_ERROR_SYSCALL:
+ /* IO error; possibly a dirty shutdown. */
+ if (ret == 0 && ERR_peek_error() == 0)
+ dirty_shutdown = 1;
+ break;
+ case SSL_ERROR_SSL:
+ /* Protocol error. */
+ break;
+ case SSL_ERROR_WANT_X509_LOOKUP:
+ /* XXXX handle this. */
+ break;
+ case SSL_ERROR_NONE:
+ case SSL_ERROR_WANT_READ:
+ case SSL_ERROR_WANT_WRITE:
+ case SSL_ERROR_WANT_CONNECT:
+ case SSL_ERROR_WANT_ACCEPT:
+ default:
+ /* should be impossible; treat as normal error. */
+ event_warnx("BUG: Unexpected OpenSSL error code %d", errcode);
+ break;
+ }
+
+ while ((err = ERR_get_error())) {
+ put_error(bev_ssl, err);
+ }
+
+ if (dirty_shutdown && bev_ssl->allow_dirty_shutdown)
+ event = BEV_EVENT_EOF;
+
+ stop_reading(bev_ssl);
+ stop_writing(bev_ssl);
+
+ /* when is BEV_EVENT_{READING|WRITING} */
+ event = when | event;
+ bufferevent_run_eventcb_(&bev_ssl->bev.bev, event, 0);
+}
+
+static void
+init_bio_counts(struct bufferevent_openssl *bev_ssl)
+{
+ bev_ssl->counts.n_written =
+ BIO_number_written(SSL_get_wbio(bev_ssl->ssl));
+ bev_ssl->counts.n_read =
+ BIO_number_read(SSL_get_rbio(bev_ssl->ssl));
+}
+
+static inline void
+decrement_buckets(struct bufferevent_openssl *bev_ssl)
+{
+ unsigned long num_w = BIO_number_written(SSL_get_wbio(bev_ssl->ssl));
+ unsigned long num_r = BIO_number_read(SSL_get_rbio(bev_ssl->ssl));
+ /* These next two subtractions can wrap around. That's okay. */
+ unsigned long w = num_w - bev_ssl->counts.n_written;
+ unsigned long r = num_r - bev_ssl->counts.n_read;
+ if (w)
+ bufferevent_decrement_write_buckets_(&bev_ssl->bev, w);
+ if (r)
+ bufferevent_decrement_read_buckets_(&bev_ssl->bev, r);
+ bev_ssl->counts.n_written = num_w;
+ bev_ssl->counts.n_read = num_r;
+}
+
+#define OP_MADE_PROGRESS 1
+#define OP_BLOCKED 2
+#define OP_ERR 4
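+
+/* Callers combine and test these bits; a sketch mirroring consider_reading()
+ * below:
+ *
+ *     r = do_read(bev_ssl, n_to_read);
+ *     if (r & (OP_BLOCKED|OP_ERR))
+ *         break;
+ */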
+
+/* Return a bitmask of OP_MADE_PROGRESS (if we read anything); OP_BLOCKED (if
+ we're now blocked); and OP_ERR (if an error occurred). */
+static int
+do_read(struct bufferevent_openssl *bev_ssl, int n_to_read) {
+ /* Requires lock */
+ struct bufferevent *bev = &bev_ssl->bev.bev;
+ struct evbuffer *input = bev->input;
+ int r, n, i, n_used = 0, atmost;
+ struct evbuffer_iovec space[2];
+ int result = 0;
+
+ if (bev_ssl->bev.read_suspended)
+ return 0;
+
+ atmost = bufferevent_get_read_max_(&bev_ssl->bev);
+ if (n_to_read > atmost)
+ n_to_read = atmost;
+
+ n = evbuffer_reserve_space(input, n_to_read, space, 2);
+ if (n < 0)
+ return OP_ERR;
+
+ for (i=0; i<n; ++i) {
+ if (bev_ssl->bev.read_suspended)
+ break;
+ r = SSL_read(bev_ssl->ssl, space[i].iov_base, space[i].iov_len);
+ if (r>0) {
+ result |= OP_MADE_PROGRESS;
+ if (bev_ssl->read_blocked_on_write)
+ if (clear_rbow(bev_ssl) < 0)
+ return OP_ERR | result;
+ ++n_used;
+ space[i].iov_len = r;
+ decrement_buckets(bev_ssl);
+ } else {
+ int err = SSL_get_error(bev_ssl->ssl, r);
+ print_err(err);
+ switch (err) {
+ case SSL_ERROR_WANT_READ:
+ /* Can't read until underlying has more data. */
+ if (bev_ssl->read_blocked_on_write)
+ if (clear_rbow(bev_ssl) < 0)
+ return OP_ERR | result;
+ break;
+ case SSL_ERROR_WANT_WRITE:
+ /* This read operation requires a write, and the
+ * underlying is full */
+ if (!bev_ssl->read_blocked_on_write)
+ if (set_rbow(bev_ssl) < 0)
+ return OP_ERR | result;
+ break;
+ default:
+ conn_closed(bev_ssl, BEV_EVENT_READING, err, r);
+ break;
+ }
+ result |= OP_BLOCKED;
+ break; /* out of the loop */
+ }
+ }
+
+ if (n_used) {
+ evbuffer_commit_space(input, space, n_used);
+ if (bev_ssl->underlying)
+ BEV_RESET_GENERIC_READ_TIMEOUT(bev);
+ }
+
+ return result;
+}
+
+/* Return a bitmask of OP_MADE_PROGRESS (if we wrote anything); OP_BLOCKED (if
+ we're now blocked); and OP_ERR (if an error occurred). */
+static int
+do_write(struct bufferevent_openssl *bev_ssl, int atmost)
+{
+ int i, r, n, n_written = 0;
+ struct bufferevent *bev = &bev_ssl->bev.bev;
+ struct evbuffer *output = bev->output;
+ struct evbuffer_iovec space[8];
+ int result = 0;
+
+ if (bev_ssl->last_write > 0)
+ atmost = bev_ssl->last_write;
+ else
+ atmost = bufferevent_get_write_max_(&bev_ssl->bev);
+
+ n = evbuffer_peek(output, atmost, NULL, space, 8);
+ if (n < 0)
+ return OP_ERR | result;
+
+ if (n > 8)
+ n = 8;
+ for (i=0; i < n; ++i) {
+ if (bev_ssl->bev.write_suspended)
+ break;
+
+ /* SSL_write will (reasonably) return 0 if we tell it to
+ send 0 data. Skip this case so we don't interpret the
+ result as an error */
+ if (space[i].iov_len == 0)
+ continue;
+
+ r = SSL_write(bev_ssl->ssl, space[i].iov_base,
+ space[i].iov_len);
+ if (r > 0) {
+ result |= OP_MADE_PROGRESS;
+ if (bev_ssl->write_blocked_on_read)
+ if (clear_wbor(bev_ssl) < 0)
+ return OP_ERR | result;
+ n_written += r;
+ bev_ssl->last_write = -1;
+ decrement_buckets(bev_ssl);
+ } else {
+ int err = SSL_get_error(bev_ssl->ssl, r);
+ print_err(err);
+ switch (err) {
+ case SSL_ERROR_WANT_WRITE:
+ /* Can't write until the underlying can accept more data. */
+ if (bev_ssl->write_blocked_on_read)
+ if (clear_wbor(bev_ssl) < 0)
+ return OP_ERR | result;
+ bev_ssl->last_write = space[i].iov_len;
+ break;
+ case SSL_ERROR_WANT_READ:
+ /* This write operation requires a read, and the
+ * underlying has no data for us yet. */
+ if (!bev_ssl->write_blocked_on_read)
+ if (set_wbor(bev_ssl) < 0)
+ return OP_ERR | result;
+ bev_ssl->last_write = space[i].iov_len;
+ break;
+ default:
+ conn_closed(bev_ssl, BEV_EVENT_WRITING, err, r);
+ bev_ssl->last_write = -1;
+ break;
+ }
+ result |= OP_BLOCKED;
+ break;
+ }
+ }
+ if (n_written) {
+ evbuffer_drain(output, n_written);
+ if (bev_ssl->underlying)
+ BEV_RESET_GENERIC_WRITE_TIMEOUT(bev);
+
+ bufferevent_trigger_nolock_(bev, EV_WRITE, 0);
+ }
+ return result;
+}
+
+#define WRITE_FRAME 15000
+
+#define READ_DEFAULT 4096
+
+/* Try to figure out how many bytes to read; return 0 if we shouldn't be
+ * reading. */
+static int
+bytes_to_read(struct bufferevent_openssl *bev)
+{
+ struct evbuffer *input = bev->bev.bev.input;
+ struct event_watermark *wm = &bev->bev.bev.wm_read;
+ int result = READ_DEFAULT;
+ ev_ssize_t limit;
+ /* XXX 99% of this is generic code that nearly all bufferevents will
+ * want. */
+
+ if (bev->write_blocked_on_read) {
+ return 0;
+ }
+
+ if (! (bev->bev.bev.enabled & EV_READ)) {
+ return 0;
+ }
+
+ if (bev->bev.read_suspended) {
+ return 0;
+ }
+
+ if (wm->high) {
+ if (evbuffer_get_length(input) >= wm->high) {
+ return 0;
+ }
+
+ result = wm->high - evbuffer_get_length(input);
+ } else {
+ result = READ_DEFAULT;
+ }
+
+ /* Respect the rate limit */
+ limit = bufferevent_get_read_max_(&bev->bev);
+ if (result > limit) {
+ result = limit;
+ }
+
+ return result;
+}
+
+
+/* Things look readable. If write is blocked on read, write till it isn't.
+ * Read from the underlying buffer until we block or we hit our high-water
+ * mark.
+ */
+static void
+consider_reading(struct bufferevent_openssl *bev_ssl)
+{
+ int r;
+ int n_to_read;
+ int all_result_flags = 0;
+
+ while (bev_ssl->write_blocked_on_read) {
+ r = do_write(bev_ssl, WRITE_FRAME);
+ if (r & (OP_BLOCKED|OP_ERR))
+ break;
+ }
+ if (bev_ssl->write_blocked_on_read)
+ return;
+
+ n_to_read = bytes_to_read(bev_ssl);
+
+ while (n_to_read) {
+ r = do_read(bev_ssl, n_to_read);
+ all_result_flags |= r;
+
+ if (r & (OP_BLOCKED|OP_ERR))
+ break;
+
+ if (bev_ssl->bev.read_suspended)
+ break;
+
+ /* Read all pending data. This won't hit the network
+ * again, and will (most importantly) put us in a state
+ * where we don't need to read anything else until the
+ * socket is readable again. It'll potentially make us
+ * overrun our read high-watermark (somewhat
+ * regrettable). The damage to the rate-limit has
+ * already been done, since OpenSSL went and read a
+ * whole SSL record anyway. */
+ n_to_read = SSL_pending(bev_ssl->ssl);
+
+ /* XXX This if statement is actually a bad bug, added to avoid
+ * XXX a worse bug.
+ *
+ * The bad bug: It can potentially cause resource unfairness
+ * by reading too much data from the underlying bufferevent;
+ * it can potentially cause read looping if the underlying
+ * bufferevent is a bufferevent_pair and deferred callbacks
+ * aren't used.
+ *
+ * The worse bug: If we didn't do this, then we would
+ * potentially not read any more from bev_ssl->underlying
+ * until more data arrived there, which could lead to us
+ * waiting forever.
+ */
+ if (!n_to_read && bev_ssl->underlying)
+ n_to_read = bytes_to_read(bev_ssl);
+ }
+
+ if (all_result_flags & OP_MADE_PROGRESS) {
+ struct bufferevent *bev = &bev_ssl->bev.bev;
+
+ bufferevent_trigger_nolock_(bev, EV_READ, 0);
+ }
+
+ if (!bev_ssl->underlying) {
+ /* Should be redundant, but let's avoid busy-looping */
+ if (bev_ssl->bev.read_suspended ||
+ !(bev_ssl->bev.bev.enabled & EV_READ)) {
+ event_del(&bev_ssl->bev.bev.ev_read);
+ }
+ }
+}
+
+static void
+consider_writing(struct bufferevent_openssl *bev_ssl)
+{
+ int r;
+ struct evbuffer *output = bev_ssl->bev.bev.output;
+ struct evbuffer *target = NULL;
+ struct event_watermark *wm = NULL;
+
+ while (bev_ssl->read_blocked_on_write) {
+ r = do_read(bev_ssl, 1024); /* XXXX 1024 is a hack */
+ if (r & OP_MADE_PROGRESS) {
+ struct bufferevent *bev = &bev_ssl->bev.bev;
+
+ bufferevent_trigger_nolock_(bev, EV_READ, 0);
+ }
+ if (r & (OP_ERR|OP_BLOCKED))
+ break;
+ }
+ if (bev_ssl->read_blocked_on_write)
+ return;
+ if (bev_ssl->underlying) {
+ target = bev_ssl->underlying->output;
+ wm = &bev_ssl->underlying->wm_write;
+ }
+ while ((bev_ssl->bev.bev.enabled & EV_WRITE) &&
+ (! bev_ssl->bev.write_suspended) &&
+ evbuffer_get_length(output) &&
+ (!target || (! wm->high || evbuffer_get_length(target) < wm->high))) {
+ int n_to_write;
+ if (wm && wm->high)
+ n_to_write = wm->high - evbuffer_get_length(target);
+ else
+ n_to_write = WRITE_FRAME;
+ r = do_write(bev_ssl, n_to_write);
+ if (r & (OP_BLOCKED|OP_ERR))
+ break;
+ }
+
+ if (!bev_ssl->underlying) {
+ if (evbuffer_get_length(output) == 0) {
+ event_del(&bev_ssl->bev.bev.ev_write);
+ } else if (bev_ssl->bev.write_suspended ||
+ !(bev_ssl->bev.bev.enabled & EV_WRITE)) {
+ /* Should be redundant, but let's avoid busy-looping */
+ event_del(&bev_ssl->bev.bev.ev_write);
+ }
+ }
+}
+
+static void
+be_openssl_readcb(struct bufferevent *bev_base, void *ctx)
+{
+ struct bufferevent_openssl *bev_ssl = ctx;
+ consider_reading(bev_ssl);
+}
+
+static void
+be_openssl_writecb(struct bufferevent *bev_base, void *ctx)
+{
+ struct bufferevent_openssl *bev_ssl = ctx;
+ consider_writing(bev_ssl);
+}
+
+static void
+be_openssl_eventcb(struct bufferevent *bev_base, short what, void *ctx)
+{
+ struct bufferevent_openssl *bev_ssl = ctx;
+ int event = 0;
+
+ if (what & BEV_EVENT_EOF) {
+ if (bev_ssl->allow_dirty_shutdown)
+ event = BEV_EVENT_EOF;
+ else
+ event = BEV_EVENT_ERROR;
+ } else if (what & BEV_EVENT_TIMEOUT) {
+ /* We sure didn't set this. Propagate it to the user. */
+ event = what;
+ } else if (what & BEV_EVENT_ERROR) {
+ /* An error occurred on the connection. Propagate it to the user. */
+ event = what;
+ } else if (what & BEV_EVENT_CONNECTED) {
+ /* Ignore it. We're already doing SSL_connect(), which will
+ eat it. */
+ }
+ if (event)
+ bufferevent_run_eventcb_(&bev_ssl->bev.bev, event, 0);
+}
+
+static void
+be_openssl_readeventcb(evutil_socket_t fd, short what, void *ptr)
+{
+ struct bufferevent_openssl *bev_ssl = ptr;
+ bufferevent_incref_and_lock_(&bev_ssl->bev.bev);
+ if (what == EV_TIMEOUT) {
+ bufferevent_run_eventcb_(&bev_ssl->bev.bev,
+ BEV_EVENT_TIMEOUT|BEV_EVENT_READING, 0);
+ } else {
+ consider_reading(bev_ssl);
+ }
+ bufferevent_decref_and_unlock_(&bev_ssl->bev.bev);
+}
+
+static void
+be_openssl_writeeventcb(evutil_socket_t fd, short what, void *ptr)
+{
+ struct bufferevent_openssl *bev_ssl = ptr;
+ bufferevent_incref_and_lock_(&bev_ssl->bev.bev);
+ if (what == EV_TIMEOUT) {
+ bufferevent_run_eventcb_(&bev_ssl->bev.bev,
+ BEV_EVENT_TIMEOUT|BEV_EVENT_WRITING, 0);
+ } else {
+ consider_writing(bev_ssl);
+ }
+ bufferevent_decref_and_unlock_(&bev_ssl->bev.bev);
+}
+
+static int
+be_openssl_auto_fd(struct bufferevent_openssl *bev_ssl, int fd)
+{
+ if (!bev_ssl->underlying) {
+ struct bufferevent *bev = &bev_ssl->bev.bev;
+ if (event_initialized(&bev->ev_read) && fd < 0) {
+ fd = event_get_fd(&bev->ev_read);
+ }
+ }
+ return fd;
+}
+
+static int
+set_open_callbacks(struct bufferevent_openssl *bev_ssl, evutil_socket_t fd)
+{
+ if (bev_ssl->underlying) {
+ bufferevent_setcb(bev_ssl->underlying,
+ be_openssl_readcb, be_openssl_writecb, be_openssl_eventcb,
+ bev_ssl);
+ return 0;
+ } else {
+ struct bufferevent *bev = &bev_ssl->bev.bev;
+ int rpending=0, wpending=0, r1=0, r2=0;
+
+ if (event_initialized(&bev->ev_read)) {
+ rpending = event_pending(&bev->ev_read, EV_READ, NULL);
+ wpending = event_pending(&bev->ev_write, EV_WRITE, NULL);
+
+ event_del(&bev->ev_read);
+ event_del(&bev->ev_write);
+ }
+
+ event_assign(&bev->ev_read, bev->ev_base, fd,
+ EV_READ|EV_PERSIST|EV_FINALIZE,
+ be_openssl_readeventcb, bev_ssl);
+ event_assign(&bev->ev_write, bev->ev_base, fd,
+ EV_WRITE|EV_PERSIST|EV_FINALIZE,
+ be_openssl_writeeventcb, bev_ssl);
+
+ if (rpending)
+ r1 = bufferevent_add_event_(&bev->ev_read, &bev->timeout_read);
+ if (wpending)
+ r2 = bufferevent_add_event_(&bev->ev_write, &bev->timeout_write);
+
+ return (r1<0 || r2<0) ? -1 : 0;
+ }
+}
+static int
+set_open_callbacks_auto(struct bufferevent_openssl *bev_ssl, evutil_socket_t fd)
+{
+ fd = be_openssl_auto_fd(bev_ssl, fd);
+ return set_open_callbacks(bev_ssl, fd);
+}
+
+static int
+do_handshake(struct bufferevent_openssl *bev_ssl)
+{
+ int r;
+
+ switch (bev_ssl->state) {
+ default:
+ case BUFFEREVENT_SSL_OPEN:
+ EVUTIL_ASSERT(0);
+ return -1;
+ case BUFFEREVENT_SSL_CONNECTING:
+ case BUFFEREVENT_SSL_ACCEPTING:
+ r = SSL_do_handshake(bev_ssl->ssl);
+ break;
+ }
+ decrement_buckets(bev_ssl);
+
+ if (r==1) {
+ int fd = event_get_fd(&bev_ssl->bev.bev.ev_read);
+ /* We're done! */
+ bev_ssl->state = BUFFEREVENT_SSL_OPEN;
+ set_open_callbacks(bev_ssl, fd); /* XXXX handle failure */
+ /* Call do_read and do_write as needed */
+ bufferevent_enable(&bev_ssl->bev.bev, bev_ssl->bev.bev.enabled);
+ bufferevent_run_eventcb_(&bev_ssl->bev.bev,
+ BEV_EVENT_CONNECTED, 0);
+ return 1;
+ } else {
+ int err = SSL_get_error(bev_ssl->ssl, r);
+ print_err(err);
+ switch (err) {
+ case SSL_ERROR_WANT_WRITE:
+ if (!bev_ssl->underlying) {
+ stop_reading(bev_ssl);
+ return start_writing(bev_ssl);
+ }
+ return 0;
+ case SSL_ERROR_WANT_READ:
+ if (!bev_ssl->underlying) {
+ stop_writing(bev_ssl);
+ return start_reading(bev_ssl);
+ }
+ return 0;
+ default:
+ conn_closed(bev_ssl, BEV_EVENT_READING, err, r);
+ return -1;
+ }
+ }
+}
+
+static void
+be_openssl_handshakecb(struct bufferevent *bev_base, void *ctx)
+{
+ struct bufferevent_openssl *bev_ssl = ctx;
+ do_handshake(bev_ssl);/* XXX handle failure */
+}
+
+static void
+be_openssl_handshakeeventcb(evutil_socket_t fd, short what, void *ptr)
+{
+ struct bufferevent_openssl *bev_ssl = ptr;
+
+ bufferevent_incref_and_lock_(&bev_ssl->bev.bev);
+ if (what & EV_TIMEOUT) {
+ bufferevent_run_eventcb_(&bev_ssl->bev.bev, BEV_EVENT_TIMEOUT, 0);
+ } else
+ do_handshake(bev_ssl);/* XXX handle failure */
+ bufferevent_decref_and_unlock_(&bev_ssl->bev.bev);
+}
+
+static int
+set_handshake_callbacks(struct bufferevent_openssl *bev_ssl, evutil_socket_t fd)
+{
+ if (bev_ssl->underlying) {
+ bufferevent_setcb(bev_ssl->underlying,
+ be_openssl_handshakecb, be_openssl_handshakecb,
+ be_openssl_eventcb,
+ bev_ssl);
+ return do_handshake(bev_ssl);
+ } else {
+ struct bufferevent *bev = &bev_ssl->bev.bev;
+
+ if (event_initialized(&bev->ev_read)) {
+ event_del(&bev->ev_read);
+ event_del(&bev->ev_write);
+ }
+
+ event_assign(&bev->ev_read, bev->ev_base, fd,
+ EV_READ|EV_PERSIST|EV_FINALIZE,
+ be_openssl_handshakeeventcb, bev_ssl);
+ event_assign(&bev->ev_write, bev->ev_base, fd,
+ EV_WRITE|EV_PERSIST|EV_FINALIZE,
+ be_openssl_handshakeeventcb, bev_ssl);
+ if (fd >= 0)
+ bufferevent_enable(bev, bev->enabled);
+ return 0;
+ }
+}
+
+static int
+set_handshake_callbacks_auto(struct bufferevent_openssl *bev_ssl, evutil_socket_t fd)
+{
+ fd = be_openssl_auto_fd(bev_ssl, fd);
+ return set_handshake_callbacks(bev_ssl, fd);
+}
+
+int
+bufferevent_ssl_renegotiate(struct bufferevent *bev)
+{
+ struct bufferevent_openssl *bev_ssl = upcast(bev);
+ if (!bev_ssl)
+ return -1;
+ if (SSL_renegotiate(bev_ssl->ssl) < 0)
+ return -1;
+ bev_ssl->state = BUFFEREVENT_SSL_CONNECTING;
+ if (set_handshake_callbacks_auto(bev_ssl, -1) < 0)
+ return -1;
+ if (!bev_ssl->underlying)
+ return do_handshake(bev_ssl);
+ return 0;
+}
+
+static void
+be_openssl_outbuf_cb(struct evbuffer *buf,
+ const struct evbuffer_cb_info *cbinfo, void *arg)
+{
+ struct bufferevent_openssl *bev_ssl = arg;
+ int r = 0;
+ /* XXX need to hold a reference here. */
+
+ if (cbinfo->n_added && bev_ssl->state == BUFFEREVENT_SSL_OPEN &&
+ cbinfo->orig_size == 0) {
+ r = bufferevent_add_event_(&bev_ssl->bev.bev.ev_write,
+ &bev_ssl->bev.bev.timeout_write);
+ }
+ /* XXX Handle r < 0 */
+ (void)r;
+}
+
+
+static int
+be_openssl_enable(struct bufferevent *bev, short events)
+{
+ struct bufferevent_openssl *bev_ssl = upcast(bev);
+ int r1 = 0, r2 = 0;
+
+ if (events & EV_READ)
+ r1 = start_reading(bev_ssl);
+ if (events & EV_WRITE)
+ r2 = start_writing(bev_ssl);
+
+ if (bev_ssl->underlying) {
+ if (events & EV_READ)
+ BEV_RESET_GENERIC_READ_TIMEOUT(bev);
+ if (events & EV_WRITE)
+ BEV_RESET_GENERIC_WRITE_TIMEOUT(bev);
+
+ if (events & EV_READ)
+ consider_reading(bev_ssl);
+ if (events & EV_WRITE)
+ consider_writing(bev_ssl);
+ }
+ return (r1 < 0 || r2 < 0) ? -1 : 0;
+}
+
+static int
+be_openssl_disable(struct bufferevent *bev, short events)
+{
+ struct bufferevent_openssl *bev_ssl = upcast(bev);
+
+ if (events & EV_READ)
+ stop_reading(bev_ssl);
+ if (events & EV_WRITE)
+ stop_writing(bev_ssl);
+
+ if (bev_ssl->underlying) {
+ if (events & EV_READ)
+ BEV_DEL_GENERIC_READ_TIMEOUT(bev);
+ if (events & EV_WRITE)
+ BEV_DEL_GENERIC_WRITE_TIMEOUT(bev);
+ }
+ return 0;
+}
+
+static void
+be_openssl_unlink(struct bufferevent *bev)
+{
+ struct bufferevent_openssl *bev_ssl = upcast(bev);
+
+ if (bev_ssl->bev.options & BEV_OPT_CLOSE_ON_FREE) {
+ if (bev_ssl->underlying) {
+ if (BEV_UPCAST(bev_ssl->underlying)->refcnt < 2) {
+ event_warnx("BEV_OPT_CLOSE_ON_FREE set on an "
+ "bufferevent with too few references");
+ } else {
+ bufferevent_free(bev_ssl->underlying);
+ /* We still have a reference to it, via our
+ * BIO. So we don't drop this. */
+ // bev_ssl->underlying = NULL;
+ }
+ }
+ } else {
+ if (bev_ssl->underlying) {
+ if (bev_ssl->underlying->errorcb == be_openssl_eventcb)
+ bufferevent_setcb(bev_ssl->underlying,
+ NULL,NULL,NULL,NULL);
+ bufferevent_unsuspend_read_(bev_ssl->underlying,
+ BEV_SUSPEND_FILT_READ);
+ }
+ }
+}
+
+static void
+be_openssl_destruct(struct bufferevent *bev)
+{
+ struct bufferevent_openssl *bev_ssl = upcast(bev);
+
+ if (bev_ssl->bev.options & BEV_OPT_CLOSE_ON_FREE) {
+ if (! bev_ssl->underlying) {
+ evutil_socket_t fd = -1;
+ BIO *bio = SSL_get_wbio(bev_ssl->ssl);
+ if (bio)
+ fd = BIO_get_fd(bio, NULL);
+ if (fd >= 0)
+ evutil_closesocket(fd);
+ }
+ SSL_free(bev_ssl->ssl);
+ }
+}
+
+static int
+be_openssl_adj_timeouts(struct bufferevent *bev)
+{
+ struct bufferevent_openssl *bev_ssl = upcast(bev);
+
+ if (bev_ssl->underlying) {
+ return bufferevent_generic_adj_timeouts_(bev);
+ } else {
+ return bufferevent_generic_adj_existing_timeouts_(bev);
+ }
+}
+
+static int
+be_openssl_flush(struct bufferevent *bufev,
+ short iotype, enum bufferevent_flush_mode mode)
+{
+ /* XXXX Implement this. */
+ return 0;
+}
+
+static int
+be_openssl_ctrl(struct bufferevent *bev,
+ enum bufferevent_ctrl_op op, union bufferevent_ctrl_data *data)
+{
+ struct bufferevent_openssl *bev_ssl = upcast(bev);
+ switch (op) {
+ case BEV_CTRL_SET_FD:
+ if (bev_ssl->underlying)
+ return -1;
+ {
+ BIO *bio;
+ bio = BIO_new_socket(data->fd, 0);
+ SSL_set_bio(bev_ssl->ssl, bio, bio);
+ }
+ if (bev_ssl->state == BUFFEREVENT_SSL_OPEN && data->fd >= 0)
+ return set_open_callbacks(bev_ssl, data->fd);
+ else {
+ return set_handshake_callbacks(bev_ssl, data->fd);
+ }
+ case BEV_CTRL_GET_FD:
+ data->fd = event_get_fd(&bev->ev_read);
+ return 0;
+ case BEV_CTRL_GET_UNDERLYING:
+ data->ptr = bev_ssl->underlying;
+ return 0;
+ case BEV_CTRL_CANCEL_ALL:
+ default:
+ return -1;
+ }
+}
+
+SSL *
+bufferevent_openssl_get_ssl(struct bufferevent *bufev)
+{
+ struct bufferevent_openssl *bev_ssl = upcast(bufev);
+ if (!bev_ssl)
+ return NULL;
+ return bev_ssl->ssl;
+}
+
+static struct bufferevent *
+bufferevent_openssl_new_impl(struct event_base *base,
+ struct bufferevent *underlying,
+ evutil_socket_t fd,
+ SSL *ssl,
+ enum bufferevent_ssl_state state,
+ int options)
+{
+ struct bufferevent_openssl *bev_ssl = NULL;
+ struct bufferevent_private *bev_p = NULL;
+ int tmp_options = options & ~BEV_OPT_THREADSAFE;
+
+ if (underlying != NULL && fd >= 0)
+ return NULL; /* Only one can be set. */
+
+ if (!(bev_ssl = mm_calloc(1, sizeof(struct bufferevent_openssl))))
+ goto err;
+
+ bev_p = &bev_ssl->bev;
+
+ if (bufferevent_init_common_(bev_p, base,
+ &bufferevent_ops_openssl, tmp_options) < 0)
+ goto err;
+
+ /* Don't explode if we decide to realloc a chunk we're writing from in
+ * the output buffer. */
+ SSL_set_mode(ssl, SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER);
+
+ bev_ssl->underlying = underlying;
+ bev_ssl->ssl = ssl;
+
+ bev_ssl->outbuf_cb = evbuffer_add_cb(bev_p->bev.output,
+ be_openssl_outbuf_cb, bev_ssl);
+
+ if (options & BEV_OPT_THREADSAFE)
+ bufferevent_enable_locking_(&bev_ssl->bev.bev, NULL);
+
+ if (underlying) {
+ bufferevent_init_generic_timeout_cbs_(&bev_ssl->bev.bev);
+ bufferevent_incref_(underlying);
+ }
+
+ bev_ssl->state = state;
+ bev_ssl->last_write = -1;
+
+ init_bio_counts(bev_ssl);
+
+ switch (state) {
+ case BUFFEREVENT_SSL_ACCEPTING:
+ SSL_set_accept_state(bev_ssl->ssl);
+ if (set_handshake_callbacks_auto(bev_ssl, fd) < 0)
+ goto err;
+ break;
+ case BUFFEREVENT_SSL_CONNECTING:
+ SSL_set_connect_state(bev_ssl->ssl);
+ if (set_handshake_callbacks_auto(bev_ssl, fd) < 0)
+ goto err;
+ break;
+ case BUFFEREVENT_SSL_OPEN:
+ if (set_open_callbacks_auto(bev_ssl, fd) < 0)
+ goto err;
+ break;
+ default:
+ goto err;
+ }
+
+ if (underlying) {
+ bufferevent_setwatermark(underlying, EV_READ, 0, 0);
+ bufferevent_enable(underlying, EV_READ|EV_WRITE);
+ if (state == BUFFEREVENT_SSL_OPEN)
+ bufferevent_suspend_read_(underlying,
+ BEV_SUSPEND_FILT_READ);
+ }
+
+ return &bev_ssl->bev.bev;
+err:
+ if (bev_ssl)
+ bufferevent_free(&bev_ssl->bev.bev);
+ return NULL;
+}
+
+struct bufferevent *
+bufferevent_openssl_filter_new(struct event_base *base,
+ struct bufferevent *underlying,
+ SSL *ssl,
+ enum bufferevent_ssl_state state,
+ int options)
+{
+ /* We don't tell the BIO to close the bufferevent; we do it ourselves
+ * on be_openssl_destruct */
+ int close_flag = 0; /* options & BEV_OPT_CLOSE_ON_FREE; */
+ BIO *bio;
+ if (!underlying)
+ return NULL;
+ if (!(bio = BIO_new_bufferevent(underlying, close_flag)))
+ return NULL;
+
+ SSL_set_bio(ssl, bio, bio);
+
+ return bufferevent_openssl_new_impl(
+ base, underlying, -1, ssl, state, options);
+}
+
+struct bufferevent *
+bufferevent_openssl_socket_new(struct event_base *base,
+ evutil_socket_t fd,
+ SSL *ssl,
+ enum bufferevent_ssl_state state,
+ int options)
+{
+ /* Does the SSL already have an fd? */
+ BIO *bio = SSL_get_wbio(ssl);
+ long have_fd = -1;
+
+ if (bio)
+ have_fd = BIO_get_fd(bio, NULL);
+
+ if (have_fd >= 0) {
+ /* The SSL is already configured with an fd. */
+ if (fd < 0) {
+ /* We should learn the fd from the SSL. */
+ fd = (evutil_socket_t) have_fd;
+ } else if (have_fd == (long)fd) {
+ /* We already know the fd from the SSL; do nothing */
+ } else {
+ /* We specified an fd different from that of the SSL.
+ This is probably an error on our part. Fail. */
+ return NULL;
+ }
+ (void) BIO_set_close(bio, 0);
+ } else {
+ /* The SSL isn't configured with a BIO with an fd. */
+ if (fd >= 0) {
+ /* ... and we have an fd we want to use. */
+ bio = BIO_new_socket(fd, 0);
+ SSL_set_bio(ssl, bio, bio);
+ } else {
+ /* Leave the fd unset. */
+ }
+ }
+
+ return bufferevent_openssl_new_impl(
+ base, NULL, fd, ssl, state, options);
+}
+
+int
+bufferevent_openssl_get_allow_dirty_shutdown(struct bufferevent *bev)
+{
+ int allow_dirty_shutdown = -1;
+ struct bufferevent_openssl *bev_ssl;
+ BEV_LOCK(bev);
+ bev_ssl = upcast(bev);
+ if (bev_ssl)
+ allow_dirty_shutdown = bev_ssl->allow_dirty_shutdown;
+ BEV_UNLOCK(bev);
+ return allow_dirty_shutdown;
+}
+
+void
+bufferevent_openssl_set_allow_dirty_shutdown(struct bufferevent *bev,
+ int allow_dirty_shutdown)
+{
+ struct bufferevent_openssl *bev_ssl;
+ BEV_LOCK(bev);
+ bev_ssl = upcast(bev);
+ if (bev_ssl)
+ bev_ssl->allow_dirty_shutdown = !!allow_dirty_shutdown;
+ BEV_UNLOCK(bev);
+}
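+
+/* Usage sketch: treat a peer that closes the TCP connection without sending
+ * an SSL close_notify as a clean EOF instead of an error:
+ *
+ *     bufferevent_openssl_set_allow_dirty_shutdown(bev, 1);
+ */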
+
+unsigned long
+bufferevent_get_openssl_error(struct bufferevent *bev)
+{
+ unsigned long err = 0;
+ struct bufferevent_openssl *bev_ssl;
+ BEV_LOCK(bev);
+ bev_ssl = upcast(bev);
+ if (bev_ssl && bev_ssl->n_errors) {
+ err = bev_ssl->errors[--bev_ssl->n_errors];
+ }
+ BEV_UNLOCK(bev);
+ return err;
+}
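+
+/* Usage sketch: after a BEV_EVENT_ERROR, drain any queued OpenSSL errors.
+ * ERR_error_string_n() comes from OpenSSL and is not defined here.
+ *
+ *     unsigned long e;
+ *     char buf[256];
+ *     while ((e = bufferevent_get_openssl_error(bev)) != 0) {
+ *         ERR_error_string_n(e, buf, sizeof(buf));
+ *         (log buf here)
+ *     }
+ */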
diff --git a/libs/libevent/src/bufferevent_pair.c b/libs/libevent/src/bufferevent_pair.c
new file mode 100644
index 0000000000..d80e5f81d6
--- /dev/null
+++ b/libs/libevent/src/bufferevent_pair.c
@@ -0,0 +1,360 @@
+/*
+ * Copyright (c) 2009-2012 Niels Provos, Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "event2/event-config.h"
+#include "evconfig-private.h"
+
+#include <sys/types.h>
+
+#ifdef _WIN32
+#include <winsock2.h>
+#endif
+
+#include "event2/util.h"
+#include "event2/buffer.h"
+#include "event2/bufferevent.h"
+#include "event2/bufferevent_struct.h"
+#include "event2/event.h"
+#include "defer-internal.h"
+#include "bufferevent-internal.h"
+#include "mm-internal.h"
+#include "util-internal.h"
+
+struct bufferevent_pair {
+ struct bufferevent_private bev;
+ struct bufferevent_pair *partner;
+ /* For ->destruct() lock checking */
+ struct bufferevent_pair *unlinked_partner;
+};
+
+
+/* Given a bufferevent that's really the bev portion of a bufferevent_pair,
+ * return that bufferevent_pair. Return NULL otherwise. */
+static inline struct bufferevent_pair *
+upcast(struct bufferevent *bev)
+{
+ struct bufferevent_pair *bev_p;
+ if (bev->be_ops != &bufferevent_ops_pair)
+ return NULL;
+ bev_p = EVUTIL_UPCAST(bev, struct bufferevent_pair, bev.bev);
+ EVUTIL_ASSERT(bev_p->bev.bev.be_ops == &bufferevent_ops_pair);
+ return bev_p;
+}
+
+#define downcast(bev_pair) (&(bev_pair)->bev.bev)
+
+static inline void
+incref_and_lock(struct bufferevent *b)
+{
+ struct bufferevent_pair *bevp;
+ bufferevent_incref_and_lock_(b);
+ bevp = upcast(b);
+ if (bevp->partner)
+ bufferevent_incref_and_lock_(downcast(bevp->partner));
+}
+
+static inline void
+decref_and_unlock(struct bufferevent *b)
+{
+ struct bufferevent_pair *bevp = upcast(b);
+ if (bevp->partner)
+ bufferevent_decref_and_unlock_(downcast(bevp->partner));
+ bufferevent_decref_and_unlock_(b);
+}
+
+/* XXX Handle close */
+
+static void be_pair_outbuf_cb(struct evbuffer *,
+ const struct evbuffer_cb_info *, void *);
+
+static struct bufferevent_pair *
+bufferevent_pair_elt_new(struct event_base *base,
+ int options)
+{
+ struct bufferevent_pair *bufev;
+ if (! (bufev = mm_calloc(1, sizeof(struct bufferevent_pair))))
+ return NULL;
+ if (bufferevent_init_common_(&bufev->bev, base, &bufferevent_ops_pair,
+ options)) {
+ mm_free(bufev);
+ return NULL;
+ }
+ if (!evbuffer_add_cb(bufev->bev.bev.output, be_pair_outbuf_cb, bufev)) {
+ bufferevent_free(downcast(bufev));
+ return NULL;
+ }
+
+ bufferevent_init_generic_timeout_cbs_(&bufev->bev.bev);
+
+ return bufev;
+}
+
+int
+bufferevent_pair_new(struct event_base *base, int options,
+ struct bufferevent *pair[2])
+{
+ struct bufferevent_pair *bufev1 = NULL, *bufev2 = NULL;
+ int tmp_options;
+
+ options |= BEV_OPT_DEFER_CALLBACKS;
+ tmp_options = options & ~BEV_OPT_THREADSAFE;
+
+ bufev1 = bufferevent_pair_elt_new(base, options);
+ if (!bufev1)
+ return -1;
+ bufev2 = bufferevent_pair_elt_new(base, tmp_options);
+ if (!bufev2) {
+ bufferevent_free(downcast(bufev1));
+ return -1;
+ }
+
+ if (options & BEV_OPT_THREADSAFE) {
+ /*XXXX check return */
+ bufferevent_enable_locking_(downcast(bufev2), bufev1->bev.lock);
+ }
+
+ bufev1->partner = bufev2;
+ bufev2->partner = bufev1;
+
+ evbuffer_freeze(downcast(bufev1)->input, 0);
+ evbuffer_freeze(downcast(bufev1)->output, 1);
+ evbuffer_freeze(downcast(bufev2)->input, 0);
+ evbuffer_freeze(downcast(bufev2)->output, 1);
+
+ pair[0] = downcast(bufev1);
+ pair[1] = downcast(bufev2);
+
+ return 0;
+}
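+
+/* Usage sketch: create a connected pair; whatever is written to pair[0]
+ * becomes readable on pair[1], and vice versa.
+ *
+ *     struct bufferevent *pair[2];
+ *     if (bufferevent_pair_new(base, BEV_OPT_CLOSE_ON_FREE, pair) == 0)
+ *         bufferevent_write(pair[0], "hi", 2);
+ */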
+
+static void
+be_pair_transfer(struct bufferevent *src, struct bufferevent *dst,
+ int ignore_wm)
+{
+ size_t dst_size;
+ size_t n;
+
+ evbuffer_unfreeze(src->output, 1);
+ evbuffer_unfreeze(dst->input, 0);
+
+ if (dst->wm_read.high) {
+ dst_size = evbuffer_get_length(dst->input);
+ if (dst_size < dst->wm_read.high) {
+ n = dst->wm_read.high - dst_size;
+ evbuffer_remove_buffer(src->output, dst->input, n);
+ } else {
+ if (!ignore_wm)
+ goto done;
+ n = evbuffer_get_length(src->output);
+ evbuffer_add_buffer(dst->input, src->output);
+ }
+ } else {
+ n = evbuffer_get_length(src->output);
+ evbuffer_add_buffer(dst->input, src->output);
+ }
+
+ if (n) {
+ BEV_RESET_GENERIC_READ_TIMEOUT(dst);
+
+ if (evbuffer_get_length(dst->output))
+ BEV_RESET_GENERIC_WRITE_TIMEOUT(dst);
+ else
+ BEV_DEL_GENERIC_WRITE_TIMEOUT(dst);
+ }
+
+ bufferevent_trigger_nolock_(dst, EV_READ, 0);
+ bufferevent_trigger_nolock_(src, EV_WRITE, 0);
+done:
+ evbuffer_freeze(src->output, 1);
+ evbuffer_freeze(dst->input, 0);
+}
+
+static inline int
+be_pair_wants_to_talk(struct bufferevent_pair *src,
+ struct bufferevent_pair *dst)
+{
+ return (downcast(src)->enabled & EV_WRITE) &&
+ (downcast(dst)->enabled & EV_READ) &&
+ !dst->bev.read_suspended &&
+ evbuffer_get_length(downcast(src)->output);
+}
+
+static void
+be_pair_outbuf_cb(struct evbuffer *outbuf,
+ const struct evbuffer_cb_info *info, void *arg)
+{
+ struct bufferevent_pair *bev_pair = arg;
+ struct bufferevent_pair *partner = bev_pair->partner;
+
+ incref_and_lock(downcast(bev_pair));
+
+ if (info->n_added > info->n_deleted && partner) {
+ /* We got more data. If the other side's reading, then
+ hand it over. */
+ if (be_pair_wants_to_talk(bev_pair, partner)) {
+ be_pair_transfer(downcast(bev_pair), downcast(partner), 0);
+ }
+ }
+
+ decref_and_unlock(downcast(bev_pair));
+}
+
+static int
+be_pair_enable(struct bufferevent *bufev, short events)
+{
+ struct bufferevent_pair *bev_p = upcast(bufev);
+ struct bufferevent_pair *partner = bev_p->partner;
+
+ incref_and_lock(bufev);
+
+ if (events & EV_READ) {
+ BEV_RESET_GENERIC_READ_TIMEOUT(bufev);
+ }
+ if ((events & EV_WRITE) && evbuffer_get_length(bufev->output))
+ BEV_RESET_GENERIC_WRITE_TIMEOUT(bufev);
+
+ /* We're starting to read! Does the other side have anything to write?*/
+ if ((events & EV_READ) && partner &&
+ be_pair_wants_to_talk(partner, bev_p)) {
+ be_pair_transfer(downcast(partner), bufev, 0);
+ }
+ /* We're starting to write! Does the other side want to read? */
+ if ((events & EV_WRITE) && partner &&
+ be_pair_wants_to_talk(bev_p, partner)) {
+ be_pair_transfer(bufev, downcast(partner), 0);
+ }
+ decref_and_unlock(bufev);
+ return 0;
+}
+
+static int
+be_pair_disable(struct bufferevent *bev, short events)
+{
+ if (events & EV_READ) {
+ BEV_DEL_GENERIC_READ_TIMEOUT(bev);
+ }
+ if (events & EV_WRITE) {
+ BEV_DEL_GENERIC_WRITE_TIMEOUT(bev);
+ }
+ return 0;
+}
+
+static void
+be_pair_unlink(struct bufferevent *bev)
+{
+ struct bufferevent_pair *bev_p = upcast(bev);
+
+ if (bev_p->partner) {
+ bev_p->unlinked_partner = bev_p->partner;
+ bev_p->partner->partner = NULL;
+ bev_p->partner = NULL;
+ }
+}
+
+/* Make sure the *shared* lock is freed by whichever bufferevent is destroyed
+ * last (the two halves of the pair share it). */
+static void
+be_pair_destruct(struct bufferevent *bev)
+{
+ struct bufferevent_pair *bev_p = upcast(bev);
+
+ /* Transfer ownership of the lock to the partner; otherwise we would use
+ * an already-freed lock while freeing the second bev. For example:
+ *
+ * bev1->own_lock = 1
+ * bev2->own_lock = 0
+ * bev2->lock = bev1->lock
+ *
+ * bufferevent_free(bev1) # refcnt == 0 -> unlink
+ * bufferevent_free(bev2) # refcnt == 0 -> unlink
+ *
+ * event_base_free() -> finalizers -> EVTHREAD_FREE_LOCK(bev1->lock)
+ * -> BEV_LOCK(bev2->lock) <-- already freed
+ *
+ * Where bev1 == pair[0], bev2 == pair[1].
+ */
+ if (bev_p->unlinked_partner && bev_p->bev.own_lock) {
+ bev_p->unlinked_partner->bev.own_lock = 1;
+ bev_p->bev.own_lock = 0;
+ }
+ bev_p->unlinked_partner = NULL;
+}
+
+static int
+be_pair_flush(struct bufferevent *bev, short iotype,
+ enum bufferevent_flush_mode mode)
+{
+ struct bufferevent_pair *bev_p = upcast(bev);
+ struct bufferevent *partner;
+
+ if (!bev_p->partner)
+ return -1;
+
+ if (mode == BEV_NORMAL)
+ return 0;
+
+ incref_and_lock(bev);
+
+ partner = downcast(bev_p->partner);
+
+ if ((iotype & EV_READ) != 0)
+ be_pair_transfer(partner, bev, 1);
+
+ if ((iotype & EV_WRITE) != 0)
+ be_pair_transfer(bev, partner, 1);
+
+ if (mode == BEV_FINISHED) {
+ bufferevent_run_eventcb_(partner, iotype|BEV_EVENT_EOF, 0);
+ }
+ decref_and_unlock(bev);
+ return 0;
+}
+
+struct bufferevent *
+bufferevent_pair_get_partner(struct bufferevent *bev)
+{
+ struct bufferevent_pair *bev_p;
+ struct bufferevent *partner = NULL;
+ bev_p = upcast(bev);
+ if (! bev_p)
+ return NULL;
+
+ incref_and_lock(bev);
+ if (bev_p->partner)
+ partner = downcast(bev_p->partner);
+ decref_and_unlock(bev);
+ return partner;
+}
+
+const struct bufferevent_ops bufferevent_ops_pair = {
+ "pair_elt",
+ evutil_offsetof(struct bufferevent_pair, bev.bev),
+ be_pair_enable,
+ be_pair_disable,
+ be_pair_unlink,
+ be_pair_destruct,
+ bufferevent_generic_adj_timeouts_,
+ be_pair_flush,
+ NULL, /* ctrl */
+};
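+
+/* Illustrative sketch (editor's addition, not upstream libevent code): the
+ * intended use of a bufferevent pair.  bufferevent_pair_new() creates two
+ * bufferevents joined back to back, so data written to pair[0] becomes
+ * readable on pair[1] and vice versa.  Guarded by #if 0 so it cannot affect
+ * the build. */
+#if 0
+static void
+example_pair_read_cb(struct bufferevent *bev, void *ctx)
+{
+	/* Drain whatever the peer bufferevent wrote. */
+	char buf[64];
+	while (bufferevent_read(bev, buf, sizeof(buf)) > 0)
+		;
+}
+
+static void
+example_pair_usage(struct event_base *base)
+{
+	struct bufferevent *pair[2];
+
+	if (bufferevent_pair_new(base, BEV_OPT_CLOSE_ON_FREE, pair) < 0)
+		return;
+
+	bufferevent_setcb(pair[1], example_pair_read_cb, NULL, NULL, NULL);
+	bufferevent_enable(pair[1], EV_READ);
+
+	/* This write is handed straight to pair[1]'s input buffer. */
+	bufferevent_write(pair[0], "hello", 5);
+}
+#endif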
diff --git a/libs/libevent/src/bufferevent_ratelim.c b/libs/libevent/src/bufferevent_ratelim.c
new file mode 100644
index 0000000000..bde192021b
--- /dev/null
+++ b/libs/libevent/src/bufferevent_ratelim.c
@@ -0,0 +1,1092 @@
+/*
+ * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
+ * Copyright (c) 2002-2006 Niels Provos <provos@citi.umich.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "evconfig-private.h"
+
+#include <sys/types.h>
+#include <limits.h>
+#include <string.h>
+#include <stdlib.h>
+
+#include "event2/event.h"
+#include "event2/event_struct.h"
+#include "event2/util.h"
+#include "event2/bufferevent.h"
+#include "event2/bufferevent_struct.h"
+#include "event2/buffer.h"
+
+#include "ratelim-internal.h"
+
+#include "bufferevent-internal.h"
+#include "mm-internal.h"
+#include "util-internal.h"
+#include "event-internal.h"
+
+int
+ev_token_bucket_init_(struct ev_token_bucket *bucket,
+ const struct ev_token_bucket_cfg *cfg,
+ ev_uint32_t current_tick,
+ int reinitialize)
+{
+ if (reinitialize) {
+ /* on reinitialization, we only clip downwards, since we've
+ already used who-knows-how-much bandwidth this tick. We
+ leave "last_updated" as it is; the next update will add the
+ appropriate amount of bandwidth to the bucket.
+ */
+ if (bucket->read_limit > (ev_int64_t) cfg->read_maximum)
+ bucket->read_limit = cfg->read_maximum;
+ if (bucket->write_limit > (ev_int64_t) cfg->write_maximum)
+ bucket->write_limit = cfg->write_maximum;
+ } else {
+ bucket->read_limit = cfg->read_rate;
+ bucket->write_limit = cfg->write_rate;
+ bucket->last_updated = current_tick;
+ }
+ return 0;
+}
+
+int
+ev_token_bucket_update_(struct ev_token_bucket *bucket,
+ const struct ev_token_bucket_cfg *cfg,
+ ev_uint32_t current_tick)
+{
+ /* It's okay if the tick number overflows, since we'll just
+	 * wrap around when we do the unsigned subtraction. */
+ unsigned n_ticks = current_tick - bucket->last_updated;
+
+ /* Make sure some ticks actually happened, and that time didn't
+ * roll back. */
+ if (n_ticks == 0 || n_ticks > INT_MAX)
+ return 0;
+
+ /* Naively, we would say
+ bucket->limit += n_ticks * cfg->rate;
+
+ if (bucket->limit > cfg->maximum)
+ bucket->limit = cfg->maximum;
+
+ But we're worried about overflow, so we do it like this:
+ */
+
+ if ((cfg->read_maximum - bucket->read_limit) / n_ticks < cfg->read_rate)
+ bucket->read_limit = cfg->read_maximum;
+ else
+ bucket->read_limit += n_ticks * cfg->read_rate;
+
+
+ if ((cfg->write_maximum - bucket->write_limit) / n_ticks < cfg->write_rate)
+ bucket->write_limit = cfg->write_maximum;
+ else
+ bucket->write_limit += n_ticks * cfg->write_rate;
+
+
+ bucket->last_updated = current_tick;
+
+ return 1;
+}
+
+static inline void
+bufferevent_update_buckets(struct bufferevent_private *bev)
+{
+ /* Must hold lock on bev. */
+ struct timeval now;
+ unsigned tick;
+ event_base_gettimeofday_cached(bev->bev.ev_base, &now);
+ tick = ev_token_bucket_get_tick_(&now, bev->rate_limiting->cfg);
+ if (tick != bev->rate_limiting->limit.last_updated)
+ ev_token_bucket_update_(&bev->rate_limiting->limit,
+ bev->rate_limiting->cfg, tick);
+}
+
+ev_uint32_t
+ev_token_bucket_get_tick_(const struct timeval *tv,
+ const struct ev_token_bucket_cfg *cfg)
+{
+ /* This computation uses two multiplies and a divide. We could do
+ * fewer if we knew that the tick length was an integer number of
+ * seconds, or if we knew it divided evenly into a second. We should
+ * investigate that more.
+ */
+
+ /* We cast to an ev_uint64_t first, since we don't want to overflow
+ * before we do the final divide. */
+ ev_uint64_t msec = (ev_uint64_t)tv->tv_sec * 1000 + tv->tv_usec / 1000;
+ return (unsigned)(msec / cfg->msec_per_tick);
+}
+
+struct ev_token_bucket_cfg *
+ev_token_bucket_cfg_new(size_t read_rate, size_t read_burst,
+ size_t write_rate, size_t write_burst,
+ const struct timeval *tick_len)
+{
+ struct ev_token_bucket_cfg *r;
+ struct timeval g;
+ if (! tick_len) {
+ g.tv_sec = 1;
+ g.tv_usec = 0;
+ tick_len = &g;
+ }
+ if (read_rate > read_burst || write_rate > write_burst ||
+ read_rate < 1 || write_rate < 1)
+ return NULL;
+ if (read_rate > EV_RATE_LIMIT_MAX ||
+ write_rate > EV_RATE_LIMIT_MAX ||
+ read_burst > EV_RATE_LIMIT_MAX ||
+ write_burst > EV_RATE_LIMIT_MAX)
+ return NULL;
+ r = mm_calloc(1, sizeof(struct ev_token_bucket_cfg));
+ if (!r)
+ return NULL;
+ r->read_rate = read_rate;
+ r->write_rate = write_rate;
+ r->read_maximum = read_burst;
+ r->write_maximum = write_burst;
+ memcpy(&r->tick_timeout, tick_len, sizeof(struct timeval));
+ r->msec_per_tick = (tick_len->tv_sec * 1000) +
+ (tick_len->tv_usec & COMMON_TIMEOUT_MICROSECONDS_MASK)/1000;
+ return r;
+}
+
+void
+ev_token_bucket_cfg_free(struct ev_token_bucket_cfg *cfg)
+{
+ mm_free(cfg);
+}
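+
+/* Illustrative sketch (editor's addition, not upstream libevent code):
+ * typical use of the configuration API above.  With a 1-second tick, a rate
+ * of 4096 and a burst of 8192 limit a bufferevent to roughly 4 KiB/s in
+ * each direction, with bursts of up to 8 KiB.  Guarded by #if 0 so it
+ * cannot affect the build. */
+#if 0
+static int
+example_apply_rate_limit(struct bufferevent *bev)
+{
+	struct timeval tick = { 1, 0 };	/* one tick == one second */
+	struct ev_token_bucket_cfg *cfg;
+
+	cfg = ev_token_bucket_cfg_new(4096, 8192, 4096, 8192, &tick);
+	if (!cfg)
+		return -1;
+	/* The cfg must outlive every bufferevent that uses it. */
+	return bufferevent_set_rate_limit(bev, cfg);
+}
+#endif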
+
+/* Default values for max_single_read & max_single_write variables. */
+#define MAX_SINGLE_READ_DEFAULT 16384
+#define MAX_SINGLE_WRITE_DEFAULT 16384
+
+#define LOCK_GROUP(g) EVLOCK_LOCK((g)->lock, 0)
+#define UNLOCK_GROUP(g) EVLOCK_UNLOCK((g)->lock, 0)
+
+static int bev_group_suspend_reading_(struct bufferevent_rate_limit_group *g);
+static int bev_group_suspend_writing_(struct bufferevent_rate_limit_group *g);
+static void bev_group_unsuspend_reading_(struct bufferevent_rate_limit_group *g);
+static void bev_group_unsuspend_writing_(struct bufferevent_rate_limit_group *g);
+
+/** Helper: figure out the maximum amount we should write if is_write is
+    true, or the maximum amount we should read otherwise.  Return that maximum, or
+ 0 if our bucket is wholly exhausted.
+ */
+static inline ev_ssize_t
+bufferevent_get_rlim_max_(struct bufferevent_private *bev, int is_write)
+{
+ /* needs lock on bev. */
+ ev_ssize_t max_so_far = is_write?bev->max_single_write:bev->max_single_read;
+
+#define LIM(x) \
+ (is_write ? (x).write_limit : (x).read_limit)
+
+#define GROUP_SUSPENDED(g) \
+ (is_write ? (g)->write_suspended : (g)->read_suspended)
+
+ /* Sets max_so_far to MIN(x, max_so_far) */
+#define CLAMPTO(x) \
+ do { \
+ if (max_so_far > (x)) \
+ max_so_far = (x); \
+ } while (0);
+
+ if (!bev->rate_limiting)
+ return max_so_far;
+
+ /* If rate-limiting is enabled at all, update the appropriate
+ bucket, and take the smaller of our rate limit and the group
+ rate limit.
+ */
+
+ if (bev->rate_limiting->cfg) {
+ bufferevent_update_buckets(bev);
+ max_so_far = LIM(bev->rate_limiting->limit);
+ }
+ if (bev->rate_limiting->group) {
+ struct bufferevent_rate_limit_group *g =
+ bev->rate_limiting->group;
+ ev_ssize_t share;
+ LOCK_GROUP(g);
+ if (GROUP_SUSPENDED(g)) {
+ /* We can get here if we failed to lock this
+ * particular bufferevent while suspending the whole
+ * group. */
+ if (is_write)
+ bufferevent_suspend_write_(&bev->bev,
+ BEV_SUSPEND_BW_GROUP);
+ else
+ bufferevent_suspend_read_(&bev->bev,
+ BEV_SUSPEND_BW_GROUP);
+ share = 0;
+ } else {
+ /* XXXX probably we should divide among the active
+ * members, not the total members. */
+ share = LIM(g->rate_limit) / g->n_members;
+ if (share < g->min_share)
+ share = g->min_share;
+ }
+ UNLOCK_GROUP(g);
+ CLAMPTO(share);
+ }
+
+ if (max_so_far < 0)
+ max_so_far = 0;
+ return max_so_far;
+}
+
+ev_ssize_t
+bufferevent_get_read_max_(struct bufferevent_private *bev)
+{
+ return bufferevent_get_rlim_max_(bev, 0);
+}
+
+ev_ssize_t
+bufferevent_get_write_max_(struct bufferevent_private *bev)
+{
+ return bufferevent_get_rlim_max_(bev, 1);
+}
+
+int
+bufferevent_decrement_read_buckets_(struct bufferevent_private *bev, ev_ssize_t bytes)
+{
+ /* XXXXX Make sure all users of this function check its return value */
+ int r = 0;
+ /* need to hold lock on bev */
+ if (!bev->rate_limiting)
+ return 0;
+
+ if (bev->rate_limiting->cfg) {
+ bev->rate_limiting->limit.read_limit -= bytes;
+ if (bev->rate_limiting->limit.read_limit <= 0) {
+ bufferevent_suspend_read_(&bev->bev, BEV_SUSPEND_BW);
+ if (event_add(&bev->rate_limiting->refill_bucket_event,
+ &bev->rate_limiting->cfg->tick_timeout) < 0)
+ r = -1;
+ } else if (bev->read_suspended & BEV_SUSPEND_BW) {
+ if (!(bev->write_suspended & BEV_SUSPEND_BW))
+ event_del(&bev->rate_limiting->refill_bucket_event);
+ bufferevent_unsuspend_read_(&bev->bev, BEV_SUSPEND_BW);
+ }
+ }
+
+ if (bev->rate_limiting->group) {
+ LOCK_GROUP(bev->rate_limiting->group);
+ bev->rate_limiting->group->rate_limit.read_limit -= bytes;
+ bev->rate_limiting->group->total_read += bytes;
+ if (bev->rate_limiting->group->rate_limit.read_limit <= 0) {
+ bev_group_suspend_reading_(bev->rate_limiting->group);
+ } else if (bev->rate_limiting->group->read_suspended) {
+ bev_group_unsuspend_reading_(bev->rate_limiting->group);
+ }
+ UNLOCK_GROUP(bev->rate_limiting->group);
+ }
+
+ return r;
+}
+
+int
+bufferevent_decrement_write_buckets_(struct bufferevent_private *bev, ev_ssize_t bytes)
+{
+ /* XXXXX Make sure all users of this function check its return value */
+ int r = 0;
+ /* need to hold lock */
+ if (!bev->rate_limiting)
+ return 0;
+
+ if (bev->rate_limiting->cfg) {
+ bev->rate_limiting->limit.write_limit -= bytes;
+ if (bev->rate_limiting->limit.write_limit <= 0) {
+ bufferevent_suspend_write_(&bev->bev, BEV_SUSPEND_BW);
+ if (event_add(&bev->rate_limiting->refill_bucket_event,
+ &bev->rate_limiting->cfg->tick_timeout) < 0)
+ r = -1;
+ } else if (bev->write_suspended & BEV_SUSPEND_BW) {
+ if (!(bev->read_suspended & BEV_SUSPEND_BW))
+ event_del(&bev->rate_limiting->refill_bucket_event);
+ bufferevent_unsuspend_write_(&bev->bev, BEV_SUSPEND_BW);
+ }
+ }
+
+ if (bev->rate_limiting->group) {
+ LOCK_GROUP(bev->rate_limiting->group);
+ bev->rate_limiting->group->rate_limit.write_limit -= bytes;
+ bev->rate_limiting->group->total_written += bytes;
+ if (bev->rate_limiting->group->rate_limit.write_limit <= 0) {
+ bev_group_suspend_writing_(bev->rate_limiting->group);
+ } else if (bev->rate_limiting->group->write_suspended) {
+ bev_group_unsuspend_writing_(bev->rate_limiting->group);
+ }
+ UNLOCK_GROUP(bev->rate_limiting->group);
+ }
+
+ return r;
+}
+
+/** Stop reading on every bufferevent in <b>g</b> */
+static int
+bev_group_suspend_reading_(struct bufferevent_rate_limit_group *g)
+{
+ /* Needs group lock */
+ struct bufferevent_private *bev;
+ g->read_suspended = 1;
+ g->pending_unsuspend_read = 0;
+
+ /* Note that in this loop we call EVLOCK_TRY_LOCK_ instead of BEV_LOCK,
+ to prevent a deadlock. (Ordinarily, the group lock nests inside
+ the bufferevent locks. If we are unable to lock any individual
+ bufferevent, it will find out later when it looks at its limit
+ and sees that its group is suspended.)
+ */
+ LIST_FOREACH(bev, &g->members, rate_limiting->next_in_group) {
+ if (EVLOCK_TRY_LOCK_(bev->lock)) {
+ bufferevent_suspend_read_(&bev->bev,
+ BEV_SUSPEND_BW_GROUP);
+ EVLOCK_UNLOCK(bev->lock, 0);
+ }
+ }
+ return 0;
+}
+
+/** Stop writing on every bufferevent in <b>g</b> */
+static int
+bev_group_suspend_writing_(struct bufferevent_rate_limit_group *g)
+{
+ /* Needs group lock */
+ struct bufferevent_private *bev;
+ g->write_suspended = 1;
+ g->pending_unsuspend_write = 0;
+ LIST_FOREACH(bev, &g->members, rate_limiting->next_in_group) {
+ if (EVLOCK_TRY_LOCK_(bev->lock)) {
+ bufferevent_suspend_write_(&bev->bev,
+ BEV_SUSPEND_BW_GROUP);
+ EVLOCK_UNLOCK(bev->lock, 0);
+ }
+ }
+ return 0;
+}
+
+/** Timer callback invoked on a single bufferevent with one or more exhausted
+ buckets when they are ready to refill. */
+static void
+bev_refill_callback_(evutil_socket_t fd, short what, void *arg)
+{
+ unsigned tick;
+ struct timeval now;
+ struct bufferevent_private *bev = arg;
+ int again = 0;
+ BEV_LOCK(&bev->bev);
+ if (!bev->rate_limiting || !bev->rate_limiting->cfg) {
+ BEV_UNLOCK(&bev->bev);
+ return;
+ }
+
+ /* First, update the bucket */
+ event_base_gettimeofday_cached(bev->bev.ev_base, &now);
+ tick = ev_token_bucket_get_tick_(&now,
+ bev->rate_limiting->cfg);
+ ev_token_bucket_update_(&bev->rate_limiting->limit,
+ bev->rate_limiting->cfg,
+ tick);
+
+ /* Now unsuspend any read/write operations as appropriate. */
+ if ((bev->read_suspended & BEV_SUSPEND_BW)) {
+ if (bev->rate_limiting->limit.read_limit > 0)
+ bufferevent_unsuspend_read_(&bev->bev, BEV_SUSPEND_BW);
+ else
+ again = 1;
+ }
+ if ((bev->write_suspended & BEV_SUSPEND_BW)) {
+ if (bev->rate_limiting->limit.write_limit > 0)
+ bufferevent_unsuspend_write_(&bev->bev, BEV_SUSPEND_BW);
+ else
+ again = 1;
+ }
+ if (again) {
+ /* One or more of the buckets may need another refill if they
+ started negative.
+
+ XXXX if we need to be quiet for more ticks, we should
+ maybe figure out what timeout we really want.
+ */
+ /* XXXX Handle event_add failure somehow */
+ event_add(&bev->rate_limiting->refill_bucket_event,
+ &bev->rate_limiting->cfg->tick_timeout);
+ }
+ BEV_UNLOCK(&bev->bev);
+}
+
+/** Helper: grab a random element from a bufferevent group.
+ *
+ * Requires that we hold the lock on the group.
+ */
+static struct bufferevent_private *
+bev_group_random_element_(struct bufferevent_rate_limit_group *group)
+{
+ int which;
+ struct bufferevent_private *bev;
+
+ /* requires group lock */
+
+ if (!group->n_members)
+ return NULL;
+
+ EVUTIL_ASSERT(! LIST_EMPTY(&group->members));
+
+ which = evutil_weakrand_range_(&group->weakrand_seed, group->n_members);
+
+ bev = LIST_FIRST(&group->members);
+ while (which--)
+ bev = LIST_NEXT(bev, rate_limiting->next_in_group);
+
+ return bev;
+}
+
+/** Iterate over the elements of a rate-limiting group 'g' with a random
+ starting point, assigning each to the variable 'bev', and executing the
+ block 'block'.
+
+ We do this in a half-baked effort to get fairness among group members.
+ XXX Round-robin or some kind of priority queue would be even more fair.
+ */
+#define FOREACH_RANDOM_ORDER(block) \
+ do { \
+ first = bev_group_random_element_(g); \
+ for (bev = first; bev != LIST_END(&g->members); \
+ bev = LIST_NEXT(bev, rate_limiting->next_in_group)) { \
+ block ; \
+ } \
+ for (bev = LIST_FIRST(&g->members); bev && bev != first; \
+ bev = LIST_NEXT(bev, rate_limiting->next_in_group)) { \
+ block ; \
+ } \
+ } while (0)
+
+static void
+bev_group_unsuspend_reading_(struct bufferevent_rate_limit_group *g)
+{
+ int again = 0;
+ struct bufferevent_private *bev, *first;
+
+ g->read_suspended = 0;
+ FOREACH_RANDOM_ORDER({
+ if (EVLOCK_TRY_LOCK_(bev->lock)) {
+ bufferevent_unsuspend_read_(&bev->bev,
+ BEV_SUSPEND_BW_GROUP);
+ EVLOCK_UNLOCK(bev->lock, 0);
+ } else {
+ again = 1;
+ }
+ });
+ g->pending_unsuspend_read = again;
+}
+
+static void
+bev_group_unsuspend_writing_(struct bufferevent_rate_limit_group *g)
+{
+ int again = 0;
+ struct bufferevent_private *bev, *first;
+ g->write_suspended = 0;
+
+ FOREACH_RANDOM_ORDER({
+ if (EVLOCK_TRY_LOCK_(bev->lock)) {
+ bufferevent_unsuspend_write_(&bev->bev,
+ BEV_SUSPEND_BW_GROUP);
+ EVLOCK_UNLOCK(bev->lock, 0);
+ } else {
+ again = 1;
+ }
+ });
+ g->pending_unsuspend_write = again;
+}
+
+/** Callback invoked every tick to add more elements to the group bucket
+ and unsuspend group members as needed.
+ */
+static void
+bev_group_refill_callback_(evutil_socket_t fd, short what, void *arg)
+{
+ struct bufferevent_rate_limit_group *g = arg;
+ unsigned tick;
+ struct timeval now;
+
+ event_base_gettimeofday_cached(event_get_base(&g->master_refill_event), &now);
+
+ LOCK_GROUP(g);
+
+ tick = ev_token_bucket_get_tick_(&now, &g->rate_limit_cfg);
+ ev_token_bucket_update_(&g->rate_limit, &g->rate_limit_cfg, tick);
+
+ if (g->pending_unsuspend_read ||
+ (g->read_suspended && (g->rate_limit.read_limit >= g->min_share))) {
+ bev_group_unsuspend_reading_(g);
+ }
+ if (g->pending_unsuspend_write ||
+ (g->write_suspended && (g->rate_limit.write_limit >= g->min_share))){
+ bev_group_unsuspend_writing_(g);
+ }
+
+	/* XXXX Rather than waiting until the next tick to unsuspend things
+ * with pending_unsuspend_write/read, we should do it on the
+ * next iteration of the mainloop.
+ */
+
+ UNLOCK_GROUP(g);
+}
+
+int
+bufferevent_set_rate_limit(struct bufferevent *bev,
+ struct ev_token_bucket_cfg *cfg)
+{
+ struct bufferevent_private *bevp =
+ EVUTIL_UPCAST(bev, struct bufferevent_private, bev);
+ int r = -1;
+ struct bufferevent_rate_limit *rlim;
+ struct timeval now;
+ ev_uint32_t tick;
+ int reinit = 0, suspended = 0;
+ /* XXX reference-count cfg */
+
+ BEV_LOCK(bev);
+
+ if (cfg == NULL) {
+ if (bevp->rate_limiting) {
+ rlim = bevp->rate_limiting;
+ rlim->cfg = NULL;
+ bufferevent_unsuspend_read_(bev, BEV_SUSPEND_BW);
+ bufferevent_unsuspend_write_(bev, BEV_SUSPEND_BW);
+ if (event_initialized(&rlim->refill_bucket_event))
+ event_del(&rlim->refill_bucket_event);
+ }
+ r = 0;
+ goto done;
+ }
+
+ event_base_gettimeofday_cached(bev->ev_base, &now);
+ tick = ev_token_bucket_get_tick_(&now, cfg);
+
+ if (bevp->rate_limiting && bevp->rate_limiting->cfg == cfg) {
+ /* no-op */
+ r = 0;
+ goto done;
+ }
+ if (bevp->rate_limiting == NULL) {
+ rlim = mm_calloc(1, sizeof(struct bufferevent_rate_limit));
+ if (!rlim)
+ goto done;
+ bevp->rate_limiting = rlim;
+ } else {
+ rlim = bevp->rate_limiting;
+ }
+ reinit = rlim->cfg != NULL;
+
+ rlim->cfg = cfg;
+ ev_token_bucket_init_(&rlim->limit, cfg, tick, reinit);
+
+ if (reinit) {
+ EVUTIL_ASSERT(event_initialized(&rlim->refill_bucket_event));
+ event_del(&rlim->refill_bucket_event);
+ }
+ event_assign(&rlim->refill_bucket_event, bev->ev_base,
+ -1, EV_FINALIZE, bev_refill_callback_, bevp);
+
+ if (rlim->limit.read_limit > 0) {
+ bufferevent_unsuspend_read_(bev, BEV_SUSPEND_BW);
+ } else {
+ bufferevent_suspend_read_(bev, BEV_SUSPEND_BW);
+ suspended=1;
+ }
+ if (rlim->limit.write_limit > 0) {
+ bufferevent_unsuspend_write_(bev, BEV_SUSPEND_BW);
+ } else {
+ bufferevent_suspend_write_(bev, BEV_SUSPEND_BW);
+ suspended = 1;
+ }
+
+ if (suspended)
+ event_add(&rlim->refill_bucket_event, &cfg->tick_timeout);
+
+ r = 0;
+
+done:
+ BEV_UNLOCK(bev);
+ return r;
+}
+
+struct bufferevent_rate_limit_group *
+bufferevent_rate_limit_group_new(struct event_base *base,
+ const struct ev_token_bucket_cfg *cfg)
+{
+ struct bufferevent_rate_limit_group *g;
+ struct timeval now;
+ ev_uint32_t tick;
+
+ event_base_gettimeofday_cached(base, &now);
+ tick = ev_token_bucket_get_tick_(&now, cfg);
+
+ g = mm_calloc(1, sizeof(struct bufferevent_rate_limit_group));
+ if (!g)
+ return NULL;
+ memcpy(&g->rate_limit_cfg, cfg, sizeof(g->rate_limit_cfg));
+ LIST_INIT(&g->members);
+
+ ev_token_bucket_init_(&g->rate_limit, cfg, tick, 0);
+
+ event_assign(&g->master_refill_event, base, -1, EV_PERSIST|EV_FINALIZE,
+ bev_group_refill_callback_, g);
+ /*XXXX handle event_add failure */
+ event_add(&g->master_refill_event, &cfg->tick_timeout);
+
+ EVTHREAD_ALLOC_LOCK(g->lock, EVTHREAD_LOCKTYPE_RECURSIVE);
+
+ bufferevent_rate_limit_group_set_min_share(g, 64);
+
+ evutil_weakrand_seed_(&g->weakrand_seed,
+ (ev_uint32_t) ((now.tv_sec + now.tv_usec) + (ev_intptr_t)g));
+
+ return g;
+}
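+
+/* Illustrative sketch (editor's addition, not upstream libevent code): a
+ * rate-limit group shares one token bucket among all of its members,
+ * capping their aggregate throughput.  Note that group_new() copies the
+ * cfg, so it may be freed afterwards.  Guarded by #if 0 so it cannot
+ * affect the build. */
+#if 0
+static struct bufferevent_rate_limit_group *
+example_make_group(struct event_base *base, struct bufferevent *bev)
+{
+	struct timeval tick = { 1, 0 };
+	struct ev_token_bucket_cfg *cfg;
+	struct bufferevent_rate_limit_group *grp;
+
+	/* At most ~64 KiB/s aggregate in each direction across all members. */
+	cfg = ev_token_bucket_cfg_new(65536, 65536, 65536, 65536, &tick);
+	if (!cfg)
+		return NULL;
+	grp = bufferevent_rate_limit_group_new(base, cfg);
+	if (grp)
+		bufferevent_add_to_rate_limit_group(bev, grp);
+	ev_token_bucket_cfg_free(cfg);
+	return grp;
+}
+#endif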
+
+int
+bufferevent_rate_limit_group_set_cfg(
+ struct bufferevent_rate_limit_group *g,
+ const struct ev_token_bucket_cfg *cfg)
+{
+ int same_tick;
+ if (!g || !cfg)
+ return -1;
+
+ LOCK_GROUP(g);
+ same_tick = evutil_timercmp(
+ &g->rate_limit_cfg.tick_timeout, &cfg->tick_timeout, ==);
+ memcpy(&g->rate_limit_cfg, cfg, sizeof(g->rate_limit_cfg));
+
+ if (g->rate_limit.read_limit > (ev_ssize_t)cfg->read_maximum)
+ g->rate_limit.read_limit = cfg->read_maximum;
+ if (g->rate_limit.write_limit > (ev_ssize_t)cfg->write_maximum)
+ g->rate_limit.write_limit = cfg->write_maximum;
+
+ if (!same_tick) {
+ /* This can cause a hiccup in the schedule */
+ event_add(&g->master_refill_event, &cfg->tick_timeout);
+ }
+
+ /* The new limits might force us to adjust min_share differently. */
+ bufferevent_rate_limit_group_set_min_share(g, g->configured_min_share);
+
+ UNLOCK_GROUP(g);
+ return 0;
+}
+
+int
+bufferevent_rate_limit_group_set_min_share(
+ struct bufferevent_rate_limit_group *g,
+ size_t share)
+{
+ if (share > EV_SSIZE_MAX)
+ return -1;
+
+ g->configured_min_share = share;
+
+	/* Don't let the share exceed the one-tick rate; IOW, at steady
+	 * state, at least one connection can make progress per tick. */
+ if (share > g->rate_limit_cfg.read_rate)
+ share = g->rate_limit_cfg.read_rate;
+ if (share > g->rate_limit_cfg.write_rate)
+ share = g->rate_limit_cfg.write_rate;
+
+ g->min_share = share;
+ return 0;
+}
+
+void
+bufferevent_rate_limit_group_free(struct bufferevent_rate_limit_group *g)
+{
+ LOCK_GROUP(g);
+ EVUTIL_ASSERT(0 == g->n_members);
+ event_del(&g->master_refill_event);
+ UNLOCK_GROUP(g);
+ EVTHREAD_FREE_LOCK(g->lock, EVTHREAD_LOCKTYPE_RECURSIVE);
+ mm_free(g);
+}
+
+int
+bufferevent_add_to_rate_limit_group(struct bufferevent *bev,
+ struct bufferevent_rate_limit_group *g)
+{
+ int wsuspend, rsuspend;
+ struct bufferevent_private *bevp =
+ EVUTIL_UPCAST(bev, struct bufferevent_private, bev);
+ BEV_LOCK(bev);
+
+ if (!bevp->rate_limiting) {
+ struct bufferevent_rate_limit *rlim;
+ rlim = mm_calloc(1, sizeof(struct bufferevent_rate_limit));
+ if (!rlim) {
+ BEV_UNLOCK(bev);
+ return -1;
+ }
+ event_assign(&rlim->refill_bucket_event, bev->ev_base,
+ -1, EV_FINALIZE, bev_refill_callback_, bevp);
+ bevp->rate_limiting = rlim;
+ }
+
+ if (bevp->rate_limiting->group == g) {
+ BEV_UNLOCK(bev);
+ return 0;
+ }
+ if (bevp->rate_limiting->group)
+ bufferevent_remove_from_rate_limit_group(bev);
+
+ LOCK_GROUP(g);
+ bevp->rate_limiting->group = g;
+ ++g->n_members;
+ LIST_INSERT_HEAD(&g->members, bevp, rate_limiting->next_in_group);
+
+ rsuspend = g->read_suspended;
+ wsuspend = g->write_suspended;
+
+ UNLOCK_GROUP(g);
+
+ if (rsuspend)
+ bufferevent_suspend_read_(bev, BEV_SUSPEND_BW_GROUP);
+ if (wsuspend)
+ bufferevent_suspend_write_(bev, BEV_SUSPEND_BW_GROUP);
+
+ BEV_UNLOCK(bev);
+ return 0;
+}
+
+int
+bufferevent_remove_from_rate_limit_group(struct bufferevent *bev)
+{
+ return bufferevent_remove_from_rate_limit_group_internal_(bev, 1);
+}
+
+int
+bufferevent_remove_from_rate_limit_group_internal_(struct bufferevent *bev,
+ int unsuspend)
+{
+ struct bufferevent_private *bevp =
+ EVUTIL_UPCAST(bev, struct bufferevent_private, bev);
+ BEV_LOCK(bev);
+ if (bevp->rate_limiting && bevp->rate_limiting->group) {
+ struct bufferevent_rate_limit_group *g =
+ bevp->rate_limiting->group;
+ LOCK_GROUP(g);
+ bevp->rate_limiting->group = NULL;
+ --g->n_members;
+ LIST_REMOVE(bevp, rate_limiting->next_in_group);
+ UNLOCK_GROUP(g);
+ }
+ if (unsuspend) {
+ bufferevent_unsuspend_read_(bev, BEV_SUSPEND_BW_GROUP);
+ bufferevent_unsuspend_write_(bev, BEV_SUSPEND_BW_GROUP);
+ }
+ BEV_UNLOCK(bev);
+ return 0;
+}
+
+/* ===
+ * API functions to expose rate limits.
+ *
+ * Don't use these from inside Libevent; they're meant to be for use by
+ * the program.
+ * === */
+
+/* Mostly you don't want to use this function from inside libevent;
+ * bufferevent_get_read_max_() is more likely what you want. */
+ev_ssize_t
+bufferevent_get_read_limit(struct bufferevent *bev)
+{
+ ev_ssize_t r;
+ struct bufferevent_private *bevp;
+ BEV_LOCK(bev);
+ bevp = BEV_UPCAST(bev);
+ if (bevp->rate_limiting && bevp->rate_limiting->cfg) {
+ bufferevent_update_buckets(bevp);
+ r = bevp->rate_limiting->limit.read_limit;
+ } else {
+ r = EV_SSIZE_MAX;
+ }
+ BEV_UNLOCK(bev);
+ return r;
+}
+
+/* Mostly you don't want to use this function from inside libevent;
+ * bufferevent_get_write_max_() is more likely what you want. */
+ev_ssize_t
+bufferevent_get_write_limit(struct bufferevent *bev)
+{
+ ev_ssize_t r;
+ struct bufferevent_private *bevp;
+ BEV_LOCK(bev);
+ bevp = BEV_UPCAST(bev);
+ if (bevp->rate_limiting && bevp->rate_limiting->cfg) {
+ bufferevent_update_buckets(bevp);
+ r = bevp->rate_limiting->limit.write_limit;
+ } else {
+ r = EV_SSIZE_MAX;
+ }
+ BEV_UNLOCK(bev);
+ return r;
+}
+
+int
+bufferevent_set_max_single_read(struct bufferevent *bev, size_t size)
+{
+ struct bufferevent_private *bevp;
+ BEV_LOCK(bev);
+ bevp = BEV_UPCAST(bev);
+ if (size == 0 || size > EV_SSIZE_MAX)
+ bevp->max_single_read = MAX_SINGLE_READ_DEFAULT;
+ else
+ bevp->max_single_read = size;
+ BEV_UNLOCK(bev);
+ return 0;
+}
+
+int
+bufferevent_set_max_single_write(struct bufferevent *bev, size_t size)
+{
+ struct bufferevent_private *bevp;
+ BEV_LOCK(bev);
+ bevp = BEV_UPCAST(bev);
+ if (size == 0 || size > EV_SSIZE_MAX)
+ bevp->max_single_write = MAX_SINGLE_WRITE_DEFAULT;
+ else
+ bevp->max_single_write = size;
+ BEV_UNLOCK(bev);
+ return 0;
+}
+
+ev_ssize_t
+bufferevent_get_max_single_read(struct bufferevent *bev)
+{
+ ev_ssize_t r;
+
+ BEV_LOCK(bev);
+ r = BEV_UPCAST(bev)->max_single_read;
+ BEV_UNLOCK(bev);
+ return r;
+}
+
+ev_ssize_t
+bufferevent_get_max_single_write(struct bufferevent *bev)
+{
+ ev_ssize_t r;
+
+ BEV_LOCK(bev);
+ r = BEV_UPCAST(bev)->max_single_write;
+ BEV_UNLOCK(bev);
+ return r;
+}
+
+ev_ssize_t
+bufferevent_get_max_to_read(struct bufferevent *bev)
+{
+ ev_ssize_t r;
+ BEV_LOCK(bev);
+ r = bufferevent_get_read_max_(BEV_UPCAST(bev));
+ BEV_UNLOCK(bev);
+ return r;
+}
+
+ev_ssize_t
+bufferevent_get_max_to_write(struct bufferevent *bev)
+{
+ ev_ssize_t r;
+ BEV_LOCK(bev);
+ r = bufferevent_get_write_max_(BEV_UPCAST(bev));
+ BEV_UNLOCK(bev);
+ return r;
+}
+
+const struct ev_token_bucket_cfg *
+bufferevent_get_token_bucket_cfg(const struct bufferevent *bev) {
+ struct bufferevent_private *bufev_private = BEV_UPCAST(bev);
+ struct ev_token_bucket_cfg *cfg;
+
+ BEV_LOCK(bev);
+
+ if (bufev_private->rate_limiting) {
+ cfg = bufev_private->rate_limiting->cfg;
+ } else {
+ cfg = NULL;
+ }
+
+ BEV_UNLOCK(bev);
+
+ return cfg;
+}
+
+/* Mostly you don't want to use this function from inside libevent;
+ * bufferevent_get_read_max_() is more likely what you want. */
+ev_ssize_t
+bufferevent_rate_limit_group_get_read_limit(
+ struct bufferevent_rate_limit_group *grp)
+{
+ ev_ssize_t r;
+ LOCK_GROUP(grp);
+ r = grp->rate_limit.read_limit;
+ UNLOCK_GROUP(grp);
+ return r;
+}
+
+/* Mostly you don't want to use this function from inside libevent;
+ * bufferevent_get_write_max_() is more likely what you want. */
+ev_ssize_t
+bufferevent_rate_limit_group_get_write_limit(
+ struct bufferevent_rate_limit_group *grp)
+{
+ ev_ssize_t r;
+ LOCK_GROUP(grp);
+ r = grp->rate_limit.write_limit;
+ UNLOCK_GROUP(grp);
+ return r;
+}
+
+int
+bufferevent_decrement_read_limit(struct bufferevent *bev, ev_ssize_t decr)
+{
+ int r = 0;
+ ev_ssize_t old_limit, new_limit;
+ struct bufferevent_private *bevp;
+ BEV_LOCK(bev);
+ bevp = BEV_UPCAST(bev);
+ EVUTIL_ASSERT(bevp->rate_limiting && bevp->rate_limiting->cfg);
+ old_limit = bevp->rate_limiting->limit.read_limit;
+
+ new_limit = (bevp->rate_limiting->limit.read_limit -= decr);
+ if (old_limit > 0 && new_limit <= 0) {
+ bufferevent_suspend_read_(bev, BEV_SUSPEND_BW);
+ if (event_add(&bevp->rate_limiting->refill_bucket_event,
+ &bevp->rate_limiting->cfg->tick_timeout) < 0)
+ r = -1;
+ } else if (old_limit <= 0 && new_limit > 0) {
+ if (!(bevp->write_suspended & BEV_SUSPEND_BW))
+ event_del(&bevp->rate_limiting->refill_bucket_event);
+ bufferevent_unsuspend_read_(bev, BEV_SUSPEND_BW);
+ }
+
+ BEV_UNLOCK(bev);
+ return r;
+}
+
+int
+bufferevent_decrement_write_limit(struct bufferevent *bev, ev_ssize_t decr)
+{
+ /* XXXX this is mostly copy-and-paste from
+ * bufferevent_decrement_read_limit */
+ int r = 0;
+ ev_ssize_t old_limit, new_limit;
+ struct bufferevent_private *bevp;
+ BEV_LOCK(bev);
+ bevp = BEV_UPCAST(bev);
+ EVUTIL_ASSERT(bevp->rate_limiting && bevp->rate_limiting->cfg);
+ old_limit = bevp->rate_limiting->limit.write_limit;
+
+ new_limit = (bevp->rate_limiting->limit.write_limit -= decr);
+ if (old_limit > 0 && new_limit <= 0) {
+ bufferevent_suspend_write_(bev, BEV_SUSPEND_BW);
+ if (event_add(&bevp->rate_limiting->refill_bucket_event,
+ &bevp->rate_limiting->cfg->tick_timeout) < 0)
+ r = -1;
+ } else if (old_limit <= 0 && new_limit > 0) {
+ if (!(bevp->read_suspended & BEV_SUSPEND_BW))
+ event_del(&bevp->rate_limiting->refill_bucket_event);
+ bufferevent_unsuspend_write_(bev, BEV_SUSPEND_BW);
+ }
+
+ BEV_UNLOCK(bev);
+ return r;
+}
+
+int
+bufferevent_rate_limit_group_decrement_read(
+ struct bufferevent_rate_limit_group *grp, ev_ssize_t decr)
+{
+ int r = 0;
+ ev_ssize_t old_limit, new_limit;
+ LOCK_GROUP(grp);
+ old_limit = grp->rate_limit.read_limit;
+ new_limit = (grp->rate_limit.read_limit -= decr);
+
+ if (old_limit > 0 && new_limit <= 0) {
+ bev_group_suspend_reading_(grp);
+ } else if (old_limit <= 0 && new_limit > 0) {
+ bev_group_unsuspend_reading_(grp);
+ }
+
+ UNLOCK_GROUP(grp);
+ return r;
+}
+
+int
+bufferevent_rate_limit_group_decrement_write(
+ struct bufferevent_rate_limit_group *grp, ev_ssize_t decr)
+{
+ int r = 0;
+ ev_ssize_t old_limit, new_limit;
+ LOCK_GROUP(grp);
+ old_limit = grp->rate_limit.write_limit;
+ new_limit = (grp->rate_limit.write_limit -= decr);
+
+ if (old_limit > 0 && new_limit <= 0) {
+ bev_group_suspend_writing_(grp);
+ } else if (old_limit <= 0 && new_limit > 0) {
+ bev_group_unsuspend_writing_(grp);
+ }
+
+ UNLOCK_GROUP(grp);
+ return r;
+}
+
+void
+bufferevent_rate_limit_group_get_totals(struct bufferevent_rate_limit_group *grp,
+ ev_uint64_t *total_read_out, ev_uint64_t *total_written_out)
+{
+ EVUTIL_ASSERT(grp != NULL);
+ if (total_read_out)
+ *total_read_out = grp->total_read;
+ if (total_written_out)
+ *total_written_out = grp->total_written;
+}
+
+void
+bufferevent_rate_limit_group_reset_totals(struct bufferevent_rate_limit_group *grp)
+{
+ grp->total_read = grp->total_written = 0;
+}
+
+int
+bufferevent_ratelim_init_(struct bufferevent_private *bev)
+{
+ bev->rate_limiting = NULL;
+ bev->max_single_read = MAX_SINGLE_READ_DEFAULT;
+ bev->max_single_write = MAX_SINGLE_WRITE_DEFAULT;
+
+ return 0;
+}
diff --git a/libs/libevent/src/bufferevent_sock.c b/libs/libevent/src/bufferevent_sock.c
new file mode 100644
index 0000000000..a2b381ac4d
--- /dev/null
+++ b/libs/libevent/src/bufferevent_sock.c
@@ -0,0 +1,707 @@
+/*
+ * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
+ * Copyright (c) 2002-2006 Niels Provos <provos@citi.umich.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "event2/event-config.h"
+#include "evconfig-private.h"
+
+#include <sys/types.h>
+
+#ifdef EVENT__HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#ifdef EVENT__HAVE_STDARG_H
+#include <stdarg.h>
+#endif
+#ifdef EVENT__HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+
+#ifdef _WIN32
+#include <winsock2.h>
+#include <ws2tcpip.h>
+#endif
+
+#ifdef EVENT__HAVE_SYS_SOCKET_H
+#include <sys/socket.h>
+#endif
+#ifdef EVENT__HAVE_NETINET_IN_H
+#include <netinet/in.h>
+#endif
+#ifdef EVENT__HAVE_NETINET_IN6_H
+#include <netinet/in6.h>
+#endif
+
+#include "event2/util.h"
+#include "event2/bufferevent.h"
+#include "event2/buffer.h"
+#include "event2/bufferevent_struct.h"
+#include "event2/bufferevent_compat.h"
+#include "event2/event.h"
+#include "log-internal.h"
+#include "mm-internal.h"
+#include "bufferevent-internal.h"
+#include "util-internal.h"
+#ifdef _WIN32
+#include "iocp-internal.h"
+#endif
+
+/* prototypes */
+static int be_socket_enable(struct bufferevent *, short);
+static int be_socket_disable(struct bufferevent *, short);
+static void be_socket_destruct(struct bufferevent *);
+static int be_socket_flush(struct bufferevent *, short, enum bufferevent_flush_mode);
+static int be_socket_ctrl(struct bufferevent *, enum bufferevent_ctrl_op, union bufferevent_ctrl_data *);
+
+static void be_socket_setfd(struct bufferevent *, evutil_socket_t);
+
+const struct bufferevent_ops bufferevent_ops_socket = {
+ "socket",
+ evutil_offsetof(struct bufferevent_private, bev),
+ be_socket_enable,
+ be_socket_disable,
+ NULL, /* unlink */
+ be_socket_destruct,
+ bufferevent_generic_adj_existing_timeouts_,
+ be_socket_flush,
+ be_socket_ctrl,
+};
+
+const struct sockaddr*
+bufferevent_socket_get_conn_address_(struct bufferevent *bev)
+{
+ struct bufferevent_private *bev_p =
+ EVUTIL_UPCAST(bev, struct bufferevent_private, bev);
+
+ return (struct sockaddr *)&bev_p->conn_address;
+}
+static void
+bufferevent_socket_set_conn_address_fd(struct bufferevent_private *bev_p, int fd)
+{
+ socklen_t len = sizeof(bev_p->conn_address);
+
+ struct sockaddr *addr = (struct sockaddr *)&bev_p->conn_address;
+ if (addr->sa_family != AF_UNSPEC)
+ getpeername(fd, addr, &len);
+}
+static void
+bufferevent_socket_set_conn_address(struct bufferevent_private *bev_p,
+ struct sockaddr *addr, size_t addrlen)
+{
+ EVUTIL_ASSERT(addrlen <= sizeof(bev_p->conn_address));
+ memcpy(&bev_p->conn_address, addr, addrlen);
+}
+
+static void
+bufferevent_socket_outbuf_cb(struct evbuffer *buf,
+ const struct evbuffer_cb_info *cbinfo,
+ void *arg)
+{
+ struct bufferevent *bufev = arg;
+ struct bufferevent_private *bufev_p =
+ EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);
+
+ if (cbinfo->n_added &&
+ (bufev->enabled & EV_WRITE) &&
+ !event_pending(&bufev->ev_write, EV_WRITE, NULL) &&
+ !bufev_p->write_suspended) {
+ /* Somebody added data to the buffer, and we would like to
+ * write, and we were not writing. So, start writing. */
+ if (bufferevent_add_event_(&bufev->ev_write, &bufev->timeout_write) == -1) {
+ /* Should we log this? */
+ }
+ }
+}
+
+static void
+bufferevent_readcb(evutil_socket_t fd, short event, void *arg)
+{
+ struct bufferevent *bufev = arg;
+ struct bufferevent_private *bufev_p =
+ EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);
+ struct evbuffer *input;
+ int res = 0;
+ short what = BEV_EVENT_READING;
+ ev_ssize_t howmuch = -1, readmax=-1;
+
+ bufferevent_incref_and_lock_(bufev);
+
+ if (event == EV_TIMEOUT) {
+ /* Note that we only check for event==EV_TIMEOUT. If
+ * event==EV_TIMEOUT|EV_READ, we can safely ignore the
+ * timeout, since a read has occurred */
+ what |= BEV_EVENT_TIMEOUT;
+ goto error;
+ }
+
+ input = bufev->input;
+
+ /*
+ * If we have a high watermark configured then we don't want to
+ * read more data than would make us reach the watermark.
+ */
+ if (bufev->wm_read.high != 0) {
+ howmuch = bufev->wm_read.high - evbuffer_get_length(input);
+ /* we somehow lowered the watermark, stop reading */
+ if (howmuch <= 0) {
+ bufferevent_wm_suspend_read(bufev);
+ goto done;
+ }
+ }
+ readmax = bufferevent_get_read_max_(bufev_p);
+ if (howmuch < 0 || howmuch > readmax) /* The use of -1 for "unlimited"
+ * uglifies this code. XXXX */
+ howmuch = readmax;
+ if (bufev_p->read_suspended)
+ goto done;
+
+ evbuffer_unfreeze(input, 0);
+ res = evbuffer_read(input, fd, (int)howmuch); /* XXXX evbuffer_read would do better to take and return ev_ssize_t */
+ evbuffer_freeze(input, 0);
+
+ if (res == -1) {
+ int err = evutil_socket_geterror(fd);
+ if (EVUTIL_ERR_RW_RETRIABLE(err))
+ goto reschedule;
+ /* error case */
+ what |= BEV_EVENT_ERROR;
+ } else if (res == 0) {
+ /* eof case */
+ what |= BEV_EVENT_EOF;
+ }
+
+ if (res <= 0)
+ goto error;
+
+ bufferevent_decrement_read_buckets_(bufev_p, res);
+
+ /* Invoke the user callback - must always be called last */
+ bufferevent_trigger_nolock_(bufev, EV_READ, 0);
+
+ goto done;
+
+ reschedule:
+ goto done;
+
+ error:
+ bufferevent_disable(bufev, EV_READ);
+ bufferevent_run_eventcb_(bufev, what, 0);
+
+ done:
+ bufferevent_decref_and_unlock_(bufev);
+}
+
+static void
+bufferevent_writecb(evutil_socket_t fd, short event, void *arg)
+{
+ struct bufferevent *bufev = arg;
+ struct bufferevent_private *bufev_p =
+ EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);
+ int res = 0;
+ short what = BEV_EVENT_WRITING;
+ int connected = 0;
+ ev_ssize_t atmost = -1;
+
+ bufferevent_incref_and_lock_(bufev);
+
+ if (event == EV_TIMEOUT) {
+ /* Note that we only check for event==EV_TIMEOUT. If
+ * event==EV_TIMEOUT|EV_WRITE, we can safely ignore the
+		 * timeout, since a write has occurred */
+ what |= BEV_EVENT_TIMEOUT;
+ goto error;
+ }
+ if (bufev_p->connecting) {
+ int c = evutil_socket_finished_connecting_(fd);
+ /* we need to fake the error if the connection was refused
+ * immediately - usually connection to localhost on BSD */
+ if (bufev_p->connection_refused) {
+ bufev_p->connection_refused = 0;
+ c = -1;
+ }
+
+ if (c == 0)
+ goto done;
+
+ bufev_p->connecting = 0;
+ if (c < 0) {
+ event_del(&bufev->ev_write);
+ event_del(&bufev->ev_read);
+ bufferevent_run_eventcb_(bufev, BEV_EVENT_ERROR, 0);
+ goto done;
+ } else {
+ connected = 1;
+ bufferevent_socket_set_conn_address_fd(bufev_p, fd);
+#ifdef _WIN32
+ if (BEV_IS_ASYNC(bufev)) {
+ event_del(&bufev->ev_write);
+ bufferevent_async_set_connected_(bufev);
+ bufferevent_run_eventcb_(bufev,
+ BEV_EVENT_CONNECTED, 0);
+ goto done;
+ }
+#endif
+ bufferevent_run_eventcb_(bufev,
+ BEV_EVENT_CONNECTED, 0);
+ if (!(bufev->enabled & EV_WRITE) ||
+ bufev_p->write_suspended) {
+ event_del(&bufev->ev_write);
+ goto done;
+ }
+ }
+ }
+
+ atmost = bufferevent_get_write_max_(bufev_p);
+
+ if (bufev_p->write_suspended)
+ goto done;
+
+ if (evbuffer_get_length(bufev->output)) {
+ evbuffer_unfreeze(bufev->output, 1);
+ res = evbuffer_write_atmost(bufev->output, fd, atmost);
+ evbuffer_freeze(bufev->output, 1);
+ if (res == -1) {
+ int err = evutil_socket_geterror(fd);
+ if (EVUTIL_ERR_RW_RETRIABLE(err))
+ goto reschedule;
+ what |= BEV_EVENT_ERROR;
+ } else if (res == 0) {
+ /* eof case
+ XXXX Actually, a 0 on write doesn't indicate
+ an EOF. An ECONNRESET might be more typical.
+ */
+ what |= BEV_EVENT_EOF;
+ }
+ if (res <= 0)
+ goto error;
+
+ bufferevent_decrement_write_buckets_(bufev_p, res);
+ }
+
+ if (evbuffer_get_length(bufev->output) == 0) {
+ event_del(&bufev->ev_write);
+ }
+
+ /*
+ * Invoke the user callback if our buffer is drained or below the
+ * low watermark.
+ */
+ if (res || !connected) {
+ bufferevent_trigger_nolock_(bufev, EV_WRITE, 0);
+ }
+
+ goto done;
+
+ reschedule:
+ if (evbuffer_get_length(bufev->output) == 0) {
+ event_del(&bufev->ev_write);
+ }
+ goto done;
+
+ error:
+ bufferevent_disable(bufev, EV_WRITE);
+ bufferevent_run_eventcb_(bufev, what, 0);
+
+ done:
+ bufferevent_decref_and_unlock_(bufev);
+}
+
+struct bufferevent *
+bufferevent_socket_new(struct event_base *base, evutil_socket_t fd,
+ int options)
+{
+ struct bufferevent_private *bufev_p;
+ struct bufferevent *bufev;
+
+#ifdef _WIN32
+ if (base && event_base_get_iocp_(base))
+ return bufferevent_async_new_(base, fd, options);
+#endif
+
+ if ((bufev_p = mm_calloc(1, sizeof(struct bufferevent_private)))== NULL)
+ return NULL;
+
+ if (bufferevent_init_common_(bufev_p, base, &bufferevent_ops_socket,
+ options) < 0) {
+ mm_free(bufev_p);
+ return NULL;
+ }
+ bufev = &bufev_p->bev;
+ evbuffer_set_flags(bufev->output, EVBUFFER_FLAG_DRAINS_TO_FD);
+
+ event_assign(&bufev->ev_read, bufev->ev_base, fd,
+ EV_READ|EV_PERSIST|EV_FINALIZE, bufferevent_readcb, bufev);
+ event_assign(&bufev->ev_write, bufev->ev_base, fd,
+ EV_WRITE|EV_PERSIST|EV_FINALIZE, bufferevent_writecb, bufev);
+
+ evbuffer_add_cb(bufev->output, bufferevent_socket_outbuf_cb, bufev);
+
+ evbuffer_freeze(bufev->input, 0);
+ evbuffer_freeze(bufev->output, 1);
+
+ return bufev;
+}
+
+int
+bufferevent_socket_connect(struct bufferevent *bev,
+ const struct sockaddr *sa, int socklen)
+{
+ struct bufferevent_private *bufev_p =
+ EVUTIL_UPCAST(bev, struct bufferevent_private, bev);
+
+ evutil_socket_t fd;
+ int r = 0;
+ int result=-1;
+ int ownfd = 0;
+
+ bufferevent_incref_and_lock_(bev);
+
+ if (!bufev_p)
+ goto done;
+
+ fd = bufferevent_getfd(bev);
+ if (fd < 0) {
+ if (!sa)
+ goto done;
+ fd = evutil_socket_(sa->sa_family,
+ SOCK_STREAM|EVUTIL_SOCK_NONBLOCK, 0);
+ if (fd < 0)
+ goto done;
+ ownfd = 1;
+ }
+ if (sa) {
+#ifdef _WIN32
+ if (bufferevent_async_can_connect_(bev)) {
+ bufferevent_setfd(bev, fd);
+ r = bufferevent_async_connect_(bev, fd, sa, socklen);
+ if (r < 0)
+ goto freesock;
+ bufev_p->connecting = 1;
+ result = 0;
+ goto done;
+ } else
+#endif
+ r = evutil_socket_connect_(&fd, sa, socklen);
+ if (r < 0)
+ goto freesock;
+ }
+#ifdef _WIN32
+ /* ConnectEx() isn't always around, even when IOCP is enabled.
+ * Here, we borrow the socket object's write handler to fall back
+ * on a non-blocking connect() when ConnectEx() is unavailable. */
+ if (BEV_IS_ASYNC(bev)) {
+ event_assign(&bev->ev_write, bev->ev_base, fd,
+ EV_WRITE|EV_PERSIST|EV_FINALIZE, bufferevent_writecb, bev);
+ }
+#endif
+ bufferevent_setfd(bev, fd);
+ if (r == 0) {
+ if (! be_socket_enable(bev, EV_WRITE)) {
+ bufev_p->connecting = 1;
+ result = 0;
+ goto done;
+ }
+ } else if (r == 1) {
+ /* The connect succeeded already. How very BSD of it. */
+ result = 0;
+ bufev_p->connecting = 1;
+ event_active(&bev->ev_write, EV_WRITE, 1);
+ } else {
+ /* The connect failed already. How very BSD of it. */
+ bufev_p->connection_refused = 1;
+ bufev_p->connecting = 1;
+ result = 0;
+ event_active(&bev->ev_write, EV_WRITE, 1);
+ }
+
+ goto done;
+
+freesock:
+ bufferevent_run_eventcb_(bev, BEV_EVENT_ERROR, 0);
+ if (ownfd)
+ evutil_closesocket(fd);
+ /* do something about the error? */
+done:
+ bufferevent_decref_and_unlock_(bev);
+ return result;
+}
+
+static void
+bufferevent_connect_getaddrinfo_cb(int result, struct evutil_addrinfo *ai,
+ void *arg)
+{
+ struct bufferevent *bev = arg;
+ struct bufferevent_private *bev_p =
+ EVUTIL_UPCAST(bev, struct bufferevent_private, bev);
+ int r;
+ BEV_LOCK(bev);
+
+ bufferevent_unsuspend_write_(bev, BEV_SUSPEND_LOOKUP);
+ bufferevent_unsuspend_read_(bev, BEV_SUSPEND_LOOKUP);
+
+ if (result != 0) {
+ bev_p->dns_error = result;
+ bufferevent_run_eventcb_(bev, BEV_EVENT_ERROR, 0);
+ bufferevent_decref_and_unlock_(bev);
+ if (ai)
+ evutil_freeaddrinfo(ai);
+ return;
+ }
+
+ /* XXX use the other addrinfos? */
+ /* XXX use this return value */
+ bufferevent_socket_set_conn_address(bev_p, ai->ai_addr, (int)ai->ai_addrlen);
+ r = bufferevent_socket_connect(bev, ai->ai_addr, (int)ai->ai_addrlen);
+ (void)r;
+ bufferevent_decref_and_unlock_(bev);
+ evutil_freeaddrinfo(ai);
+}
+
+int
+bufferevent_socket_connect_hostname(struct bufferevent *bev,
+ struct evdns_base *evdns_base, int family, const char *hostname, int port)
+{
+ char portbuf[10];
+ struct evutil_addrinfo hint;
+ int err;
+ struct bufferevent_private *bev_p =
+ EVUTIL_UPCAST(bev, struct bufferevent_private, bev);
+
+ if (family != AF_INET && family != AF_INET6 && family != AF_UNSPEC)
+ return -1;
+ if (port < 1 || port > 65535)
+ return -1;
+
+ memset(&hint, 0, sizeof(hint));
+ hint.ai_family = family;
+ hint.ai_protocol = IPPROTO_TCP;
+ hint.ai_socktype = SOCK_STREAM;
+
+ evutil_snprintf(portbuf, sizeof(portbuf), "%d", port);
+
+ BEV_LOCK(bev);
+ bev_p->dns_error = 0;
+
+ bufferevent_suspend_write_(bev, BEV_SUSPEND_LOOKUP);
+ bufferevent_suspend_read_(bev, BEV_SUSPEND_LOOKUP);
+
+ bufferevent_incref_(bev);
+ err = evutil_getaddrinfo_async_(evdns_base, hostname, portbuf,
+ &hint, bufferevent_connect_getaddrinfo_cb, bev);
+ BEV_UNLOCK(bev);
+
+ if (err == 0) {
+ return 0;
+ } else {
+ bufferevent_unsuspend_write_(bev, BEV_SUSPEND_LOOKUP);
+ bufferevent_unsuspend_read_(bev, BEV_SUSPEND_LOOKUP);
+ bufferevent_decref_(bev);
+ return -1;
+ }
+}
+
+int
+bufferevent_socket_get_dns_error(struct bufferevent *bev)
+{
+ int rv;
+ struct bufferevent_private *bev_p =
+ EVUTIL_UPCAST(bev, struct bufferevent_private, bev);
+
+ BEV_LOCK(bev);
+ rv = bev_p->dns_error;
+ BEV_UNLOCK(bev);
+
+ return rv;
+}
+
+/*
+ * Create a new buffered event object.
+ *
+ * The read callback is invoked whenever we read new data.
+ * The write callback is invoked whenever the output buffer is drained.
+ * The error callback is invoked on a write/read error or on EOF.
+ *
+ * Both read and write callbacks may be NULL.  The error callback is not
+ * allowed to be NULL and must always be provided.
+ */
+
+struct bufferevent *
+bufferevent_new(evutil_socket_t fd,
+ bufferevent_data_cb readcb, bufferevent_data_cb writecb,
+ bufferevent_event_cb eventcb, void *cbarg)
+{
+ struct bufferevent *bufev;
+
+ if (!(bufev = bufferevent_socket_new(NULL, fd, 0)))
+ return NULL;
+
+ bufferevent_setcb(bufev, readcb, writecb, eventcb, cbarg);
+
+ return bufev;
+}
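+
+/* Illustrative sketch (editor's addition, not upstream libevent code): the
+ * usual way to combine the socket API above -- create, set callbacks,
+ * enable, then connect by hostname.  Passing a NULL evdns_base makes the
+ * lookup blocking.  Guarded by #if 0 so it cannot affect the build. */
+#if 0
+static void
+example_event_cb(struct bufferevent *bev, short what, void *ctx)
+{
+	if (what & (BEV_EVENT_ERROR | BEV_EVENT_EOF))
+		bufferevent_free(bev);
+}
+
+static int
+example_connect(struct event_base *base, struct evdns_base *dns)
+{
+	struct bufferevent *bev;
+
+	bev = bufferevent_socket_new(base, -1, BEV_OPT_CLOSE_ON_FREE);
+	if (!bev)
+		return -1;
+	bufferevent_setcb(bev, NULL, NULL, example_event_cb, NULL);
+	bufferevent_enable(bev, EV_READ | EV_WRITE);
+	return bufferevent_socket_connect_hostname(bev, dns, AF_UNSPEC,
+	    "www.example.com", 80);
+}
+#endif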
+
+
+static int
+be_socket_enable(struct bufferevent *bufev, short event)
+{
+ if (event & EV_READ &&
+ bufferevent_add_event_(&bufev->ev_read, &bufev->timeout_read) == -1)
+ return -1;
+ if (event & EV_WRITE &&
+ bufferevent_add_event_(&bufev->ev_write, &bufev->timeout_write) == -1)
+ return -1;
+ return 0;
+}
+
+static int
+be_socket_disable(struct bufferevent *bufev, short event)
+{
+ struct bufferevent_private *bufev_p =
+ EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);
+ if (event & EV_READ) {
+ if (event_del(&bufev->ev_read) == -1)
+ return -1;
+ }
+ /* Don't actually disable the write if we are trying to connect. */
+ if ((event & EV_WRITE) && ! bufev_p->connecting) {
+ if (event_del(&bufev->ev_write) == -1)
+ return -1;
+ }
+ return 0;
+}
+
+static void
+be_socket_destruct(struct bufferevent *bufev)
+{
+ struct bufferevent_private *bufev_p =
+ EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);
+ evutil_socket_t fd;
+ EVUTIL_ASSERT(bufev->be_ops == &bufferevent_ops_socket);
+
+ fd = event_get_fd(&bufev->ev_read);
+
+ if ((bufev_p->options & BEV_OPT_CLOSE_ON_FREE) && fd >= 0)
+ EVUTIL_CLOSESOCKET(fd);
+}
+
+static int
+be_socket_flush(struct bufferevent *bev, short iotype,
+ enum bufferevent_flush_mode mode)
+{
+ return 0;
+}
+
+
+static void
+be_socket_setfd(struct bufferevent *bufev, evutil_socket_t fd)
+{
+ BEV_LOCK(bufev);
+ EVUTIL_ASSERT(bufev->be_ops == &bufferevent_ops_socket);
+
+ event_del(&bufev->ev_read);
+ event_del(&bufev->ev_write);
+
+ event_assign(&bufev->ev_read, bufev->ev_base, fd,
+ EV_READ|EV_PERSIST|EV_FINALIZE, bufferevent_readcb, bufev);
+ event_assign(&bufev->ev_write, bufev->ev_base, fd,
+ EV_WRITE|EV_PERSIST|EV_FINALIZE, bufferevent_writecb, bufev);
+
+ if (fd >= 0)
+ bufferevent_enable(bufev, bufev->enabled);
+
+ BEV_UNLOCK(bufev);
+}
+
+/* XXXX Should non-socket bufferevents support this? */
+int
+bufferevent_priority_set(struct bufferevent *bufev, int priority)
+{
+ int r = -1;
+ struct bufferevent_private *bufev_p =
+ EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);
+
+ BEV_LOCK(bufev);
+ if (bufev->be_ops != &bufferevent_ops_socket)
+ goto done;
+
+ if (event_priority_set(&bufev->ev_read, priority) == -1)
+ goto done;
+ if (event_priority_set(&bufev->ev_write, priority) == -1)
+ goto done;
+
+ event_deferred_cb_set_priority_(&bufev_p->deferred, priority);
+
+ r = 0;
+done:
+ BEV_UNLOCK(bufev);
+ return r;
+}
+
+/* XXXX Should non-socket bufferevents support this? */
+int
+bufferevent_base_set(struct event_base *base, struct bufferevent *bufev)
+{
+ int res = -1;
+
+ BEV_LOCK(bufev);
+ if (bufev->be_ops != &bufferevent_ops_socket)
+ goto done;
+
+ bufev->ev_base = base;
+
+ res = event_base_set(base, &bufev->ev_read);
+ if (res == -1)
+ goto done;
+
+ res = event_base_set(base, &bufev->ev_write);
+done:
+ BEV_UNLOCK(bufev);
+ return res;
+}
+
+static int
+be_socket_ctrl(struct bufferevent *bev, enum bufferevent_ctrl_op op,
+ union bufferevent_ctrl_data *data)
+{
+ switch (op) {
+ case BEV_CTRL_SET_FD:
+ be_socket_setfd(bev, data->fd);
+ return 0;
+ case BEV_CTRL_GET_FD:
+ data->fd = event_get_fd(&bev->ev_read);
+ return 0;
+ case BEV_CTRL_GET_UNDERLYING:
+ case BEV_CTRL_CANCEL_ALL:
+ default:
+ return -1;
+ }
+}
diff --git a/libs/libevent/src/changelist-internal.h b/libs/libevent/src/changelist-internal.h
new file mode 100644
index 0000000000..98fc52aebf
--- /dev/null
+++ b/libs/libevent/src/changelist-internal.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2009-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef CHANGELIST_INTERNAL_H_INCLUDED_
+#define CHANGELIST_INTERNAL_H_INCLUDED_
+
+/*
+ A "changelist" is a list of all the fd status changes that should be made
+ between calls to the backend's dispatch function. There are a few reasons
+ that a backend would want to queue changes like this rather than processing
+ them immediately.
+
+ 1) Sometimes applications will add and delete the same event more than
+ once between calls to dispatch. Processing these changes immediately
+ is needless, and potentially expensive (especially if we're on a system
+ that makes one syscall per changed event).
+
+ 2) Sometimes we can coalesce multiple changes on the same fd into a single
+ syscall if we know about them in advance. For example, epoll can do an
+ add and a delete at the same time, but only if we have found out about
+ both of them before we tell epoll.
+
+ 3) Sometimes adding an event that we immediately delete can cause
+ unintended consequences: in kqueue, this makes pending events get
+ reported spuriously.
+ */
+
+#include "event2/util.h"
+
+/** Represents a pending change to the events we are watching on a given
+ * fd or signal. */
+struct event_change {
+ /** The fd or signal whose events are to be changed */
+ evutil_socket_t fd;
+ /* The events that were enabled on the fd before any of these changes
+ were made. May include EV_READ or EV_WRITE. */
+ short old_events;
+
+ /* The changes that we want to make in reading and writing on this fd.
+ * If this is a signal, then read_change has EV_CHANGE_SIGNAL set,
+ * and write_change is unused. */
+ ev_uint8_t read_change;
+ ev_uint8_t write_change;
+ ev_uint8_t close_change;
+};
+
+/* Flags for read_change and write_change. */
+
+/* If set, add the event. */
+#define EV_CHANGE_ADD 0x01
+/* If set, delete the event. Exclusive with EV_CHANGE_ADD */
+#define EV_CHANGE_DEL 0x02
+/* If set, this event refers to a signal, not an fd. */
+#define EV_CHANGE_SIGNAL EV_SIGNAL
+/* Set for persistent events. Currently not used. */
+#define EV_CHANGE_PERSIST EV_PERSIST
+/* Set for adding edge-triggered events. */
+#define EV_CHANGE_ET EV_ET
+
+/* The value of fdinfo_size that a backend should use if it is letting
+ * changelist handle its add and delete functions. */
+#define EVENT_CHANGELIST_FDINFO_SIZE sizeof(int)
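+
+/* Illustrative sketch (editor's addition, not part of the original header):
+ * after an event_add() of an edge-triggered read event on fd 7 that had
+ * nothing enabled before, the queued change looks roughly like this:
+ *
+ *	struct event_change ch = {
+ *		.fd = 7,
+ *		.old_events = 0,
+ *		.read_change = EV_CHANGE_ADD | EV_CHANGE_ET,
+ *		.write_change = 0,
+ *		.close_change = 0,
+ *	};
+ *
+ * A matching event_del() before the next dispatch simply clears read_change
+ * again, so the backend never sees either operation (reason 1 above). */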
+
+/** Set up the data fields in a changelist. */
+void event_changelist_init_(struct event_changelist *changelist);
+/** Remove every change in the changelist, and make corresponding changes
+ * in the event maps in the base. This function is generally used right
+ * after making all the changes in the changelist. */
+void event_changelist_remove_all_(struct event_changelist *changelist,
+ struct event_base *base);
+/** Free all memory held in a changelist. */
+void event_changelist_freemem_(struct event_changelist *changelist);
+
+/** Implementation of eventop_add that queues the event in a changelist. */
+int event_changelist_add_(struct event_base *base, evutil_socket_t fd, short old, short events,
+ void *p);
+/** Implementation of eventop_del that queues the event in a changelist. */
+int event_changelist_del_(struct event_base *base, evutil_socket_t fd, short old, short events,
+ void *p);
+
+#endif
diff --git a/libs/libevent/src/compat/sys/queue.h b/libs/libevent/src/compat/sys/queue.h
new file mode 100644
index 0000000000..c387bdcf50
--- /dev/null
+++ b/libs/libevent/src/compat/sys/queue.h
@@ -0,0 +1,488 @@
+/* $OpenBSD: queue.h,v 1.16 2000/09/07 19:47:59 art Exp $ */
+/* $NetBSD: queue.h,v 1.11 1996/05/16 05:17:14 mycroft Exp $ */
+
+/*
+ * Copyright (c) 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)queue.h 8.5 (Berkeley) 8/20/94
+ */
+
+#ifndef SYS_QUEUE_H__
+#define SYS_QUEUE_H__
+
+/*
+ * This file defines five types of data structures: singly-linked lists,
+ * lists, simple queues, tail queues, and circular queues.
+ *
+ *
+ * A singly-linked list is headed by a single forward pointer. The elements
+ * are singly linked for minimum space and pointer manipulation overhead at
+ * the expense of O(n) removal for arbitrary elements. New elements can be
+ * added to the list after an existing element or at the head of the list.
+ * Elements being removed from the head of the list should use the explicit
+ * macro for this purpose for optimum efficiency. A singly-linked list may
+ * only be traversed in the forward direction. Singly-linked lists are ideal
+ * for applications with large datasets and few or no removals or for
+ * implementing a LIFO queue.
+ *
+ * A list is headed by a single forward pointer (or an array of forward
+ * pointers for a hash table header). The elements are doubly linked
+ * so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before
+ * or after an existing element or at the head of the list. A list
+ * may only be traversed in the forward direction.
+ *
+ * A simple queue is headed by a pair of pointers, one the head of the
+ * list and the other to the tail of the list. The elements are singly
+ * linked to save space, so elements can only be removed from the
+ * head of the list. New elements can be added to the list before or after
+ * an existing element, at the head of the list, or at the end of the
+ * list. A simple queue may only be traversed in the forward direction.
+ *
+ * A tail queue is headed by a pair of pointers, one to the head of the
+ * list and the other to the tail of the list. The elements are doubly
+ * linked so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before or
+ * after an existing element, at the head of the list, or at the end of
+ * the list. A tail queue may be traversed in either direction.
+ *
+ * A circle queue is headed by a pair of pointers, one to the head of the
+ * list and the other to the tail of the list. The elements are doubly
+ * linked so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before or after
+ * an existing element, at the head of the list, or at the end of the list.
+ * A circle queue may be traversed in either direction, but has a more
+ * complex end of list detection.
+ *
+ * For details on the use of these macros, see the queue(3) manual page.
+ */
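
To make the descriptions above concrete, here is a minimal sketch of the singly-linked (LIFO) case using the SLIST_* macros defined just below. struct item, its members, lifo_demo, and the include path are invented for the example; note that this copy hides SLIST_ENTRY behind #ifndef _WIN32, presumably because a Windows SDK header already defines that name.

#include "compat/sys/queue.h"	/* hypothetical path; this file in the tree */

struct item {
	int value;
	SLIST_ENTRY(item) link;		/* embedded forward pointer */
};

SLIST_HEAD(item_list, item);		/* declares struct item_list */

static void
lifo_demo(struct item *a, struct item *b)
{
	struct item_list head = SLIST_HEAD_INITIALIZER(head);
	struct item *it;

	SLIST_INSERT_HEAD(&head, a, link);	/* list: a */
	SLIST_INSERT_HEAD(&head, b, link);	/* list: b -> a */

	SLIST_FOREACH(it, &head, link)
		(void)it->value;		/* visits b, then a */

	SLIST_REMOVE_HEAD(&head, link);		/* pops b; a is the head again */
}

Head insertion plus head removal is exactly the LIFO pattern the comment above recommends singly-linked lists for.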
+
+/*
+ * Singly-linked List definitions.
+ */
+#define SLIST_HEAD(name, type) \
+struct name { \
+ struct type *slh_first; /* first element */ \
+}
+
+#define SLIST_HEAD_INITIALIZER(head) \
+ { NULL }
+
+#ifndef _WIN32
+#define SLIST_ENTRY(type) \
+struct { \
+ struct type *sle_next; /* next element */ \
+}
+#endif
+
+/*
+ * Singly-linked List access methods.
+ */
+#define SLIST_FIRST(head) ((head)->slh_first)
+#define SLIST_END(head) NULL
+#define SLIST_EMPTY(head) (SLIST_FIRST(head) == SLIST_END(head))
+#define SLIST_NEXT(elm, field) ((elm)->field.sle_next)
+
+#define SLIST_FOREACH(var, head, field) \
+ for((var) = SLIST_FIRST(head); \
+ (var) != SLIST_END(head); \
+ (var) = SLIST_NEXT(var, field))
+
+/*
+ * Singly-linked List functions.
+ */
+#define SLIST_INIT(head) { \
+ SLIST_FIRST(head) = SLIST_END(head); \
+}
+
+#define SLIST_INSERT_AFTER(slistelm, elm, field) do { \
+ (elm)->field.sle_next = (slistelm)->field.sle_next; \
+ (slistelm)->field.sle_next = (elm); \
+} while (0)
+
+#define SLIST_INSERT_HEAD(head, elm, field) do { \
+ (elm)->field.sle_next = (head)->slh_first; \
+ (head)->slh_first = (elm); \
+} while (0)
+
+#define SLIST_REMOVE_HEAD(head, field) do { \
+ (head)->slh_first = (head)->slh_first->field.sle_next; \
+} while (0)
+
+/*
+ * List definitions.
+ */
+#define LIST_HEAD(name, type) \
+struct name { \
+ struct type *lh_first; /* first element */ \
+}
+
+#define LIST_HEAD_INITIALIZER(head) \
+ { NULL }
+
+#define LIST_ENTRY(type) \
+struct { \
+ struct type *le_next; /* next element */ \
+ struct type **le_prev; /* address of previous next element */ \
+}
+
+/*
+ * List access methods
+ */
+#define LIST_FIRST(head) ((head)->lh_first)
+#define LIST_END(head) NULL
+#define LIST_EMPTY(head) (LIST_FIRST(head) == LIST_END(head))
+#define LIST_NEXT(elm, field) ((elm)->field.le_next)
+
+#define LIST_FOREACH(var, head, field) \
+ for((var) = LIST_FIRST(head); \
+	    (var) != LIST_END(head);					\
+ (var) = LIST_NEXT(var, field))
+
+/*
+ * List functions.
+ */
+#define LIST_INIT(head) do { \
+ LIST_FIRST(head) = LIST_END(head); \
+} while (0)
+
+#define LIST_INSERT_AFTER(listelm, elm, field) do { \
+ if (((elm)->field.le_next = (listelm)->field.le_next) != NULL) \
+ (listelm)->field.le_next->field.le_prev = \
+ &(elm)->field.le_next; \
+ (listelm)->field.le_next = (elm); \
+ (elm)->field.le_prev = &(listelm)->field.le_next; \
+} while (0)
+
+#define LIST_INSERT_BEFORE(listelm, elm, field) do { \
+ (elm)->field.le_prev = (listelm)->field.le_prev; \
+ (elm)->field.le_next = (listelm); \
+ *(listelm)->field.le_prev = (elm); \
+ (listelm)->field.le_prev = &(elm)->field.le_next; \
+} while (0)
+
+#define LIST_INSERT_HEAD(head, elm, field) do { \
+ if (((elm)->field.le_next = (head)->lh_first) != NULL) \
+ (head)->lh_first->field.le_prev = &(elm)->field.le_next;\
+ (head)->lh_first = (elm); \
+ (elm)->field.le_prev = &(head)->lh_first; \
+} while (0)
+
+#define LIST_REMOVE(elm, field) do { \
+ if ((elm)->field.le_next != NULL) \
+ (elm)->field.le_next->field.le_prev = \
+ (elm)->field.le_prev; \
+ *(elm)->field.le_prev = (elm)->field.le_next; \
+} while (0)
+
+#define LIST_REPLACE(elm, elm2, field) do { \
+ if (((elm2)->field.le_next = (elm)->field.le_next) != NULL) \
+ (elm2)->field.le_next->field.le_prev = \
+ &(elm2)->field.le_next; \
+ (elm2)->field.le_prev = (elm)->field.le_prev; \
+ *(elm2)->field.le_prev = (elm2); \
+} while (0)
+
+/*
+ * Simple queue definitions.
+ */
+#define SIMPLEQ_HEAD(name, type) \
+struct name { \
+ struct type *sqh_first; /* first element */ \
+ struct type **sqh_last; /* addr of last next element */ \
+}
+
+#define SIMPLEQ_HEAD_INITIALIZER(head) \
+ { NULL, &(head).sqh_first }
+
+#define SIMPLEQ_ENTRY(type) \
+struct { \
+ struct type *sqe_next; /* next element */ \
+}
+
+/*
+ * Simple queue access methods.
+ */
+#define SIMPLEQ_FIRST(head) ((head)->sqh_first)
+#define SIMPLEQ_END(head) NULL
+#define SIMPLEQ_EMPTY(head) (SIMPLEQ_FIRST(head) == SIMPLEQ_END(head))
+#define SIMPLEQ_NEXT(elm, field) ((elm)->field.sqe_next)
+
+#define SIMPLEQ_FOREACH(var, head, field) \
+ for((var) = SIMPLEQ_FIRST(head); \
+ (var) != SIMPLEQ_END(head); \
+ (var) = SIMPLEQ_NEXT(var, field))
+
+/*
+ * Simple queue functions.
+ */
+#define SIMPLEQ_INIT(head) do { \
+ (head)->sqh_first = NULL; \
+ (head)->sqh_last = &(head)->sqh_first; \
+} while (0)
+
+#define SIMPLEQ_INSERT_HEAD(head, elm, field) do { \
+ if (((elm)->field.sqe_next = (head)->sqh_first) == NULL) \
+ (head)->sqh_last = &(elm)->field.sqe_next; \
+ (head)->sqh_first = (elm); \
+} while (0)
+
+#define SIMPLEQ_INSERT_TAIL(head, elm, field) do { \
+ (elm)->field.sqe_next = NULL; \
+ *(head)->sqh_last = (elm); \
+ (head)->sqh_last = &(elm)->field.sqe_next; \
+} while (0)
+
+#define SIMPLEQ_INSERT_AFTER(head, listelm, elm, field) do { \
+ if (((elm)->field.sqe_next = (listelm)->field.sqe_next) == NULL)\
+ (head)->sqh_last = &(elm)->field.sqe_next; \
+ (listelm)->field.sqe_next = (elm); \
+} while (0)
+
+#define SIMPLEQ_REMOVE_HEAD(head, elm, field) do { \
+ if (((head)->sqh_first = (elm)->field.sqe_next) == NULL) \
+ (head)->sqh_last = &(head)->sqh_first; \
+} while (0)
+
+/*
+ * Tail queue definitions.
+ */
+#define TAILQ_HEAD(name, type) \
+struct name { \
+ struct type *tqh_first; /* first element */ \
+ struct type **tqh_last; /* addr of last next element */ \
+}
+
+#define TAILQ_HEAD_INITIALIZER(head) \
+ { NULL, &(head).tqh_first }
+
+#define TAILQ_ENTRY(type) \
+struct { \
+ struct type *tqe_next; /* next element */ \
+ struct type **tqe_prev; /* address of previous next element */ \
+}
+
+/*
+ * tail queue access methods
+ */
+#define TAILQ_FIRST(head) ((head)->tqh_first)
+#define TAILQ_END(head) NULL
+#define TAILQ_NEXT(elm, field) ((elm)->field.tqe_next)
+#define TAILQ_LAST(head, headname) \
+ (*(((struct headname *)((head)->tqh_last))->tqh_last))
+/* XXX */
+#define TAILQ_PREV(elm, headname, field) \
+ (*(((struct headname *)((elm)->field.tqe_prev))->tqh_last))
+#define TAILQ_EMPTY(head) \
+ (TAILQ_FIRST(head) == TAILQ_END(head))
+
+#define TAILQ_FOREACH(var, head, field) \
+ for((var) = TAILQ_FIRST(head); \
+ (var) != TAILQ_END(head); \
+ (var) = TAILQ_NEXT(var, field))
+
+#define TAILQ_FOREACH_REVERSE(var, head, headname, field) \
+ for((var) = TAILQ_LAST(head, headname); \
+ (var) != TAILQ_END(head); \
+ (var) = TAILQ_PREV(var, headname, field))
+
+/*
+ * Tail queue functions.
+ */
+#define TAILQ_INIT(head) do { \
+ (head)->tqh_first = NULL; \
+ (head)->tqh_last = &(head)->tqh_first; \
+} while (0)
+
+#define TAILQ_INSERT_HEAD(head, elm, field) do { \
+ if (((elm)->field.tqe_next = (head)->tqh_first) != NULL) \
+ (head)->tqh_first->field.tqe_prev = \
+ &(elm)->field.tqe_next; \
+ else \
+ (head)->tqh_last = &(elm)->field.tqe_next; \
+ (head)->tqh_first = (elm); \
+ (elm)->field.tqe_prev = &(head)->tqh_first; \
+} while (0)
+
+#define TAILQ_INSERT_TAIL(head, elm, field) do { \
+ (elm)->field.tqe_next = NULL; \
+ (elm)->field.tqe_prev = (head)->tqh_last; \
+ *(head)->tqh_last = (elm); \
+ (head)->tqh_last = &(elm)->field.tqe_next; \
+} while (0)
+
+#define TAILQ_INSERT_AFTER(head, listelm, elm, field) do { \
+ if (((elm)->field.tqe_next = (listelm)->field.tqe_next) != NULL)\
+ (elm)->field.tqe_next->field.tqe_prev = \
+ &(elm)->field.tqe_next; \
+ else \
+ (head)->tqh_last = &(elm)->field.tqe_next; \
+ (listelm)->field.tqe_next = (elm); \
+ (elm)->field.tqe_prev = &(listelm)->field.tqe_next; \
+} while (0)
+
+#define TAILQ_INSERT_BEFORE(listelm, elm, field) do { \
+ (elm)->field.tqe_prev = (listelm)->field.tqe_prev; \
+ (elm)->field.tqe_next = (listelm); \
+ *(listelm)->field.tqe_prev = (elm); \
+ (listelm)->field.tqe_prev = &(elm)->field.tqe_next; \
+} while (0)
+
+#define TAILQ_REMOVE(head, elm, field) do { \
+ if (((elm)->field.tqe_next) != NULL) \
+ (elm)->field.tqe_next->field.tqe_prev = \
+ (elm)->field.tqe_prev; \
+ else \
+ (head)->tqh_last = (elm)->field.tqe_prev; \
+ *(elm)->field.tqe_prev = (elm)->field.tqe_next; \
+} while (0)
+
+#define TAILQ_REPLACE(head, elm, elm2, field) do { \
+ if (((elm2)->field.tqe_next = (elm)->field.tqe_next) != NULL) \
+ (elm2)->field.tqe_next->field.tqe_prev = \
+ &(elm2)->field.tqe_next; \
+ else \
+ (head)->tqh_last = &(elm2)->field.tqe_next; \
+ (elm2)->field.tqe_prev = (elm)->field.tqe_prev; \
+ *(elm2)->field.tqe_prev = (elm2); \
+} while (0)
+
+/*
+ * Circular queue definitions.
+ */
+#define CIRCLEQ_HEAD(name, type) \
+struct name { \
+ struct type *cqh_first; /* first element */ \
+ struct type *cqh_last; /* last element */ \
+}
+
+#define CIRCLEQ_HEAD_INITIALIZER(head) \
+ { CIRCLEQ_END(&head), CIRCLEQ_END(&head) }
+
+#define CIRCLEQ_ENTRY(type) \
+struct { \
+ struct type *cqe_next; /* next element */ \
+ struct type *cqe_prev; /* previous element */ \
+}
+
+/*
+ * Circular queue access methods
+ */
+#define CIRCLEQ_FIRST(head) ((head)->cqh_first)
+#define CIRCLEQ_LAST(head) ((head)->cqh_last)
+#define CIRCLEQ_END(head) ((void *)(head))
+#define CIRCLEQ_NEXT(elm, field) ((elm)->field.cqe_next)
+#define CIRCLEQ_PREV(elm, field) ((elm)->field.cqe_prev)
+#define CIRCLEQ_EMPTY(head) \
+ (CIRCLEQ_FIRST(head) == CIRCLEQ_END(head))
+
+#define CIRCLEQ_FOREACH(var, head, field) \
+ for((var) = CIRCLEQ_FIRST(head); \
+ (var) != CIRCLEQ_END(head); \
+ (var) = CIRCLEQ_NEXT(var, field))
+
+#define CIRCLEQ_FOREACH_REVERSE(var, head, field) \
+ for((var) = CIRCLEQ_LAST(head); \
+ (var) != CIRCLEQ_END(head); \
+ (var) = CIRCLEQ_PREV(var, field))
+
+/*
+ * Circular queue functions.
+ */
+#define CIRCLEQ_INIT(head) do { \
+ (head)->cqh_first = CIRCLEQ_END(head); \
+ (head)->cqh_last = CIRCLEQ_END(head); \
+} while (0)
+
+#define CIRCLEQ_INSERT_AFTER(head, listelm, elm, field) do { \
+ (elm)->field.cqe_next = (listelm)->field.cqe_next; \
+ (elm)->field.cqe_prev = (listelm); \
+ if ((listelm)->field.cqe_next == CIRCLEQ_END(head)) \
+ (head)->cqh_last = (elm); \
+ else \
+ (listelm)->field.cqe_next->field.cqe_prev = (elm); \
+ (listelm)->field.cqe_next = (elm); \
+} while (0)
+
+#define CIRCLEQ_INSERT_BEFORE(head, listelm, elm, field) do { \
+ (elm)->field.cqe_next = (listelm); \
+ (elm)->field.cqe_prev = (listelm)->field.cqe_prev; \
+ if ((listelm)->field.cqe_prev == CIRCLEQ_END(head)) \
+ (head)->cqh_first = (elm); \
+ else \
+ (listelm)->field.cqe_prev->field.cqe_next = (elm); \
+ (listelm)->field.cqe_prev = (elm); \
+} while (0)
+
+#define CIRCLEQ_INSERT_HEAD(head, elm, field) do { \
+ (elm)->field.cqe_next = (head)->cqh_first; \
+ (elm)->field.cqe_prev = CIRCLEQ_END(head); \
+ if ((head)->cqh_last == CIRCLEQ_END(head)) \
+ (head)->cqh_last = (elm); \
+ else \
+ (head)->cqh_first->field.cqe_prev = (elm); \
+ (head)->cqh_first = (elm); \
+} while (0)
+
+#define CIRCLEQ_INSERT_TAIL(head, elm, field) do { \
+ (elm)->field.cqe_next = CIRCLEQ_END(head); \
+ (elm)->field.cqe_prev = (head)->cqh_last; \
+ if ((head)->cqh_first == CIRCLEQ_END(head)) \
+ (head)->cqh_first = (elm); \
+ else \
+ (head)->cqh_last->field.cqe_next = (elm); \
+ (head)->cqh_last = (elm); \
+} while (0)
+
+#define CIRCLEQ_REMOVE(head, elm, field) do { \
+ if ((elm)->field.cqe_next == CIRCLEQ_END(head)) \
+ (head)->cqh_last = (elm)->field.cqe_prev; \
+ else \
+ (elm)->field.cqe_next->field.cqe_prev = \
+ (elm)->field.cqe_prev; \
+ if ((elm)->field.cqe_prev == CIRCLEQ_END(head)) \
+ (head)->cqh_first = (elm)->field.cqe_next; \
+ else \
+ (elm)->field.cqe_prev->field.cqe_next = \
+ (elm)->field.cqe_next; \
+} while (0)
+
+#define CIRCLEQ_REPLACE(head, elm, elm2, field) do { \
+ if (((elm2)->field.cqe_next = (elm)->field.cqe_next) == \
+ CIRCLEQ_END(head)) \
+		(head)->cqh_last = (elm2);				\
+ else \
+ (elm2)->field.cqe_next->field.cqe_prev = (elm2); \
+ if (((elm2)->field.cqe_prev = (elm)->field.cqe_prev) == \
+ CIRCLEQ_END(head)) \
+		(head)->cqh_first = (elm2);				\
+ else \
+ (elm2)->field.cqe_prev->field.cqe_next = (elm2); \
+} while (0)
+
+#endif /* !SYS_QUEUE_H__ */
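
A companion sketch for the doubly linked tail queue; struct conn, its members, and fifo_demo are invented, and only the TAILQ_* macros defined above are assumed to be in scope.

struct conn {
	int fd;
	TAILQ_ENTRY(conn) next;		/* forward link + back pointer */
};

TAILQ_HEAD(conn_queue, conn);		/* declares struct conn_queue */

static void
fifo_demo(struct conn *a, struct conn *b)
{
	struct conn_queue q;
	struct conn *c;

	TAILQ_INIT(&q);
	TAILQ_INSERT_TAIL(&q, a, next);	/* queue: a */
	TAILQ_INSERT_TAIL(&q, b, next);	/* queue: a -> b (FIFO order) */

	TAILQ_FOREACH(c, &q, next)
		(void)c->fd;		/* visits a, then b */

	TAILQ_REMOVE(&q, a, next);	/* O(1) removal from anywhere */
}

Because every element carries a back pointer, removal never has to walk the list, which is why libevent keeps its internal event queues in tail queues.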
diff --git a/libs/libevent/src/defer-internal.h b/libs/libevent/src/defer-internal.h
new file mode 100644
index 0000000000..e3c7d7da5b
--- /dev/null
+++ b/libs/libevent/src/defer-internal.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2009-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef DEFER_INTERNAL_H_INCLUDED_
+#define DEFER_INTERNAL_H_INCLUDED_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "event2/event-config.h"
+#include "evconfig-private.h"
+
+#include <sys/queue.h>
+
+struct event_callback;
+typedef void (*deferred_cb_fn)(struct event_callback *, void *);
+
+/**
+ Initialize an empty, non-pending event_callback.
+
+ @param deferred The struct event_callback structure to initialize.
+ @param priority The priority that the callback should run at.
+ @param cb The function to run when the struct event_callback executes.
+ @param arg The function's second argument.
+ */
+void event_deferred_cb_init_(struct event_callback *, ev_uint8_t, deferred_cb_fn, void *);
+/**
+ Change the priority of a non-pending event_callback.
+ */
+void event_deferred_cb_set_priority_(struct event_callback *, ev_uint8_t);
+/**
+ Cancel a struct event_callback if it is currently scheduled in an event_base.
+ */
+void event_deferred_cb_cancel_(struct event_base *, struct event_callback *);
+/**
+ Activate a struct event_callback if it is not currently scheduled in an event_base.
+
+ Return true if it was not previously scheduled.
+ */
+int event_deferred_cb_schedule_(struct event_base *, struct event_callback *);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* DEFER_INTERNAL_H_INCLUDED_ */
+
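
A hedged sketch of how the internal API above is meant to be driven: flush_later_cb, schedule_flush, and ctx are invented names, struct event_callback is completed in event-internal.h elsewhere in this import, and base is assumed to be a live event_base.

#include "defer-internal.h"

/* Runs later, from the event loop, at the priority given at init time. */
static void
flush_later_cb(struct event_callback *cb, void *arg)
{
	(void)cb;
	(void)arg;
}

static void
schedule_flush(struct event_base *base, struct event_callback *cb, void *ctx)
{
	event_deferred_cb_init_(cb, 0 /* priority */, flush_later_cb, ctx);
	/* Returns nonzero only if the callback was not already queued. */
	(void)event_deferred_cb_schedule_(base, cb);
}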
diff --git a/libs/libevent/src/epolltable-internal.h b/libs/libevent/src/epolltable-internal.h
new file mode 100644
index 0000000000..da30e0973a
--- /dev/null
+++ b/libs/libevent/src/epolltable-internal.h
@@ -0,0 +1,1166 @@
+/*
+ * Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
+ * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef EPOLLTABLE_INTERNAL_H_INCLUDED_
+#define EPOLLTABLE_INTERNAL_H_INCLUDED_
+
+/*
+  Here are the values we're masking off to decide what operations to do:
+  the table index combines the add/del change flags for close, read, and
+  write with old_events masked down to EV_READ|EV_WRITE and EV_CLOSED
+  (see EPOLL_OP_TABLE_INDEX below).
+
+  Note also that this table is a little sparse, since ADD+DEL is
+  nonsensical ("xxx" in the list below).
+
+  Note too that we are shifting old_events by only 5 bits, since
+  EV_READ is 2 and EV_WRITE is 4.
+
+ The table was auto-generated with a python script, according to this
+ pseudocode:[*0]
+
+ If either the read or the write change is add+del:
+	 This is impossible; Set op to -1 and events to 0.
+ Else, if either the read or the write change is add:
+ Set events to 0.
+ If the read change is add, or
+ (the read change is not del, and ev_read is in old_events):
+ Add EPOLLIN to events.
+ If the write change is add, or
+ (the write change is not del, and ev_write is in old_events):
+ Add EPOLLOUT to events.
+
+ If old_events is set:
+ Set op to EPOLL_CTL_MOD [*1,*2]
+ Else:
+ Set op to EPOLL_CTL_ADD [*3]
+
+ Else, if the read or the write change is del:
+ Set op to EPOLL_CTL_DEL.
+ If the read change is del:
+ If the write change is del:
+ Set events to EPOLLIN|EPOLLOUT
+ Else if ev_write is in old_events:
+ Set events to EPOLLOUT
+ Set op to EPOLL_CTL_MOD
+ Else
+ Set events to EPOLLIN
+ Else:
+ {The write change is del.}
+ If ev_read is in old_events:
+ Set events to EPOLLIN
+ Set op to EPOLL_CTL_MOD
+ Else:
+ Set the events to EPOLLOUT
+
+ Else:
+ There is no read or write change; set op to 0 and events to 0.
+
+  The logic is a little tricky: if we had no events set on the fd before,
+  we need to set op="ADD" and set events=the events we want to add. If we
+ had any events set on the fd before, and we want any events to remain on
+ the fd, we need to say op="MOD" and set events=the events we want to
+ remain. But if we want to delete the last event, we say op="DEL" and
+ set events=(any non-null pointer).
+
+ [*0] Actually, the Python script has gotten a bit more complicated, to
+ support EPOLLRDHUP.
+
+ [*1] This MOD is only a guess. MOD might fail with ENOENT if the file was
+ closed and a new file was opened with the same fd. If so, we'll retry
+ with ADD.
+
+ [*2] We can't replace this with a no-op even if old_events is the same as
+ the new events: if the file was closed and reopened, we need to retry
+ with an ADD. (We do a MOD in this case since "no change" is more
+ common than "close and reopen", so we'll usually wind up doing 1
+       syscall instead of 2.)
+
+ [*3] This ADD is only a guess. There is a fun Linux kernel issue where if
+ you have two fds for the same file (via dup) and you ADD one to an
+ epfd, then close it, then re-create it with the same fd (via dup2 or an
+ unlucky dup), then try to ADD it again, you'll get an EEXIST, since the
+ struct epitem is not actually removed from the struct eventpoll until
+ the file itself is closed.
+
+ EV_CHANGE_ADD==1
+ EV_CHANGE_DEL==2
+ EV_READ ==2
+ EV_WRITE ==4
+ EV_CLOSED ==0x80
+
+ Bit 0: close change is add
+ Bit 1: close change is del
+ Bit 2: read change is add
+ Bit 3: read change is del
+ Bit 4: write change is add
+ Bit 5: write change is del
+ Bit 6: old events had EV_READ
+ Bit 7: old events had EV_WRITE
+ Bit 8: old events had EV_CLOSED
+*/
+
+#define EPOLL_OP_TABLE_INDEX(c) \
+ ( (((c)->close_change&(EV_CHANGE_ADD|EV_CHANGE_DEL))) | \
+ (((c)->read_change&(EV_CHANGE_ADD|EV_CHANGE_DEL)) << 2) | \
+ (((c)->write_change&(EV_CHANGE_ADD|EV_CHANGE_DEL)) << 4) | \
+ (((c)->old_events&(EV_READ|EV_WRITE)) << 5) | \
+ (((c)->old_events&(EV_CLOSED)) << 1) \
+ )
+
+#if EV_READ != 2 || EV_WRITE != 4 || EV_CLOSED != 0x80 || EV_CHANGE_ADD != 1 || EV_CHANGE_DEL != 2
+#error "Libevent's internals changed! Regenerate the op_table in epolltable-internal.h"
+#endif
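
Before the (machine-generated) table itself, here is a trimmed, hypothetical version of the lookup the epoll backend performs for each changed fd; the real consumer is epoll.c (not part of this hunk), which additionally retries the MOD/ADD guesses on the ENOENT/EEXIST cases from notes [*1] and [*3]. The struct event_change field names are assumed from changelist-internal.h, and the function name is invented.

#include <string.h>
#include <sys/epoll.h>

static int
epoll_apply_one_change_sketch(int epfd, const struct event_change *ch)
{
	const struct operation *op = &epoll_op_table[EPOLL_OP_TABLE_INDEX(ch)];
	struct epoll_event epev;

	if (op->op == 0)
		return 0;	/* the queued changes cancelled out */
	if (op->op == 255)
		return -1;	/* "xxx" slot: contradictory add+del request */

	memset(&epev, 0, sizeof(epev));
	epev.events = op->events;
	epev.data.fd = ch->fd;
	return epoll_ctl(epfd, op->op, ch->fd, &epev);
}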
+
+static const struct operation {
+ int events;
+ int op;
+} epoll_op_table[] = {
+ /* old= 0, write: 0, read: 0, close: 0 */
+ { 0, 0 },
+ /* old= 0, write: 0, read: 0, close:add */
+ { EPOLLRDHUP, EPOLL_CTL_ADD },
+ /* old= 0, write: 0, read: 0, close:del */
+ { EPOLLRDHUP, EPOLL_CTL_DEL },
+ /* old= 0, write: 0, read: 0, close:xxx */
+ { 0, 255 },
+ /* old= 0, write: 0, read:add, close: 0 */
+ { EPOLLIN, EPOLL_CTL_ADD },
+ /* old= 0, write: 0, read:add, close:add */
+ { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_ADD },
+ /* old= 0, write: 0, read:add, close:del */
+ { EPOLLIN, EPOLL_CTL_ADD },
+ /* old= 0, write: 0, read:add, close:xxx */
+ { 0, 255 },
+ /* old= 0, write: 0, read:del, close: 0 */
+ { EPOLLIN, EPOLL_CTL_DEL },
+ /* old= 0, write: 0, read:del, close:add */
+ { EPOLLRDHUP, EPOLL_CTL_ADD },
+ /* old= 0, write: 0, read:del, close:del */
+ { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_DEL },
+ /* old= 0, write: 0, read:del, close:xxx */
+ { 0, 255 },
+ /* old= 0, write: 0, read:xxx, close: 0 */
+ { 0, 255 },
+ /* old= 0, write: 0, read:xxx, close:add */
+ { 0, 255 },
+ /* old= 0, write: 0, read:xxx, close:del */
+ { 0, 255 },
+ /* old= 0, write: 0, read:xxx, close:xxx */
+ { 0, 255 },
+ /* old= 0, write:add, read: 0, close: 0 */
+ { EPOLLOUT, EPOLL_CTL_ADD },
+ /* old= 0, write:add, read: 0, close:add */
+ { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_ADD },
+ /* old= 0, write:add, read: 0, close:del */
+ { EPOLLOUT, EPOLL_CTL_ADD },
+ /* old= 0, write:add, read: 0, close:xxx */
+ { 0, 255 },
+ /* old= 0, write:add, read:add, close: 0 */
+ { EPOLLIN|EPOLLOUT, EPOLL_CTL_ADD },
+ /* old= 0, write:add, read:add, close:add */
+ { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_ADD },
+ /* old= 0, write:add, read:add, close:del */
+ { EPOLLIN|EPOLLOUT, EPOLL_CTL_ADD },
+ /* old= 0, write:add, read:add, close:xxx */
+ { 0, 255 },
+ /* old= 0, write:add, read:del, close: 0 */
+ { EPOLLOUT, EPOLL_CTL_ADD },
+ /* old= 0, write:add, read:del, close:add */
+ { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_ADD },
+ /* old= 0, write:add, read:del, close:del */
+ { EPOLLOUT, EPOLL_CTL_ADD },
+ /* old= 0, write:add, read:del, close:xxx */
+ { 0, 255 },
+ /* old= 0, write:add, read:xxx, close: 0 */
+ { 0, 255 },
+ /* old= 0, write:add, read:xxx, close:add */
+ { 0, 255 },
+ /* old= 0, write:add, read:xxx, close:del */
+ { 0, 255 },
+ /* old= 0, write:add, read:xxx, close:xxx */
+ { 0, 255 },
+ /* old= 0, write:del, read: 0, close: 0 */
+ { EPOLLOUT, EPOLL_CTL_DEL },
+ /* old= 0, write:del, read: 0, close:add */
+ { EPOLLRDHUP, EPOLL_CTL_ADD },
+ /* old= 0, write:del, read: 0, close:del */
+ { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_DEL },
+ /* old= 0, write:del, read: 0, close:xxx */
+ { 0, 255 },
+ /* old= 0, write:del, read:add, close: 0 */
+ { EPOLLIN, EPOLL_CTL_ADD },
+ /* old= 0, write:del, read:add, close:add */
+ { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_ADD },
+ /* old= 0, write:del, read:add, close:del */
+ { EPOLLIN, EPOLL_CTL_ADD },
+ /* old= 0, write:del, read:add, close:xxx */
+ { 0, 255 },
+ /* old= 0, write:del, read:del, close: 0 */
+ { EPOLLIN|EPOLLOUT, EPOLL_CTL_DEL },
+ /* old= 0, write:del, read:del, close:add */
+ { EPOLLRDHUP, EPOLL_CTL_ADD },
+ /* old= 0, write:del, read:del, close:del */
+ { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_DEL },
+ /* old= 0, write:del, read:del, close:xxx */
+ { 0, 255 },
+ /* old= 0, write:del, read:xxx, close: 0 */
+ { 0, 255 },
+ /* old= 0, write:del, read:xxx, close:add */
+ { 0, 255 },
+ /* old= 0, write:del, read:xxx, close:del */
+ { 0, 255 },
+ /* old= 0, write:del, read:xxx, close:xxx */
+ { 0, 255 },
+ /* old= 0, write:xxx, read: 0, close: 0 */
+ { 0, 255 },
+ /* old= 0, write:xxx, read: 0, close:add */
+ { 0, 255 },
+ /* old= 0, write:xxx, read: 0, close:del */
+ { 0, 255 },
+ /* old= 0, write:xxx, read: 0, close:xxx */
+ { 0, 255 },
+ /* old= 0, write:xxx, read:add, close: 0 */
+ { 0, 255 },
+ /* old= 0, write:xxx, read:add, close:add */
+ { 0, 255 },
+ /* old= 0, write:xxx, read:add, close:del */
+ { 0, 255 },
+ /* old= 0, write:xxx, read:add, close:xxx */
+ { 0, 255 },
+ /* old= 0, write:xxx, read:del, close: 0 */
+ { 0, 255 },
+ /* old= 0, write:xxx, read:del, close:add */
+ { 0, 255 },
+ /* old= 0, write:xxx, read:del, close:del */
+ { 0, 255 },
+ /* old= 0, write:xxx, read:del, close:xxx */
+ { 0, 255 },
+ /* old= 0, write:xxx, read:xxx, close: 0 */
+ { 0, 255 },
+ /* old= 0, write:xxx, read:xxx, close:add */
+ { 0, 255 },
+ /* old= 0, write:xxx, read:xxx, close:del */
+ { 0, 255 },
+ /* old= 0, write:xxx, read:xxx, close:xxx */
+ { 0, 255 },
+ /* old= r, write: 0, read: 0, close: 0 */
+ { 0, 0 },
+ /* old= r, write: 0, read: 0, close:add */
+ { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= r, write: 0, read: 0, close:del */
+ { EPOLLIN, EPOLL_CTL_MOD },
+ /* old= r, write: 0, read: 0, close:xxx */
+ { 0, 255 },
+ /* old= r, write: 0, read:add, close: 0 */
+ { EPOLLIN, EPOLL_CTL_MOD },
+ /* old= r, write: 0, read:add, close:add */
+ { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= r, write: 0, read:add, close:del */
+ { EPOLLIN, EPOLL_CTL_MOD },
+ /* old= r, write: 0, read:add, close:xxx */
+ { 0, 255 },
+ /* old= r, write: 0, read:del, close: 0 */
+ { EPOLLIN, EPOLL_CTL_DEL },
+ /* old= r, write: 0, read:del, close:add */
+ { EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= r, write: 0, read:del, close:del */
+ { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_DEL },
+ /* old= r, write: 0, read:del, close:xxx */
+ { 0, 255 },
+ /* old= r, write: 0, read:xxx, close: 0 */
+ { 0, 255 },
+ /* old= r, write: 0, read:xxx, close:add */
+ { 0, 255 },
+ /* old= r, write: 0, read:xxx, close:del */
+ { 0, 255 },
+ /* old= r, write: 0, read:xxx, close:xxx */
+ { 0, 255 },
+ /* old= r, write:add, read: 0, close: 0 */
+ { EPOLLIN|EPOLLOUT, EPOLL_CTL_MOD },
+ /* old= r, write:add, read: 0, close:add */
+ { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= r, write:add, read: 0, close:del */
+ { EPOLLIN|EPOLLOUT, EPOLL_CTL_MOD },
+ /* old= r, write:add, read: 0, close:xxx */
+ { 0, 255 },
+ /* old= r, write:add, read:add, close: 0 */
+ { EPOLLIN|EPOLLOUT, EPOLL_CTL_MOD },
+ /* old= r, write:add, read:add, close:add */
+ { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= r, write:add, read:add, close:del */
+ { EPOLLIN|EPOLLOUT, EPOLL_CTL_MOD },
+ /* old= r, write:add, read:add, close:xxx */
+ { 0, 255 },
+ /* old= r, write:add, read:del, close: 0 */
+ { EPOLLOUT, EPOLL_CTL_MOD },
+ /* old= r, write:add, read:del, close:add */
+ { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= r, write:add, read:del, close:del */
+ { EPOLLOUT, EPOLL_CTL_MOD },
+ /* old= r, write:add, read:del, close:xxx */
+ { 0, 255 },
+ /* old= r, write:add, read:xxx, close: 0 */
+ { 0, 255 },
+ /* old= r, write:add, read:xxx, close:add */
+ { 0, 255 },
+ /* old= r, write:add, read:xxx, close:del */
+ { 0, 255 },
+ /* old= r, write:add, read:xxx, close:xxx */
+ { 0, 255 },
+ /* old= r, write:del, read: 0, close: 0 */
+ { EPOLLIN, EPOLL_CTL_MOD },
+ /* old= r, write:del, read: 0, close:add */
+ { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= r, write:del, read: 0, close:del */
+ { EPOLLIN, EPOLL_CTL_MOD },
+ /* old= r, write:del, read: 0, close:xxx */
+ { 0, 255 },
+ /* old= r, write:del, read:add, close: 0 */
+ { EPOLLIN, EPOLL_CTL_MOD },
+ /* old= r, write:del, read:add, close:add */
+ { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= r, write:del, read:add, close:del */
+ { EPOLLIN, EPOLL_CTL_MOD },
+ /* old= r, write:del, read:add, close:xxx */
+ { 0, 255 },
+ /* old= r, write:del, read:del, close: 0 */
+ { EPOLLIN|EPOLLOUT, EPOLL_CTL_DEL },
+ /* old= r, write:del, read:del, close:add */
+ { EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= r, write:del, read:del, close:del */
+ { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_DEL },
+ /* old= r, write:del, read:del, close:xxx */
+ { 0, 255 },
+ /* old= r, write:del, read:xxx, close: 0 */
+ { 0, 255 },
+ /* old= r, write:del, read:xxx, close:add */
+ { 0, 255 },
+ /* old= r, write:del, read:xxx, close:del */
+ { 0, 255 },
+ /* old= r, write:del, read:xxx, close:xxx */
+ { 0, 255 },
+ /* old= r, write:xxx, read: 0, close: 0 */
+ { 0, 255 },
+ /* old= r, write:xxx, read: 0, close:add */
+ { 0, 255 },
+ /* old= r, write:xxx, read: 0, close:del */
+ { 0, 255 },
+ /* old= r, write:xxx, read: 0, close:xxx */
+ { 0, 255 },
+ /* old= r, write:xxx, read:add, close: 0 */
+ { 0, 255 },
+ /* old= r, write:xxx, read:add, close:add */
+ { 0, 255 },
+ /* old= r, write:xxx, read:add, close:del */
+ { 0, 255 },
+ /* old= r, write:xxx, read:add, close:xxx */
+ { 0, 255 },
+ /* old= r, write:xxx, read:del, close: 0 */
+ { 0, 255 },
+ /* old= r, write:xxx, read:del, close:add */
+ { 0, 255 },
+ /* old= r, write:xxx, read:del, close:del */
+ { 0, 255 },
+ /* old= r, write:xxx, read:del, close:xxx */
+ { 0, 255 },
+ /* old= r, write:xxx, read:xxx, close: 0 */
+ { 0, 255 },
+ /* old= r, write:xxx, read:xxx, close:add */
+ { 0, 255 },
+ /* old= r, write:xxx, read:xxx, close:del */
+ { 0, 255 },
+ /* old= r, write:xxx, read:xxx, close:xxx */
+ { 0, 255 },
+ /* old= w, write: 0, read: 0, close: 0 */
+ { 0, 0 },
+ /* old= w, write: 0, read: 0, close:add */
+ { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= w, write: 0, read: 0, close:del */
+ { EPOLLOUT, EPOLL_CTL_MOD },
+ /* old= w, write: 0, read: 0, close:xxx */
+ { 0, 255 },
+ /* old= w, write: 0, read:add, close: 0 */
+ { EPOLLIN|EPOLLOUT, EPOLL_CTL_MOD },
+ /* old= w, write: 0, read:add, close:add */
+ { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= w, write: 0, read:add, close:del */
+ { EPOLLIN|EPOLLOUT, EPOLL_CTL_MOD },
+ /* old= w, write: 0, read:add, close:xxx */
+ { 0, 255 },
+ /* old= w, write: 0, read:del, close: 0 */
+ { EPOLLOUT, EPOLL_CTL_MOD },
+ /* old= w, write: 0, read:del, close:add */
+ { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= w, write: 0, read:del, close:del */
+ { EPOLLOUT, EPOLL_CTL_MOD },
+ /* old= w, write: 0, read:del, close:xxx */
+ { 0, 255 },
+ /* old= w, write: 0, read:xxx, close: 0 */
+ { 0, 255 },
+ /* old= w, write: 0, read:xxx, close:add */
+ { 0, 255 },
+ /* old= w, write: 0, read:xxx, close:del */
+ { 0, 255 },
+ /* old= w, write: 0, read:xxx, close:xxx */
+ { 0, 255 },
+ /* old= w, write:add, read: 0, close: 0 */
+ { EPOLLOUT, EPOLL_CTL_MOD },
+ /* old= w, write:add, read: 0, close:add */
+ { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= w, write:add, read: 0, close:del */
+ { EPOLLOUT, EPOLL_CTL_MOD },
+ /* old= w, write:add, read: 0, close:xxx */
+ { 0, 255 },
+ /* old= w, write:add, read:add, close: 0 */
+ { EPOLLIN|EPOLLOUT, EPOLL_CTL_MOD },
+ /* old= w, write:add, read:add, close:add */
+ { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= w, write:add, read:add, close:del */
+ { EPOLLIN|EPOLLOUT, EPOLL_CTL_MOD },
+ /* old= w, write:add, read:add, close:xxx */
+ { 0, 255 },
+ /* old= w, write:add, read:del, close: 0 */
+ { EPOLLOUT, EPOLL_CTL_MOD },
+ /* old= w, write:add, read:del, close:add */
+ { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= w, write:add, read:del, close:del */
+ { EPOLLOUT, EPOLL_CTL_MOD },
+ /* old= w, write:add, read:del, close:xxx */
+ { 0, 255 },
+ /* old= w, write:add, read:xxx, close: 0 */
+ { 0, 255 },
+ /* old= w, write:add, read:xxx, close:add */
+ { 0, 255 },
+ /* old= w, write:add, read:xxx, close:del */
+ { 0, 255 },
+ /* old= w, write:add, read:xxx, close:xxx */
+ { 0, 255 },
+ /* old= w, write:del, read: 0, close: 0 */
+ { EPOLLOUT, EPOLL_CTL_DEL },
+ /* old= w, write:del, read: 0, close:add */
+ { EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= w, write:del, read: 0, close:del */
+ { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_DEL },
+ /* old= w, write:del, read: 0, close:xxx */
+ { 0, 255 },
+ /* old= w, write:del, read:add, close: 0 */
+ { EPOLLIN, EPOLL_CTL_MOD },
+ /* old= w, write:del, read:add, close:add */
+ { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= w, write:del, read:add, close:del */
+ { EPOLLIN, EPOLL_CTL_MOD },
+ /* old= w, write:del, read:add, close:xxx */
+ { 0, 255 },
+ /* old= w, write:del, read:del, close: 0 */
+ { EPOLLIN|EPOLLOUT, EPOLL_CTL_DEL },
+ /* old= w, write:del, read:del, close:add */
+ { EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= w, write:del, read:del, close:del */
+ { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_DEL },
+ /* old= w, write:del, read:del, close:xxx */
+ { 0, 255 },
+ /* old= w, write:del, read:xxx, close: 0 */
+ { 0, 255 },
+ /* old= w, write:del, read:xxx, close:add */
+ { 0, 255 },
+ /* old= w, write:del, read:xxx, close:del */
+ { 0, 255 },
+ /* old= w, write:del, read:xxx, close:xxx */
+ { 0, 255 },
+ /* old= w, write:xxx, read: 0, close: 0 */
+ { 0, 255 },
+ /* old= w, write:xxx, read: 0, close:add */
+ { 0, 255 },
+ /* old= w, write:xxx, read: 0, close:del */
+ { 0, 255 },
+ /* old= w, write:xxx, read: 0, close:xxx */
+ { 0, 255 },
+ /* old= w, write:xxx, read:add, close: 0 */
+ { 0, 255 },
+ /* old= w, write:xxx, read:add, close:add */
+ { 0, 255 },
+ /* old= w, write:xxx, read:add, close:del */
+ { 0, 255 },
+ /* old= w, write:xxx, read:add, close:xxx */
+ { 0, 255 },
+ /* old= w, write:xxx, read:del, close: 0 */
+ { 0, 255 },
+ /* old= w, write:xxx, read:del, close:add */
+ { 0, 255 },
+ /* old= w, write:xxx, read:del, close:del */
+ { 0, 255 },
+ /* old= w, write:xxx, read:del, close:xxx */
+ { 0, 255 },
+ /* old= w, write:xxx, read:xxx, close: 0 */
+ { 0, 255 },
+ /* old= w, write:xxx, read:xxx, close:add */
+ { 0, 255 },
+ /* old= w, write:xxx, read:xxx, close:del */
+ { 0, 255 },
+ /* old= w, write:xxx, read:xxx, close:xxx */
+ { 0, 255 },
+ /* old= rw, write: 0, read: 0, close: 0 */
+ { 0, 0 },
+ /* old= rw, write: 0, read: 0, close:add */
+ { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= rw, write: 0, read: 0, close:del */
+ { EPOLLIN|EPOLLOUT, EPOLL_CTL_MOD },
+ /* old= rw, write: 0, read: 0, close:xxx */
+ { 0, 255 },
+ /* old= rw, write: 0, read:add, close: 0 */
+ { EPOLLIN|EPOLLOUT, EPOLL_CTL_MOD },
+ /* old= rw, write: 0, read:add, close:add */
+ { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= rw, write: 0, read:add, close:del */
+ { EPOLLIN|EPOLLOUT, EPOLL_CTL_MOD },
+ /* old= rw, write: 0, read:add, close:xxx */
+ { 0, 255 },
+ /* old= rw, write: 0, read:del, close: 0 */
+ { EPOLLOUT, EPOLL_CTL_MOD },
+ /* old= rw, write: 0, read:del, close:add */
+ { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= rw, write: 0, read:del, close:del */
+ { EPOLLOUT, EPOLL_CTL_MOD },
+ /* old= rw, write: 0, read:del, close:xxx */
+ { 0, 255 },
+ /* old= rw, write: 0, read:xxx, close: 0 */
+ { 0, 255 },
+ /* old= rw, write: 0, read:xxx, close:add */
+ { 0, 255 },
+ /* old= rw, write: 0, read:xxx, close:del */
+ { 0, 255 },
+ /* old= rw, write: 0, read:xxx, close:xxx */
+ { 0, 255 },
+ /* old= rw, write:add, read: 0, close: 0 */
+ { EPOLLIN|EPOLLOUT, EPOLL_CTL_MOD },
+ /* old= rw, write:add, read: 0, close:add */
+ { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= rw, write:add, read: 0, close:del */
+ { EPOLLIN|EPOLLOUT, EPOLL_CTL_MOD },
+ /* old= rw, write:add, read: 0, close:xxx */
+ { 0, 255 },
+ /* old= rw, write:add, read:add, close: 0 */
+ { EPOLLIN|EPOLLOUT, EPOLL_CTL_MOD },
+ /* old= rw, write:add, read:add, close:add */
+ { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= rw, write:add, read:add, close:del */
+ { EPOLLIN|EPOLLOUT, EPOLL_CTL_MOD },
+ /* old= rw, write:add, read:add, close:xxx */
+ { 0, 255 },
+ /* old= rw, write:add, read:del, close: 0 */
+ { EPOLLOUT, EPOLL_CTL_MOD },
+ /* old= rw, write:add, read:del, close:add */
+ { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= rw, write:add, read:del, close:del */
+ { EPOLLOUT, EPOLL_CTL_MOD },
+ /* old= rw, write:add, read:del, close:xxx */
+ { 0, 255 },
+ /* old= rw, write:add, read:xxx, close: 0 */
+ { 0, 255 },
+ /* old= rw, write:add, read:xxx, close:add */
+ { 0, 255 },
+ /* old= rw, write:add, read:xxx, close:del */
+ { 0, 255 },
+ /* old= rw, write:add, read:xxx, close:xxx */
+ { 0, 255 },
+ /* old= rw, write:del, read: 0, close: 0 */
+ { EPOLLIN, EPOLL_CTL_MOD },
+ /* old= rw, write:del, read: 0, close:add */
+ { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= rw, write:del, read: 0, close:del */
+ { EPOLLIN, EPOLL_CTL_MOD },
+ /* old= rw, write:del, read: 0, close:xxx */
+ { 0, 255 },
+ /* old= rw, write:del, read:add, close: 0 */
+ { EPOLLIN, EPOLL_CTL_MOD },
+ /* old= rw, write:del, read:add, close:add */
+ { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= rw, write:del, read:add, close:del */
+ { EPOLLIN, EPOLL_CTL_MOD },
+ /* old= rw, write:del, read:add, close:xxx */
+ { 0, 255 },
+ /* old= rw, write:del, read:del, close: 0 */
+ { EPOLLIN|EPOLLOUT, EPOLL_CTL_DEL },
+ /* old= rw, write:del, read:del, close:add */
+ { EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= rw, write:del, read:del, close:del */
+ { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_DEL },
+ /* old= rw, write:del, read:del, close:xxx */
+ { 0, 255 },
+ /* old= rw, write:del, read:xxx, close: 0 */
+ { 0, 255 },
+ /* old= rw, write:del, read:xxx, close:add */
+ { 0, 255 },
+ /* old= rw, write:del, read:xxx, close:del */
+ { 0, 255 },
+ /* old= rw, write:del, read:xxx, close:xxx */
+ { 0, 255 },
+ /* old= rw, write:xxx, read: 0, close: 0 */
+ { 0, 255 },
+ /* old= rw, write:xxx, read: 0, close:add */
+ { 0, 255 },
+ /* old= rw, write:xxx, read: 0, close:del */
+ { 0, 255 },
+ /* old= rw, write:xxx, read: 0, close:xxx */
+ { 0, 255 },
+ /* old= rw, write:xxx, read:add, close: 0 */
+ { 0, 255 },
+ /* old= rw, write:xxx, read:add, close:add */
+ { 0, 255 },
+ /* old= rw, write:xxx, read:add, close:del */
+ { 0, 255 },
+ /* old= rw, write:xxx, read:add, close:xxx */
+ { 0, 255 },
+ /* old= rw, write:xxx, read:del, close: 0 */
+ { 0, 255 },
+ /* old= rw, write:xxx, read:del, close:add */
+ { 0, 255 },
+ /* old= rw, write:xxx, read:del, close:del */
+ { 0, 255 },
+ /* old= rw, write:xxx, read:del, close:xxx */
+ { 0, 255 },
+ /* old= rw, write:xxx, read:xxx, close: 0 */
+ { 0, 255 },
+ /* old= rw, write:xxx, read:xxx, close:add */
+ { 0, 255 },
+ /* old= rw, write:xxx, read:xxx, close:del */
+ { 0, 255 },
+ /* old= rw, write:xxx, read:xxx, close:xxx */
+ { 0, 255 },
+ /* old= c, write: 0, read: 0, close: 0 */
+ { 0, 0 },
+ /* old= c, write: 0, read: 0, close:add */
+ { EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= c, write: 0, read: 0, close:del */
+ { EPOLLRDHUP, EPOLL_CTL_DEL },
+ /* old= c, write: 0, read: 0, close:xxx */
+ { 0, 255 },
+ /* old= c, write: 0, read:add, close: 0 */
+ { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= c, write: 0, read:add, close:add */
+ { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= c, write: 0, read:add, close:del */
+ { EPOLLIN, EPOLL_CTL_MOD },
+ /* old= c, write: 0, read:add, close:xxx */
+ { 0, 255 },
+ /* old= c, write: 0, read:del, close: 0 */
+ { EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= c, write: 0, read:del, close:add */
+ { EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= c, write: 0, read:del, close:del */
+ { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_DEL },
+ /* old= c, write: 0, read:del, close:xxx */
+ { 0, 255 },
+ /* old= c, write: 0, read:xxx, close: 0 */
+ { 0, 255 },
+ /* old= c, write: 0, read:xxx, close:add */
+ { 0, 255 },
+ /* old= c, write: 0, read:xxx, close:del */
+ { 0, 255 },
+ /* old= c, write: 0, read:xxx, close:xxx */
+ { 0, 255 },
+ /* old= c, write:add, read: 0, close: 0 */
+ { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= c, write:add, read: 0, close:add */
+ { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= c, write:add, read: 0, close:del */
+ { EPOLLOUT, EPOLL_CTL_MOD },
+ /* old= c, write:add, read: 0, close:xxx */
+ { 0, 255 },
+ /* old= c, write:add, read:add, close: 0 */
+ { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= c, write:add, read:add, close:add */
+ { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= c, write:add, read:add, close:del */
+ { EPOLLIN|EPOLLOUT, EPOLL_CTL_MOD },
+ /* old= c, write:add, read:add, close:xxx */
+ { 0, 255 },
+ /* old= c, write:add, read:del, close: 0 */
+ { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= c, write:add, read:del, close:add */
+ { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= c, write:add, read:del, close:del */
+ { EPOLLOUT, EPOLL_CTL_MOD },
+ /* old= c, write:add, read:del, close:xxx */
+ { 0, 255 },
+ /* old= c, write:add, read:xxx, close: 0 */
+ { 0, 255 },
+ /* old= c, write:add, read:xxx, close:add */
+ { 0, 255 },
+ /* old= c, write:add, read:xxx, close:del */
+ { 0, 255 },
+ /* old= c, write:add, read:xxx, close:xxx */
+ { 0, 255 },
+ /* old= c, write:del, read: 0, close: 0 */
+ { EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= c, write:del, read: 0, close:add */
+ { EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= c, write:del, read: 0, close:del */
+ { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_DEL },
+ /* old= c, write:del, read: 0, close:xxx */
+ { 0, 255 },
+ /* old= c, write:del, read:add, close: 0 */
+ { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= c, write:del, read:add, close:add */
+ { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= c, write:del, read:add, close:del */
+ { EPOLLIN, EPOLL_CTL_MOD },
+ /* old= c, write:del, read:add, close:xxx */
+ { 0, 255 },
+ /* old= c, write:del, read:del, close: 0 */
+ { EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= c, write:del, read:del, close:add */
+ { EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= c, write:del, read:del, close:del */
+ { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_DEL },
+ /* old= c, write:del, read:del, close:xxx */
+ { 0, 255 },
+ /* old= c, write:del, read:xxx, close: 0 */
+ { 0, 255 },
+ /* old= c, write:del, read:xxx, close:add */
+ { 0, 255 },
+ /* old= c, write:del, read:xxx, close:del */
+ { 0, 255 },
+ /* old= c, write:del, read:xxx, close:xxx */
+ { 0, 255 },
+ /* old= c, write:xxx, read: 0, close: 0 */
+ { 0, 255 },
+ /* old= c, write:xxx, read: 0, close:add */
+ { 0, 255 },
+ /* old= c, write:xxx, read: 0, close:del */
+ { 0, 255 },
+ /* old= c, write:xxx, read: 0, close:xxx */
+ { 0, 255 },
+ /* old= c, write:xxx, read:add, close: 0 */
+ { 0, 255 },
+ /* old= c, write:xxx, read:add, close:add */
+ { 0, 255 },
+ /* old= c, write:xxx, read:add, close:del */
+ { 0, 255 },
+ /* old= c, write:xxx, read:add, close:xxx */
+ { 0, 255 },
+ /* old= c, write:xxx, read:del, close: 0 */
+ { 0, 255 },
+ /* old= c, write:xxx, read:del, close:add */
+ { 0, 255 },
+ /* old= c, write:xxx, read:del, close:del */
+ { 0, 255 },
+ /* old= c, write:xxx, read:del, close:xxx */
+ { 0, 255 },
+ /* old= c, write:xxx, read:xxx, close: 0 */
+ { 0, 255 },
+ /* old= c, write:xxx, read:xxx, close:add */
+ { 0, 255 },
+ /* old= c, write:xxx, read:xxx, close:del */
+ { 0, 255 },
+ /* old= c, write:xxx, read:xxx, close:xxx */
+ { 0, 255 },
+ /* old= cr, write: 0, read: 0, close: 0 */
+ { 0, 0 },
+ /* old= cr, write: 0, read: 0, close:add */
+ { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= cr, write: 0, read: 0, close:del */
+ { EPOLLIN, EPOLL_CTL_MOD },
+ /* old= cr, write: 0, read: 0, close:xxx */
+ { 0, 255 },
+ /* old= cr, write: 0, read:add, close: 0 */
+ { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= cr, write: 0, read:add, close:add */
+ { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= cr, write: 0, read:add, close:del */
+ { EPOLLIN, EPOLL_CTL_MOD },
+ /* old= cr, write: 0, read:add, close:xxx */
+ { 0, 255 },
+ /* old= cr, write: 0, read:del, close: 0 */
+ { EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= cr, write: 0, read:del, close:add */
+ { EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= cr, write: 0, read:del, close:del */
+ { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_DEL },
+ /* old= cr, write: 0, read:del, close:xxx */
+ { 0, 255 },
+ /* old= cr, write: 0, read:xxx, close: 0 */
+ { 0, 255 },
+ /* old= cr, write: 0, read:xxx, close:add */
+ { 0, 255 },
+ /* old= cr, write: 0, read:xxx, close:del */
+ { 0, 255 },
+ /* old= cr, write: 0, read:xxx, close:xxx */
+ { 0, 255 },
+ /* old= cr, write:add, read: 0, close: 0 */
+ { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= cr, write:add, read: 0, close:add */
+ { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= cr, write:add, read: 0, close:del */
+ { EPOLLIN|EPOLLOUT, EPOLL_CTL_MOD },
+ /* old= cr, write:add, read: 0, close:xxx */
+ { 0, 255 },
+ /* old= cr, write:add, read:add, close: 0 */
+ { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= cr, write:add, read:add, close:add */
+ { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= cr, write:add, read:add, close:del */
+ { EPOLLIN|EPOLLOUT, EPOLL_CTL_MOD },
+ /* old= cr, write:add, read:add, close:xxx */
+ { 0, 255 },
+ /* old= cr, write:add, read:del, close: 0 */
+ { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= cr, write:add, read:del, close:add */
+ { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= cr, write:add, read:del, close:del */
+ { EPOLLOUT, EPOLL_CTL_MOD },
+ /* old= cr, write:add, read:del, close:xxx */
+ { 0, 255 },
+ /* old= cr, write:add, read:xxx, close: 0 */
+ { 0, 255 },
+ /* old= cr, write:add, read:xxx, close:add */
+ { 0, 255 },
+ /* old= cr, write:add, read:xxx, close:del */
+ { 0, 255 },
+ /* old= cr, write:add, read:xxx, close:xxx */
+ { 0, 255 },
+ /* old= cr, write:del, read: 0, close: 0 */
+ { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= cr, write:del, read: 0, close:add */
+ { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= cr, write:del, read: 0, close:del */
+ { EPOLLIN, EPOLL_CTL_MOD },
+ /* old= cr, write:del, read: 0, close:xxx */
+ { 0, 255 },
+ /* old= cr, write:del, read:add, close: 0 */
+ { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= cr, write:del, read:add, close:add */
+ { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= cr, write:del, read:add, close:del */
+ { EPOLLIN, EPOLL_CTL_MOD },
+ /* old= cr, write:del, read:add, close:xxx */
+ { 0, 255 },
+ /* old= cr, write:del, read:del, close: 0 */
+ { EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= cr, write:del, read:del, close:add */
+ { EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= cr, write:del, read:del, close:del */
+ { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_DEL },
+ /* old= cr, write:del, read:del, close:xxx */
+ { 0, 255 },
+ /* old= cr, write:del, read:xxx, close: 0 */
+ { 0, 255 },
+ /* old= cr, write:del, read:xxx, close:add */
+ { 0, 255 },
+ /* old= cr, write:del, read:xxx, close:del */
+ { 0, 255 },
+ /* old= cr, write:del, read:xxx, close:xxx */
+ { 0, 255 },
+ /* old= cr, write:xxx, read: 0, close: 0 */
+ { 0, 255 },
+ /* old= cr, write:xxx, read: 0, close:add */
+ { 0, 255 },
+ /* old= cr, write:xxx, read: 0, close:del */
+ { 0, 255 },
+ /* old= cr, write:xxx, read: 0, close:xxx */
+ { 0, 255 },
+ /* old= cr, write:xxx, read:add, close: 0 */
+ { 0, 255 },
+ /* old= cr, write:xxx, read:add, close:add */
+ { 0, 255 },
+ /* old= cr, write:xxx, read:add, close:del */
+ { 0, 255 },
+ /* old= cr, write:xxx, read:add, close:xxx */
+ { 0, 255 },
+ /* old= cr, write:xxx, read:del, close: 0 */
+ { 0, 255 },
+ /* old= cr, write:xxx, read:del, close:add */
+ { 0, 255 },
+ /* old= cr, write:xxx, read:del, close:del */
+ { 0, 255 },
+ /* old= cr, write:xxx, read:del, close:xxx */
+ { 0, 255 },
+ /* old= cr, write:xxx, read:xxx, close: 0 */
+ { 0, 255 },
+ /* old= cr, write:xxx, read:xxx, close:add */
+ { 0, 255 },
+ /* old= cr, write:xxx, read:xxx, close:del */
+ { 0, 255 },
+ /* old= cr, write:xxx, read:xxx, close:xxx */
+ { 0, 255 },
+ /* old= cw, write: 0, read: 0, close: 0 */
+ { 0, 0 },
+ /* old= cw, write: 0, read: 0, close:add */
+ { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= cw, write: 0, read: 0, close:del */
+ { EPOLLOUT, EPOLL_CTL_MOD },
+ /* old= cw, write: 0, read: 0, close:xxx */
+ { 0, 255 },
+ /* old= cw, write: 0, read:add, close: 0 */
+ { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= cw, write: 0, read:add, close:add */
+ { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= cw, write: 0, read:add, close:del */
+ { EPOLLIN|EPOLLOUT, EPOLL_CTL_MOD },
+ /* old= cw, write: 0, read:add, close:xxx */
+ { 0, 255 },
+ /* old= cw, write: 0, read:del, close: 0 */
+ { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= cw, write: 0, read:del, close:add */
+ { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= cw, write: 0, read:del, close:del */
+ { EPOLLOUT, EPOLL_CTL_MOD },
+ /* old= cw, write: 0, read:del, close:xxx */
+ { 0, 255 },
+ /* old= cw, write: 0, read:xxx, close: 0 */
+ { 0, 255 },
+ /* old= cw, write: 0, read:xxx, close:add */
+ { 0, 255 },
+ /* old= cw, write: 0, read:xxx, close:del */
+ { 0, 255 },
+ /* old= cw, write: 0, read:xxx, close:xxx */
+ { 0, 255 },
+ /* old= cw, write:add, read: 0, close: 0 */
+ { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= cw, write:add, read: 0, close:add */
+ { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= cw, write:add, read: 0, close:del */
+ { EPOLLOUT, EPOLL_CTL_MOD },
+ /* old= cw, write:add, read: 0, close:xxx */
+ { 0, 255 },
+ /* old= cw, write:add, read:add, close: 0 */
+ { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= cw, write:add, read:add, close:add */
+ { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= cw, write:add, read:add, close:del */
+ { EPOLLIN|EPOLLOUT, EPOLL_CTL_MOD },
+ /* old= cw, write:add, read:add, close:xxx */
+ { 0, 255 },
+ /* old= cw, write:add, read:del, close: 0 */
+ { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= cw, write:add, read:del, close:add */
+ { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= cw, write:add, read:del, close:del */
+ { EPOLLOUT, EPOLL_CTL_MOD },
+ /* old= cw, write:add, read:del, close:xxx */
+ { 0, 255 },
+ /* old= cw, write:add, read:xxx, close: 0 */
+ { 0, 255 },
+ /* old= cw, write:add, read:xxx, close:add */
+ { 0, 255 },
+ /* old= cw, write:add, read:xxx, close:del */
+ { 0, 255 },
+ /* old= cw, write:add, read:xxx, close:xxx */
+ { 0, 255 },
+ /* old= cw, write:del, read: 0, close: 0 */
+ { EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= cw, write:del, read: 0, close:add */
+ { EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= cw, write:del, read: 0, close:del */
+ { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_DEL },
+ /* old= cw, write:del, read: 0, close:xxx */
+ { 0, 255 },
+ /* old= cw, write:del, read:add, close: 0 */
+ { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= cw, write:del, read:add, close:add */
+ { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= cw, write:del, read:add, close:del */
+ { EPOLLIN, EPOLL_CTL_MOD },
+ /* old= cw, write:del, read:add, close:xxx */
+ { 0, 255 },
+ /* old= cw, write:del, read:del, close: 0 */
+ { EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= cw, write:del, read:del, close:add */
+ { EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old= cw, write:del, read:del, close:del */
+ { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_DEL },
+ /* old= cw, write:del, read:del, close:xxx */
+ { 0, 255 },
+ /* old= cw, write:del, read:xxx, close: 0 */
+ { 0, 255 },
+ /* old= cw, write:del, read:xxx, close:add */
+ { 0, 255 },
+ /* old= cw, write:del, read:xxx, close:del */
+ { 0, 255 },
+ /* old= cw, write:del, read:xxx, close:xxx */
+ { 0, 255 },
+ /* old= cw, write:xxx, read: 0, close: 0 */
+ { 0, 255 },
+ /* old= cw, write:xxx, read: 0, close:add */
+ { 0, 255 },
+ /* old= cw, write:xxx, read: 0, close:del */
+ { 0, 255 },
+ /* old= cw, write:xxx, read: 0, close:xxx */
+ { 0, 255 },
+ /* old= cw, write:xxx, read:add, close: 0 */
+ { 0, 255 },
+ /* old= cw, write:xxx, read:add, close:add */
+ { 0, 255 },
+ /* old= cw, write:xxx, read:add, close:del */
+ { 0, 255 },
+ /* old= cw, write:xxx, read:add, close:xxx */
+ { 0, 255 },
+ /* old= cw, write:xxx, read:del, close: 0 */
+ { 0, 255 },
+ /* old= cw, write:xxx, read:del, close:add */
+ { 0, 255 },
+ /* old= cw, write:xxx, read:del, close:del */
+ { 0, 255 },
+ /* old= cw, write:xxx, read:del, close:xxx */
+ { 0, 255 },
+ /* old= cw, write:xxx, read:xxx, close: 0 */
+ { 0, 255 },
+ /* old= cw, write:xxx, read:xxx, close:add */
+ { 0, 255 },
+ /* old= cw, write:xxx, read:xxx, close:del */
+ { 0, 255 },
+ /* old= cw, write:xxx, read:xxx, close:xxx */
+ { 0, 255 },
+ /* old=crw, write: 0, read: 0, close: 0 */
+ { 0, 0 },
+ /* old=crw, write: 0, read: 0, close:add */
+ { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old=crw, write: 0, read: 0, close:del */
+ { EPOLLIN|EPOLLOUT, EPOLL_CTL_MOD },
+ /* old=crw, write: 0, read: 0, close:xxx */
+ { 0, 255 },
+ /* old=crw, write: 0, read:add, close: 0 */
+ { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old=crw, write: 0, read:add, close:add */
+ { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old=crw, write: 0, read:add, close:del */
+ { EPOLLIN|EPOLLOUT, EPOLL_CTL_MOD },
+ /* old=crw, write: 0, read:add, close:xxx */
+ { 0, 255 },
+ /* old=crw, write: 0, read:del, close: 0 */
+ { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old=crw, write: 0, read:del, close:add */
+ { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old=crw, write: 0, read:del, close:del */
+ { EPOLLOUT, EPOLL_CTL_MOD },
+ /* old=crw, write: 0, read:del, close:xxx */
+ { 0, 255 },
+ /* old=crw, write: 0, read:xxx, close: 0 */
+ { 0, 255 },
+ /* old=crw, write: 0, read:xxx, close:add */
+ { 0, 255 },
+ /* old=crw, write: 0, read:xxx, close:del */
+ { 0, 255 },
+ /* old=crw, write: 0, read:xxx, close:xxx */
+ { 0, 255 },
+ /* old=crw, write:add, read: 0, close: 0 */
+ { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old=crw, write:add, read: 0, close:add */
+ { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old=crw, write:add, read: 0, close:del */
+ { EPOLLIN|EPOLLOUT, EPOLL_CTL_MOD },
+ /* old=crw, write:add, read: 0, close:xxx */
+ { 0, 255 },
+ /* old=crw, write:add, read:add, close: 0 */
+ { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old=crw, write:add, read:add, close:add */
+ { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old=crw, write:add, read:add, close:del */
+ { EPOLLIN|EPOLLOUT, EPOLL_CTL_MOD },
+ /* old=crw, write:add, read:add, close:xxx */
+ { 0, 255 },
+ /* old=crw, write:add, read:del, close: 0 */
+ { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old=crw, write:add, read:del, close:add */
+ { EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old=crw, write:add, read:del, close:del */
+ { EPOLLOUT, EPOLL_CTL_MOD },
+ /* old=crw, write:add, read:del, close:xxx */
+ { 0, 255 },
+ /* old=crw, write:add, read:xxx, close: 0 */
+ { 0, 255 },
+ /* old=crw, write:add, read:xxx, close:add */
+ { 0, 255 },
+ /* old=crw, write:add, read:xxx, close:del */
+ { 0, 255 },
+ /* old=crw, write:add, read:xxx, close:xxx */
+ { 0, 255 },
+ /* old=crw, write:del, read: 0, close: 0 */
+ { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old=crw, write:del, read: 0, close:add */
+ { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old=crw, write:del, read: 0, close:del */
+ { EPOLLIN, EPOLL_CTL_MOD },
+ /* old=crw, write:del, read: 0, close:xxx */
+ { 0, 255 },
+ /* old=crw, write:del, read:add, close: 0 */
+ { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old=crw, write:del, read:add, close:add */
+ { EPOLLIN|EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old=crw, write:del, read:add, close:del */
+ { EPOLLIN, EPOLL_CTL_MOD },
+ /* old=crw, write:del, read:add, close:xxx */
+ { 0, 255 },
+ /* old=crw, write:del, read:del, close: 0 */
+ { EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old=crw, write:del, read:del, close:add */
+ { EPOLLRDHUP, EPOLL_CTL_MOD },
+ /* old=crw, write:del, read:del, close:del */
+ { EPOLLIN|EPOLLOUT|EPOLLRDHUP, EPOLL_CTL_DEL },
+ /* old=crw, write:del, read:del, close:xxx */
+ { 0, 255 },
+ /* old=crw, write:del, read:xxx, close: 0 */
+ { 0, 255 },
+ /* old=crw, write:del, read:xxx, close:add */
+ { 0, 255 },
+ /* old=crw, write:del, read:xxx, close:del */
+ { 0, 255 },
+ /* old=crw, write:del, read:xxx, close:xxx */
+ { 0, 255 },
+ /* old=crw, write:xxx, read: 0, close: 0 */
+ { 0, 255 },
+ /* old=crw, write:xxx, read: 0, close:add */
+ { 0, 255 },
+ /* old=crw, write:xxx, read: 0, close:del */
+ { 0, 255 },
+ /* old=crw, write:xxx, read: 0, close:xxx */
+ { 0, 255 },
+ /* old=crw, write:xxx, read:add, close: 0 */
+ { 0, 255 },
+ /* old=crw, write:xxx, read:add, close:add */
+ { 0, 255 },
+ /* old=crw, write:xxx, read:add, close:del */
+ { 0, 255 },
+ /* old=crw, write:xxx, read:add, close:xxx */
+ { 0, 255 },
+ /* old=crw, write:xxx, read:del, close: 0 */
+ { 0, 255 },
+ /* old=crw, write:xxx, read:del, close:add */
+ { 0, 255 },
+ /* old=crw, write:xxx, read:del, close:del */
+ { 0, 255 },
+ /* old=crw, write:xxx, read:del, close:xxx */
+ { 0, 255 },
+ /* old=crw, write:xxx, read:xxx, close: 0 */
+ { 0, 255 },
+ /* old=crw, write:xxx, read:xxx, close:add */
+ { 0, 255 },
+ /* old=crw, write:xxx, read:xxx, close:del */
+ { 0, 255 },
+ /* old=crw, write:xxx, read:xxx, close:xxx */
+ { 0, 255 },
+};
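+
+/* Editorial note (not part of the original patch): each entry above is
+ * selected by the old event state (combinations of read/write/close) plus
+ * the requested change for write, read and close (0, add, del, or the
+ * invalid "xxx"), and yields the epoll event mask to install together with
+ * the epoll_ctl() operation to use (EPOLL_CTL_ADD, EPOLL_CTL_MOD or
+ * EPOLL_CTL_DEL).  Entries of { 0, 255 } mark combinations that should be
+ * unreachable. */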
+
+#endif
diff --git a/libs/libevent/src/evbuffer-internal.h b/libs/libevent/src/evbuffer-internal.h
new file mode 100644
index 0000000000..cf4bddc80e
--- /dev/null
+++ b/libs/libevent/src/evbuffer-internal.h
@@ -0,0 +1,351 @@
+/*
+ * Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
+ * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef EVBUFFER_INTERNAL_H_INCLUDED_
+#define EVBUFFER_INTERNAL_H_INCLUDED_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "event2/event-config.h"
+#include "evconfig-private.h"
+#include "event2/util.h"
+#include "event2/event_struct.h"
+#include "util-internal.h"
+#include "defer-internal.h"
+
+/* Experimental cb flag: "never deferred." Implementation note:
+ * these callbacks may get an inaccurate view of n_del/n_added in their
+ * arguments. */
+#define EVBUFFER_CB_NODEFER 2
+
+#ifdef _WIN32
+#include <winsock2.h>
+#endif
+#include <sys/queue.h>
+
+/* Minimum allocation for a chain. We define this so that we're burning no
+ * more than 5% of each allocation on overhead. It would be nice to lose even
+ * less space, though. */
+#if EVENT__SIZEOF_VOID_P < 8
+#define MIN_BUFFER_SIZE 512
+#else
+#define MIN_BUFFER_SIZE 1024
+#endif
+
+/** A single evbuffer callback for an evbuffer. This function will be invoked
+ * when bytes are added to or removed from the evbuffer. */
+struct evbuffer_cb_entry {
+ /** Structures to implement a doubly-linked queue of callbacks */
+ LIST_ENTRY(evbuffer_cb_entry) next;
+ /** The callback function to invoke when this callback is called.
+ If EVBUFFER_CB_OBSOLETE is set in flags, the cb_obsolete field is
+ valid; otherwise, cb_func is valid. */
+ union {
+ evbuffer_cb_func cb_func;
+ evbuffer_cb cb_obsolete;
+ } cb;
+ /** Argument to pass to cb. */
+ void *cbarg;
+ /** Currently set flags on this callback. */
+ ev_uint32_t flags;
+};
+
+struct bufferevent;
+struct evbuffer_chain;
+struct evbuffer {
+ /** The first chain in this buffer's linked list of chains. */
+ struct evbuffer_chain *first;
+ /** The last chain in this buffer's linked list of chains. */
+ struct evbuffer_chain *last;
+
+ /** Pointer to the next pointer pointing at the 'last_with_data' chain.
+ *
+ * To unpack:
+ *
+ * The last_with_data chain is the last chain that has any data in it.
+ * If all chains in the buffer are empty, it is the first chain.
+ * If the buffer has no chains, it is NULL.
+ *
+ * The last_with_datap pointer points at _whatever 'next' pointer_
+	 * points at the last_with_data chain.  If the last_with_data chain
+ * is the first chain, or it is NULL, then the last_with_datap pointer
+ * is &buf->first.
+ */
+ struct evbuffer_chain **last_with_datap;
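+
+	/* Editorial illustration (not part of the original patch): for a
+	 * buffer whose chains are
+	 *     first -> [A: 100 bytes] -> [B: 50 bytes] -> [C: empty] -> NULL
+	 * the last_with_data chain is B, so last_with_datap == &A->next (the
+	 * 'next' pointer that points at B).  For a buffer with no chains, or
+	 * whose chains are all empty, last_with_datap == &buf->first. */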
+
+ /** Total amount of bytes stored in all chains.*/
+ size_t total_len;
+
+ /** Number of bytes we have added to the buffer since we last tried to
+ * invoke callbacks. */
+ size_t n_add_for_cb;
+ /** Number of bytes we have removed from the buffer since we last
+ * tried to invoke callbacks. */
+ size_t n_del_for_cb;
+
+#ifndef EVENT__DISABLE_THREAD_SUPPORT
+ /** A lock used to mediate access to this buffer. */
+ void *lock;
+#endif
+ /** True iff we should free the lock field when we free this
+ * evbuffer. */
+ unsigned own_lock : 1;
+ /** True iff we should not allow changes to the front of the buffer
+ * (drains or prepends). */
+ unsigned freeze_start : 1;
+ /** True iff we should not allow changes to the end of the buffer
+ * (appends) */
+ unsigned freeze_end : 1;
+ /** True iff this evbuffer's callbacks are not invoked immediately
+ * upon a change in the buffer, but instead are deferred to be invoked
+ * from the event_base's loop. Useful for preventing enormous stack
+ * overflows when we have mutually recursive callbacks, and for
+ * serializing callbacks in a single thread. */
+ unsigned deferred_cbs : 1;
+#ifdef _WIN32
+ /** True iff this buffer is set up for overlapped IO. */
+ unsigned is_overlapped : 1;
+#endif
+ /** Zero or more EVBUFFER_FLAG_* bits */
+ ev_uint32_t flags;
+
+ /** Used to implement deferred callbacks. */
+ struct event_base *cb_queue;
+
+ /** A reference count on this evbuffer. When the reference count
+ * reaches 0, the buffer is destroyed. Manipulated with
+ * evbuffer_incref and evbuffer_decref_and_unlock and
+ * evbuffer_free. */
+ int refcnt;
+
+ /** A struct event_callback handle to make all of this buffer's callbacks
+ * invoked from the event loop. */
+ struct event_callback deferred;
+
+ /** A doubly-linked-list of callback functions */
+ LIST_HEAD(evbuffer_cb_queue, evbuffer_cb_entry) callbacks;
+
+ /** The parent bufferevent object this evbuffer belongs to.
+ * NULL if the evbuffer stands alone. */
+ struct bufferevent *parent;
+};
+
+#if EVENT__SIZEOF_OFF_T < EVENT__SIZEOF_SIZE_T
+typedef ev_ssize_t ev_misalign_t;
+#define EVBUFFER_CHAIN_MAX ((size_t)EV_SSIZE_MAX)
+#else
+typedef ev_off_t ev_misalign_t;
+#if EVENT__SIZEOF_OFF_T > EVENT__SIZEOF_SIZE_T
+#define EVBUFFER_CHAIN_MAX EV_SIZE_MAX
+#else
+#define EVBUFFER_CHAIN_MAX ((size_t)EV_SSIZE_MAX)
+#endif
+#endif
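+
+/* Editorial note (not part of the original patch): the conditionals above
+ * pick a 'misalign' type wide enough to hold file offsets and cap a single
+ * chain's length at a value representable both as a size_t and as that
+ * signed offset type, so in-memory lengths and sendfile/mmap offsets can
+ * share the same fields without overflow. */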
+
+/** A single item in an evbuffer. */
+struct evbuffer_chain {
+ /** points to next buffer in the chain */
+ struct evbuffer_chain *next;
+
+ /** total allocation available in the buffer field. */
+ size_t buffer_len;
+
+ /** unused space at the beginning of buffer or an offset into a
+ * file for sendfile buffers. */
+ ev_misalign_t misalign;
+
+ /** Offset into buffer + misalign at which to start writing.
+ * In other words, the total number of bytes actually stored
+ * in buffer. */
+ size_t off;
+
+ /** Set if special handling is required for this chain */
+ unsigned flags;
+#define EVBUFFER_FILESEGMENT 0x0001 /**< A chain used for a file segment */
+#define EVBUFFER_SENDFILE 0x0002 /**< a chain used with sendfile */
+#define EVBUFFER_REFERENCE 0x0004 /**< a chain with a mem reference */
+#define EVBUFFER_IMMUTABLE 0x0008 /**< read-only chain */
+ /** a chain that mustn't be reallocated or freed, or have its contents
+ * memmoved, until the chain is un-pinned. */
+#define EVBUFFER_MEM_PINNED_R 0x0010
+#define EVBUFFER_MEM_PINNED_W 0x0020
+#define EVBUFFER_MEM_PINNED_ANY (EVBUFFER_MEM_PINNED_R|EVBUFFER_MEM_PINNED_W)
+ /** a chain that should be freed, but can't be freed until it is
+ * un-pinned. */
+#define EVBUFFER_DANGLING 0x0040
+ /** a chain that is a referenced copy of another chain */
+#define EVBUFFER_MULTICAST 0x0080
+
+ /** number of references to this chain */
+ int refcnt;
+
+ /** Usually points to the read-write memory belonging to this
+ * buffer allocated as part of the evbuffer_chain allocation.
+ * For mmap, this can be a read-only buffer and
+ * EVBUFFER_IMMUTABLE will be set in flags. For sendfile, it
+	 * may be NULL.
+ */
+ unsigned char *buffer;
+};
+
+/** callback for a reference chain; lets us know what to do with it when
+ * we're done with it. Lives at the end of an evbuffer_chain with the
+ * EVBUFFER_REFERENCE flag set */
+struct evbuffer_chain_reference {
+ evbuffer_ref_cleanup_cb cleanupfn;
+ void *extra;
+};
+
+/** File segment for a file-segment chain. Lives at the end of an
+ * evbuffer_chain with the EVBUFFER_FILESEGMENT flag set. */
+struct evbuffer_chain_file_segment {
+ struct evbuffer_file_segment *segment;
+#ifdef _WIN32
+ /** If we're using CreateFileMapping, this is the handle to the view. */
+ HANDLE view_handle;
+#endif
+};
+
+/* Declared in event2/buffer.h; defined here. */
+struct evbuffer_file_segment {
+	void *lock; /**< lock to prevent concurrent access to refcnt */
+ int refcnt; /**< Reference count for this file segment */
+ unsigned flags; /**< combination of EVBUF_FS_* flags */
+
+ /** What kind of file segment is this? */
+ unsigned can_sendfile : 1;
+ unsigned is_mapping : 1;
+
+ /** The fd that we read the data from. */
+ int fd;
+ /** If we're using mmap, this is the raw mapped memory. */
+ void *mapping;
+#ifdef _WIN32
+ /** If we're using CreateFileMapping, this is the mapping */
+ HANDLE mapping_handle;
+#endif
+ /** If we're using mmap or IO, this is the content of the file
+ * segment. */
+ char *contents;
+ /** Position of this segment within the file. */
+ ev_off_t file_offset;
+ /** If we're using mmap, this is the offset within 'mapping' where
+ * this data segment begins. */
+ ev_off_t mmap_offset;
+ /** The length of this segment. */
+ ev_off_t length;
+ /** Cleanup callback function */
+ evbuffer_file_segment_cleanup_cb cleanup_cb;
+	/** Argument to be passed to the cleanup callback function */
+ void *cleanup_cb_arg;
+};
+
+/** Information about the multicast parent of a chain. Lives at the
+ * end of an evbuffer_chain with the EVBUFFER_MULTICAST flag set. */
+struct evbuffer_multicast_parent {
+ /** source buffer the multicast parent belongs to */
+ struct evbuffer *source;
+ /** multicast parent for this chain */
+ struct evbuffer_chain *parent;
+};
+
+#define EVBUFFER_CHAIN_SIZE sizeof(struct evbuffer_chain)
+/** Return a pointer to extra data allocated along with an evbuffer. */
+#define EVBUFFER_CHAIN_EXTRA(t, c) (t *)((struct evbuffer_chain *)(c) + 1)
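+
+/* Editorial sketch (not part of the original patch): chains that carry
+ * per-flavor bookkeeping (reference, file segment, multicast parent) are
+ * allocated with that bookkeeping struct placed immediately after the
+ * evbuffer_chain header, which is the address EVBUFFER_CHAIN_EXTRA
+ * computes.  The helper below is an illustration only; the allocation-size
+ * detail in the comment is the editor's assumption. */
+#if 0
+static struct evbuffer_chain_reference *
+example_chain_reference_info(struct evbuffer_chain *chain)
+{
+	/* Meaningful only for chains created with the EVBUFFER_REFERENCE
+	 * flag, whose allocation presumably was
+	 * EVBUFFER_CHAIN_SIZE + sizeof(struct evbuffer_chain_reference). */
+	return EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_reference, chain);
+}
+#endif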
+
+/** Assert that we are holding the lock on an evbuffer */
+#define ASSERT_EVBUFFER_LOCKED(buffer) \
+ EVLOCK_ASSERT_LOCKED((buffer)->lock)
+
+#define EVBUFFER_LOCK(buffer) \
+ do { \
+ EVLOCK_LOCK((buffer)->lock, 0); \
+ } while (0)
+#define EVBUFFER_UNLOCK(buffer) \
+ do { \
+ EVLOCK_UNLOCK((buffer)->lock, 0); \
+ } while (0)
+#define EVBUFFER_LOCK2(buffer1, buffer2) \
+ do { \
+ EVLOCK_LOCK2((buffer1)->lock, (buffer2)->lock, 0, 0); \
+ } while (0)
+#define EVBUFFER_UNLOCK2(buffer1, buffer2) \
+ do { \
+ EVLOCK_UNLOCK2((buffer1)->lock, (buffer2)->lock, 0, 0); \
+ } while (0)
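+
+/* Editorial note (not part of the original patch): the two-buffer variants
+ * exist so operations that move data between evbuffers can hold both locks
+ * at once; EVLOCK_LOCK2 is presumably responsible for taking the pair in a
+ * consistent order so that two threads locking the same buffers with the
+ * arguments swapped cannot deadlock. */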
+
+/** Increase the reference count of buf by one. */
+void evbuffer_incref_(struct evbuffer *buf);
+/** Increase the reference count of buf by one and acquire the lock. */
+void evbuffer_incref_and_lock_(struct evbuffer *buf);
+/** Pin a single buffer chain using a given flag. A pinned chunk may not be
+ * moved or freed until it is unpinned. */
+void evbuffer_chain_pin_(struct evbuffer_chain *chain, unsigned flag);
+/** Unpin a single buffer chain using a given flag. */
+void evbuffer_chain_unpin_(struct evbuffer_chain *chain, unsigned flag);
+/** As evbuffer_free, but requires that we hold a lock on the buffer, and
+ * releases the lock before freeing it and the buffer. */
+void evbuffer_decref_and_unlock_(struct evbuffer *buffer);
+
+/** As evbuffer_expand, but does not guarantee that the newly allocated memory
+ * is contiguous. Instead, it may be split across two or more chunks. */
+int evbuffer_expand_fast_(struct evbuffer *, size_t, int);
+
+/** Helper: prepares for a readv/WSARecv call by expanding the buffer to
+ * hold enough memory to read 'howmuch' bytes in possibly noncontiguous memory.
+ * Sets up the one or two iovecs in 'vecs' to point to the free memory and its
+ * extent, and *chainp to point to the first chain that we'll try to read into.
+ * Returns the number of vecs used.
+ */
+int evbuffer_read_setup_vecs_(struct evbuffer *buf, ev_ssize_t howmuch,
+ struct evbuffer_iovec *vecs, int n_vecs, struct evbuffer_chain ***chainp,
+ int exact);
+
+/* Helper macro: copies an evbuffer_iovec in ei to a win32 WSABUF in i. */
+#define WSABUF_FROM_EVBUFFER_IOV(i,ei) do { \
+ (i)->buf = (ei)->iov_base; \
+ (i)->len = (unsigned long)(ei)->iov_len; \
+ } while (0)
+/* XXXX the cast above is safe for now, but not if we allow mmaps on win64.
+ * See note in buffer_iocp's launch_write function */
+
+/** Set the parent bufferevent object for buf to bev */
+void evbuffer_set_parent_(struct evbuffer *buf, struct bufferevent *bev);
+
+void evbuffer_invoke_callbacks_(struct evbuffer *buf);
+
+
+int evbuffer_get_callbacks_(struct evbuffer *buffer,
+ struct event_callback **cbs,
+ int max_cbs);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* EVBUFFER_INTERNAL_H_INCLUDED_ */
diff --git a/libs/libevent/src/evdns.c b/libs/libevent/src/evdns.c
new file mode 100644
index 0000000000..c4112330a9
--- /dev/null
+++ b/libs/libevent/src/evdns.c
@@ -0,0 +1,4761 @@
+/* Copyright 2006-2007 Niels Provos
+ * Copyright 2007-2012 Nick Mathewson and Niels Provos
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* Based on software by Adam Langley. Adam's original message:
+ *
+ * Async DNS Library
+ * Adam Langley <agl@imperialviolet.org>
+ * http://www.imperialviolet.org/eventdns.html
+ * Public Domain code
+ *
+ * This software is Public Domain. To view a copy of the public domain dedication,
+ * visit http://creativecommons.org/licenses/publicdomain/ or send a letter to
+ * Creative Commons, 559 Nathan Abbott Way, Stanford, California 94305, USA.
+ *
+ * I ask and expect, but do not require, that all derivative works contain an
+ * attribution similar to:
+ * Parts developed by Adam Langley <agl@imperialviolet.org>
+ *
+ * You may wish to replace the word "Parts" with something else depending on
+ * the amount of original code.
+ *
+ * (Derivative works does not include programs which link against, run or include
+ * the source verbatim in their source distributions)
+ *
+ * Version: 0.1b
+ */
+
+#include "event2/event-config.h"
+#include "evconfig-private.h"
+
+#include <sys/types.h>
+
+#ifndef _FORTIFY_SOURCE
+#define _FORTIFY_SOURCE 3
+#endif
+
+#include <string.h>
+#include <fcntl.h>
+#ifdef EVENT__HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+#ifdef EVENT__HAVE_STDINT_H
+#include <stdint.h>
+#endif
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#ifdef EVENT__HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+#include <limits.h>
+#include <sys/stat.h>
+#include <stdio.h>
+#include <stdarg.h>
+#ifdef _WIN32
+#include <winsock2.h>
+#include <ws2tcpip.h>
+#ifndef _WIN32_IE
+#define _WIN32_IE 0x400
+#endif
+#include <shlobj.h>
+#endif
+
+#include "event2/dns.h"
+#include "event2/dns_struct.h"
+#include "event2/dns_compat.h"
+#include "event2/util.h"
+#include "event2/event.h"
+#include "event2/event_struct.h"
+#include "event2/thread.h"
+
+#include "defer-internal.h"
+#include "log-internal.h"
+#include "mm-internal.h"
+#include "strlcpy-internal.h"
+#include "ipv6-internal.h"
+#include "util-internal.h"
+#include "evthread-internal.h"
+#ifdef _WIN32
+#include <ctype.h>
+#include <winsock2.h>
+#include <windows.h>
+#include <iphlpapi.h>
+#include <io.h>
+#else
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+#endif
+
+#ifdef EVENT__HAVE_NETINET_IN6_H
+#include <netinet/in6.h>
+#endif
+
+#define EVDNS_LOG_DEBUG EVENT_LOG_DEBUG
+#define EVDNS_LOG_WARN EVENT_LOG_WARN
+#define EVDNS_LOG_MSG EVENT_LOG_MSG
+
+#ifndef HOST_NAME_MAX
+#define HOST_NAME_MAX 255
+#endif
+
+#include <stdio.h>
+
+#undef MIN
+#define MIN(a,b) ((a)<(b)?(a):(b))
+
+#define ASSERT_VALID_REQUEST(req) \
+ EVUTIL_ASSERT((req)->handle && (req)->handle->current_req == (req))
+
+#define u64 ev_uint64_t
+#define u32 ev_uint32_t
+#define u16 ev_uint16_t
+#define u8 ev_uint8_t
+
+/* maximum number of addresses from a single packet */
+/* that we bother recording */
+#define MAX_V4_ADDRS 32
+#define MAX_V6_ADDRS 32
+
+
+#define TYPE_A EVDNS_TYPE_A
+#define TYPE_CNAME 5
+#define TYPE_PTR EVDNS_TYPE_PTR
+#define TYPE_SOA EVDNS_TYPE_SOA
+#define TYPE_AAAA EVDNS_TYPE_AAAA
+
+#define CLASS_INET EVDNS_CLASS_INET
+
+/* Persistent handle. We keep this separate from 'struct request' since we
+ * need some object to last for as long as an evdns_request is outstanding so
+ * that it can be canceled, whereas a search request can lead to multiple
+ * 'struct request' instances being created over its lifetime. */
+struct evdns_request {
+ struct request *current_req;
+ struct evdns_base *base;
+
+ int pending_cb; /* Waiting for its callback to be invoked; not
+ * owned by event base any more. */
+
+ /* elements used by the searching code */
+ int search_index;
+ struct search_state *search_state;
+ char *search_origname; /* needs to be free()ed */
+ int search_flags;
+};
+
+struct request {
+ u8 *request; /* the dns packet data */
+ u8 request_type; /* TYPE_PTR or TYPE_A or TYPE_AAAA */
+ unsigned int request_len;
+ int reissue_count;
+ int tx_count; /* the number of times that this packet has been sent */
+ void *user_pointer; /* the pointer given to us for this request */
+ evdns_callback_type user_callback;
+ struct nameserver *ns; /* the server which we last sent it */
+
+ /* these objects are kept in a circular list */
+ /* XXX We could turn this into a CIRCLEQ. */
+ struct request *next, *prev;
+
+ struct event timeout_event;
+
+ u16 trans_id; /* the transaction id */
+ unsigned request_appended :1; /* true if the request pointer is data which follows this struct */
+ unsigned transmit_me :1; /* needs to be transmitted */
+
+ /* XXXX This is a horrible hack. */
+ char **put_cname_in_ptr; /* store the cname here if we get one. */
+
+ struct evdns_base *base;
+
+ struct evdns_request *handle;
+};
+
+struct reply {
+ unsigned int type;
+ unsigned int have_answer : 1;
+ union {
+ struct {
+ u32 addrcount;
+ u32 addresses[MAX_V4_ADDRS];
+ } a;
+ struct {
+ u32 addrcount;
+ struct in6_addr addresses[MAX_V6_ADDRS];
+ } aaaa;
+ struct {
+ char name[HOST_NAME_MAX];
+ } ptr;
+ } data;
+};
+
+struct nameserver {
+ evutil_socket_t socket; /* a connected UDP socket */
+ struct sockaddr_storage address;
+ ev_socklen_t addrlen;
+ int failed_times; /* number of times which we have given this server a chance */
+ int timedout; /* number of times in a row a request has timed out */
+ struct event event;
+ /* these objects are kept in a circular list */
+ struct nameserver *next, *prev;
+ struct event timeout_event; /* used to keep the timeout for */
+ /* when we next probe this server. */
+ /* Valid if state == 0 */
+ /* Outstanding probe request for this nameserver, if any */
+ struct evdns_request *probe_request;
+ char state; /* zero if we think that this server is down */
+ char choked; /* true if we have an EAGAIN from this server's socket */
+ char write_waiting; /* true if we are waiting for EV_WRITE events */
+ struct evdns_base *base;
+
+ /* Number of currently inflight requests: used
+ * to track when we should add/del the event. */
+ int requests_inflight;
+};
+
+
+/* Represents a local port where we're listening for DNS requests. Right now, */
+/* only UDP is supported. */
+struct evdns_server_port {
+ evutil_socket_t socket; /* socket we use to read queries and write replies. */
+ int refcnt; /* reference count. */
+ char choked; /* Are we currently blocked from writing? */
+ char closing; /* Are we trying to close this port, pending writes? */
+ evdns_request_callback_fn_type user_callback; /* Fn to handle requests */
+ void *user_data; /* Opaque pointer passed to user_callback */
+ struct event event; /* Read/write event */
+ /* circular list of replies that we want to write. */
+ struct server_request *pending_replies;
+ struct event_base *event_base;
+
+#ifndef EVENT__DISABLE_THREAD_SUPPORT
+ void *lock;
+#endif
+};
+
+/* Represents part of a reply being built. (That is, a single RR.) */
+struct server_reply_item {
+ struct server_reply_item *next; /* next item in sequence. */
+ char *name; /* name part of the RR */
+ u16 type; /* The RR type */
+ u16 class; /* The RR class (usually CLASS_INET) */
+ u32 ttl; /* The RR TTL */
+ char is_name; /* True iff data is a label */
+ u16 datalen; /* Length of data; -1 if data is a label */
+ void *data; /* The contents of the RR */
+};
+
+/* Represents a request that we've received as a DNS server, and holds */
+/* the components of the reply as we're constructing it. */
+struct server_request {
+ /* Pointers to the next and previous entries on the list of replies */
+ /* that we're waiting to write. Only set if we have tried to respond */
+ /* and gotten EAGAIN. */
+ struct server_request *next_pending;
+ struct server_request *prev_pending;
+
+ u16 trans_id; /* Transaction id. */
+ struct evdns_server_port *port; /* Which port received this request on? */
+ struct sockaddr_storage addr; /* Where to send the response */
+ ev_socklen_t addrlen; /* length of addr */
+
+ int n_answer; /* how many answer RRs have been set? */
+ int n_authority; /* how many authority RRs have been set? */
+ int n_additional; /* how many additional RRs have been set? */
+
+ struct server_reply_item *answer; /* linked list of answer RRs */
+ struct server_reply_item *authority; /* linked list of authority RRs */
+ struct server_reply_item *additional; /* linked list of additional RRs */
+
+ /* Constructed response. Only set once we're ready to send a reply. */
+ /* Once this is set, the RR fields are cleared, and no more should be set. */
+ char *response;
+ size_t response_len;
+
+ /* Caller-visible fields: flags, questions. */
+ struct evdns_server_request base;
+};
+
+struct evdns_base {
+ /* An array of n_req_heads circular lists for inflight requests.
+ * Each inflight request req is in req_heads[req->trans_id % n_req_heads].
+ */
+ struct request **req_heads;
+ /* A circular list of requests that we're waiting to send, but haven't
+ * sent yet because there are too many requests inflight */
+ struct request *req_waiting_head;
+ /* A circular list of nameservers. */
+ struct nameserver *server_head;
+ int n_req_heads;
+
+ struct event_base *event_base;
+
+ /* The number of good nameservers that we have */
+ int global_good_nameservers;
+
+ /* inflight requests are contained in the req_head list */
+ /* and are actually going out across the network */
+ int global_requests_inflight;
+ /* requests which aren't inflight are in the waiting list */
+ /* and are counted here */
+ int global_requests_waiting;
+
+ int global_max_requests_inflight;
+
+ struct timeval global_timeout; /* 5 seconds by default */
+ int global_max_reissues; /* a reissue occurs when we get some errors from the server */
+ int global_max_retransmits; /* number of times we'll retransmit a request which timed out */
+ /* number of timeouts in a row before we consider this server to be down */
+ int global_max_nameserver_timeout;
+ /* true iff we will use the 0x20 hack to prevent poisoning attacks. */
+ int global_randomize_case;
+
+ /* The first time that a nameserver fails, how long do we wait before
+ * probing to see if it has returned? */
+ struct timeval global_nameserver_probe_initial_timeout;
+
+ /** Port to bind to for outgoing DNS packets. */
+ struct sockaddr_storage global_outgoing_address;
+ /** ev_socklen_t for global_outgoing_address. 0 if it isn't set. */
+ ev_socklen_t global_outgoing_addrlen;
+
+ struct timeval global_getaddrinfo_allow_skew;
+
+ int getaddrinfo_ipv4_timeouts;
+ int getaddrinfo_ipv6_timeouts;
+ int getaddrinfo_ipv4_answered;
+ int getaddrinfo_ipv6_answered;
+
+ struct search_state *global_search_state;
+
+ TAILQ_HEAD(hosts_list, hosts_entry) hostsdb;
+
+#ifndef EVENT__DISABLE_THREAD_SUPPORT
+ void *lock;
+#endif
+
+ int disable_when_inactive;
+};
+
+struct hosts_entry {
+ TAILQ_ENTRY(hosts_entry) next;
+ union {
+ struct sockaddr sa;
+ struct sockaddr_in sin;
+ struct sockaddr_in6 sin6;
+ } addr;
+ int addrlen;
+ char hostname[1];
+};
+
+static struct evdns_base *current_base = NULL;
+
+struct evdns_base *
+evdns_get_global_base(void)
+{
+ return current_base;
+}
+
+/* Given a pointer to an evdns_server_request, get the corresponding */
+/* server_request. */
+#define TO_SERVER_REQUEST(base_ptr) \
+ ((struct server_request*) \
+ (((char*)(base_ptr) - evutil_offsetof(struct server_request, base))))
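+
+/* Editorial note (not part of the original patch): TO_SERVER_REQUEST is the
+ * usual "container of" idiom -- callers are handed pointers to the embedded
+ * 'base' member, and the macro subtracts that member's offset (via
+ * evutil_offsetof) to recover the enclosing struct server_request. */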
+
+#define REQ_HEAD(base, id) ((base)->req_heads[id % (base)->n_req_heads])
+
+static struct nameserver *nameserver_pick(struct evdns_base *base);
+static void evdns_request_insert(struct request *req, struct request **head);
+static void evdns_request_remove(struct request *req, struct request **head);
+static void nameserver_ready_callback(evutil_socket_t fd, short events, void *arg);
+static int evdns_transmit(struct evdns_base *base);
+static int evdns_request_transmit(struct request *req);
+static void nameserver_send_probe(struct nameserver *const ns);
+static void search_request_finished(struct evdns_request *const);
+static int search_try_next(struct evdns_request *const req);
+static struct request *search_request_new(struct evdns_base *base, struct evdns_request *handle, int type, const char *const name, int flags, evdns_callback_type user_callback, void *user_arg);
+static void evdns_requests_pump_waiting_queue(struct evdns_base *base);
+static u16 transaction_id_pick(struct evdns_base *base);
+static struct request *request_new(struct evdns_base *base, struct evdns_request *handle, int type, const char *name, int flags, evdns_callback_type callback, void *ptr);
+static void request_submit(struct request *const req);
+
+static int server_request_free(struct server_request *req);
+static void server_request_free_answers(struct server_request *req);
+static void server_port_free(struct evdns_server_port *port);
+static void server_port_ready_callback(evutil_socket_t fd, short events, void *arg);
+static int evdns_base_resolv_conf_parse_impl(struct evdns_base *base, int flags, const char *const filename);
+static int evdns_base_set_option_impl(struct evdns_base *base,
+ const char *option, const char *val, int flags);
+static void evdns_base_free_and_unlock(struct evdns_base *base, int fail_requests);
+static void evdns_request_timeout_callback(evutil_socket_t fd, short events, void *arg);
+
+static int strtoint(const char *const str);
+
+#ifdef EVENT__DISABLE_THREAD_SUPPORT
+#define EVDNS_LOCK(base) EVUTIL_NIL_STMT_
+#define EVDNS_UNLOCK(base) EVUTIL_NIL_STMT_
+#define ASSERT_LOCKED(base) EVUTIL_NIL_STMT_
+#else
+#define EVDNS_LOCK(base) \
+ EVLOCK_LOCK((base)->lock, 0)
+#define EVDNS_UNLOCK(base) \
+ EVLOCK_UNLOCK((base)->lock, 0)
+#define ASSERT_LOCKED(base) \
+ EVLOCK_ASSERT_LOCKED((base)->lock)
+#endif
+
+static evdns_debug_log_fn_type evdns_log_fn = NULL;
+
+void
+evdns_set_log_fn(evdns_debug_log_fn_type fn)
+{
+ evdns_log_fn = fn;
+}
+
+#ifdef __GNUC__
+#define EVDNS_LOG_CHECK __attribute__ ((format(printf, 2, 3)))
+#else
+#define EVDNS_LOG_CHECK
+#endif
+
+static void evdns_log_(int severity, const char *fmt, ...) EVDNS_LOG_CHECK;
+static void
+evdns_log_(int severity, const char *fmt, ...)
+{
+ va_list args;
+ va_start(args,fmt);
+ if (evdns_log_fn) {
+ char buf[512];
+ int is_warn = (severity == EVDNS_LOG_WARN);
+ evutil_vsnprintf(buf, sizeof(buf), fmt, args);
+ evdns_log_fn(is_warn, buf);
+ } else {
+ event_logv_(severity, NULL, fmt, args);
+ }
+ va_end(args);
+}
+
+#define log evdns_log_
+
+/* This walks the list of inflight requests to find the */
+/* one with a matching transaction id. Returns NULL on */
+/* failure */
+static struct request *
+request_find_from_trans_id(struct evdns_base *base, u16 trans_id) {
+ struct request *req = REQ_HEAD(base, trans_id);
+ struct request *const started_at = req;
+
+ ASSERT_LOCKED(base);
+
+ if (req) {
+ do {
+ if (req->trans_id == trans_id) return req;
+ req = req->next;
+ } while (req != started_at);
+ }
+
+ return NULL;
+}
+
+/* a libevent callback function which is called when a nameserver */
+/* has gone down and we want to test if it has come back to life yet */
+static void
+nameserver_prod_callback(evutil_socket_t fd, short events, void *arg) {
+ struct nameserver *const ns = (struct nameserver *) arg;
+ (void)fd;
+ (void)events;
+
+ EVDNS_LOCK(ns->base);
+ nameserver_send_probe(ns);
+ EVDNS_UNLOCK(ns->base);
+}
+
+/* a libevent callback which is called when a nameserver probe (to see if */
+/* it has come back to life) times out. We increment the count of failed_times */
+/* and wait longer to send the next probe packet. */
+static void
+nameserver_probe_failed(struct nameserver *const ns) {
+ struct timeval timeout;
+ int i;
+
+ ASSERT_LOCKED(ns->base);
+ (void) evtimer_del(&ns->timeout_event);
+ if (ns->state == 1) {
+ /* This can happen if the nameserver acts in a way which makes us mark */
+ /* it as bad and then starts sending good replies. */
+ return;
+ }
+
+#define MAX_PROBE_TIMEOUT 3600
+#define TIMEOUT_BACKOFF_FACTOR 3
+
+ memcpy(&timeout, &ns->base->global_nameserver_probe_initial_timeout,
+ sizeof(struct timeval));
+ for (i=ns->failed_times; i > 0 && timeout.tv_sec < MAX_PROBE_TIMEOUT; --i) {
+ timeout.tv_sec *= TIMEOUT_BACKOFF_FACTOR;
+ timeout.tv_usec *= TIMEOUT_BACKOFF_FACTOR;
+ if (timeout.tv_usec > 1000000) {
+ timeout.tv_sec += timeout.tv_usec / 1000000;
+ timeout.tv_usec %= 1000000;
+ }
+ }
+ if (timeout.tv_sec > MAX_PROBE_TIMEOUT) {
+ timeout.tv_sec = MAX_PROBE_TIMEOUT;
+ timeout.tv_usec = 0;
+ }
+
+ ns->failed_times++;
+
+ if (evtimer_add(&ns->timeout_event, &timeout) < 0) {
+ char addrbuf[128];
+ log(EVDNS_LOG_WARN,
+ "Error from libevent when adding timer event for %s",
+ evutil_format_sockaddr_port_(
+ (struct sockaddr *)&ns->address,
+ addrbuf, sizeof(addrbuf)));
+ }
+}
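+
+/* Editorial note (not part of the original patch): the loop above is an
+ * exponential backoff -- each consecutive probe failure triples the wait
+ * before the next probe, so with an initial timeout T the successive waits
+ * are T, 3T, 9T, 27T, ... capped at MAX_PROBE_TIMEOUT (e.g. T = 10s gives
+ * 10, 30, 90, 270, 810, 2430, then 3600 seconds). */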
+
+static void
+request_swap_ns(struct request *req, struct nameserver *ns) {
+ if (ns && req->ns != ns) {
+ EVUTIL_ASSERT(req->ns->requests_inflight > 0);
+ req->ns->requests_inflight--;
+ ns->requests_inflight++;
+
+ req->ns = ns;
+ }
+}
+
+/* called when a nameserver has been deemed to have failed. For example, too */
+/* many packets have timed out etc */
+static void
+nameserver_failed(struct nameserver *const ns, const char *msg) {
+ struct request *req, *started_at;
+ struct evdns_base *base = ns->base;
+ int i;
+ char addrbuf[128];
+
+ ASSERT_LOCKED(base);
+ /* if this nameserver has already been marked as failed */
+ /* then don't do anything */
+ if (!ns->state) return;
+
+ log(EVDNS_LOG_MSG, "Nameserver %s has failed: %s",
+ evutil_format_sockaddr_port_(
+ (struct sockaddr *)&ns->address,
+ addrbuf, sizeof(addrbuf)),
+ msg);
+
+ base->global_good_nameservers--;
+ EVUTIL_ASSERT(base->global_good_nameservers >= 0);
+ if (base->global_good_nameservers == 0) {
+ log(EVDNS_LOG_MSG, "All nameservers have failed");
+ }
+
+ ns->state = 0;
+ ns->failed_times = 1;
+
+ if (evtimer_add(&ns->timeout_event,
+ &base->global_nameserver_probe_initial_timeout) < 0) {
+ log(EVDNS_LOG_WARN,
+ "Error from libevent when adding timer event for %s",
+ evutil_format_sockaddr_port_(
+ (struct sockaddr *)&ns->address,
+ addrbuf, sizeof(addrbuf)));
+ /* ???? Do more? */
+ }
+
+ /* walk the list of inflight requests to see if any can be reassigned to */
+ /* a different server. Requests in the waiting queue don't have a */
+ /* nameserver assigned yet */
+
+ /* if we don't have *any* good nameservers then there's no point */
+ /* trying to reassign requests to one */
+ if (!base->global_good_nameservers) return;
+
+ for (i = 0; i < base->n_req_heads; ++i) {
+ req = started_at = base->req_heads[i];
+ if (req) {
+ do {
+ if (req->tx_count == 0 && req->ns == ns) {
+ /* still waiting to go out, can be moved */
+ /* to another server */
+ request_swap_ns(req, nameserver_pick(base));
+ }
+ req = req->next;
+ } while (req != started_at);
+ }
+ }
+}
+
+static void
+nameserver_up(struct nameserver *const ns)
+{
+ char addrbuf[128];
+ ASSERT_LOCKED(ns->base);
+ if (ns->state) return;
+ log(EVDNS_LOG_MSG, "Nameserver %s is back up",
+ evutil_format_sockaddr_port_(
+ (struct sockaddr *)&ns->address,
+ addrbuf, sizeof(addrbuf)));
+ evtimer_del(&ns->timeout_event);
+ if (ns->probe_request) {
+ evdns_cancel_request(ns->base, ns->probe_request);
+ ns->probe_request = NULL;
+ }
+ ns->state = 1;
+ ns->failed_times = 0;
+ ns->timedout = 0;
+ ns->base->global_good_nameservers++;
+}
+
+static void
+request_trans_id_set(struct request *const req, const u16 trans_id) {
+ req->trans_id = trans_id;
+ *((u16 *) req->request) = htons(trans_id);
+}
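+
+/* Editorial note (not part of the original patch): a DNS message starts
+ * with a 12-byte header of six 16-bit big-endian fields -- ID, FLAGS,
+ * QDCOUNT, ANCOUNT, NSCOUNT, ARCOUNT -- which is why the transaction id
+ * can be stamped by overwriting the first two bytes of the encoded request
+ * above, and why reply_parse()/request_parse() below both begin by pulling
+ * six 16-bit values off the packet. */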
+
+/* Called to remove a request from a list and dealloc it. */
+/* head is a pointer to the head of the list it should be */
+/* removed from or NULL if the request isn't in a list. */
+/* when free_handle is one, free the handle as well. */
+static void
+request_finished(struct request *const req, struct request **head, int free_handle) {
+ struct evdns_base *base = req->base;
+ int was_inflight = (head != &base->req_waiting_head);
+ EVDNS_LOCK(base);
+ ASSERT_VALID_REQUEST(req);
+
+ if (head)
+ evdns_request_remove(req, head);
+
+ log(EVDNS_LOG_DEBUG, "Removing timeout for request %p", req);
+ if (was_inflight) {
+ evtimer_del(&req->timeout_event);
+ base->global_requests_inflight--;
+ req->ns->requests_inflight--;
+ } else {
+ base->global_requests_waiting--;
+ }
+ /* it was initialized during request_new / evtimer_assign */
+ event_debug_unassign(&req->timeout_event);
+
+ if (req->ns &&
+ req->ns->requests_inflight == 0 &&
+ req->base->disable_when_inactive) {
+ event_del(&req->ns->event);
+ evtimer_del(&req->ns->timeout_event);
+ }
+
+ if (!req->request_appended) {
+		/* need to free the request data on its own */
+ mm_free(req->request);
+ } else {
+ /* the request data is appended onto the header */
+		/* so everything gets free()ed when we free the request below */
+ }
+
+ if (req->handle) {
+ EVUTIL_ASSERT(req->handle->current_req == req);
+
+ if (free_handle) {
+ search_request_finished(req->handle);
+ req->handle->current_req = NULL;
+ if (! req->handle->pending_cb) {
+ /* If we're planning to run the callback,
+ * don't free the handle until later. */
+ mm_free(req->handle);
+ }
+ req->handle = NULL; /* If we have a bug, let's crash
+ * early */
+ } else {
+ req->handle->current_req = NULL;
+ }
+ }
+
+ mm_free(req);
+
+ evdns_requests_pump_waiting_queue(base);
+ EVDNS_UNLOCK(base);
+}
+
+/* This is called when a server returns a funny error code. */
+/* We try the request again with another server. */
+/* */
+/* return: */
+/* 0 ok */
+/* 1 failed/reissue is pointless */
+static int
+request_reissue(struct request *req) {
+ const struct nameserver *const last_ns = req->ns;
+ ASSERT_LOCKED(req->base);
+ ASSERT_VALID_REQUEST(req);
+ /* the last nameserver should have been marked as failing */
+ /* by the caller of this function, therefore pick will try */
+ /* not to return it */
+ request_swap_ns(req, nameserver_pick(req->base));
+ if (req->ns == last_ns) {
+ /* ... but pick did return it */
+ /* not a lot of point in trying again with the */
+ /* same server */
+ return 1;
+ }
+
+ req->reissue_count++;
+ req->tx_count = 0;
+ req->transmit_me = 1;
+
+ return 0;
+}
+
+/* this function looks for space on the inflight queue and promotes */
+/* requests from the waiting queue if it can. */
+/* */
+/* TODO: */
+/* add return code, see at nameserver_pick() and other functions. */
+static void
+evdns_requests_pump_waiting_queue(struct evdns_base *base) {
+ ASSERT_LOCKED(base);
+ while (base->global_requests_inflight < base->global_max_requests_inflight &&
+ base->global_requests_waiting) {
+ struct request *req;
+
+ EVUTIL_ASSERT(base->req_waiting_head);
+ req = base->req_waiting_head;
+
+ req->ns = nameserver_pick(base);
+ if (!req->ns)
+ return;
+
+ /* move a request from the waiting queue to the inflight queue */
+ req->ns->requests_inflight++;
+
+ evdns_request_remove(req, &base->req_waiting_head);
+
+ base->global_requests_waiting--;
+ base->global_requests_inflight++;
+
+ request_trans_id_set(req, transaction_id_pick(base));
+
+ evdns_request_insert(req, &REQ_HEAD(base, req->trans_id));
+ evdns_request_transmit(req);
+ evdns_transmit(base);
+ }
+}
+
+/* TODO(nickm) document */
+struct deferred_reply_callback {
+ struct event_callback deferred;
+ struct evdns_request *handle;
+ u8 request_type;
+ u8 have_reply;
+ u32 ttl;
+ u32 err;
+ evdns_callback_type user_callback;
+ struct reply reply;
+};
+
+static void
+reply_run_callback(struct event_callback *d, void *user_pointer)
+{
+ struct deferred_reply_callback *cb =
+ EVUTIL_UPCAST(d, struct deferred_reply_callback, deferred);
+
+ switch (cb->request_type) {
+ case TYPE_A:
+ if (cb->have_reply)
+ cb->user_callback(DNS_ERR_NONE, DNS_IPv4_A,
+ cb->reply.data.a.addrcount, cb->ttl,
+ cb->reply.data.a.addresses,
+ user_pointer);
+ else
+ cb->user_callback(cb->err, 0, 0, cb->ttl, NULL, user_pointer);
+ break;
+ case TYPE_PTR:
+ if (cb->have_reply) {
+ char *name = cb->reply.data.ptr.name;
+ cb->user_callback(DNS_ERR_NONE, DNS_PTR, 1, cb->ttl,
+ &name, user_pointer);
+ } else {
+ cb->user_callback(cb->err, 0, 0, cb->ttl, NULL, user_pointer);
+ }
+ break;
+ case TYPE_AAAA:
+ if (cb->have_reply)
+ cb->user_callback(DNS_ERR_NONE, DNS_IPv6_AAAA,
+ cb->reply.data.aaaa.addrcount, cb->ttl,
+ cb->reply.data.aaaa.addresses,
+ user_pointer);
+ else
+ cb->user_callback(cb->err, 0, 0, cb->ttl, NULL, user_pointer);
+ break;
+ default:
+ EVUTIL_ASSERT(0);
+ }
+
+ if (cb->handle && cb->handle->pending_cb) {
+ mm_free(cb->handle);
+ }
+
+ mm_free(cb);
+}
+
+static void
+reply_schedule_callback(struct request *const req, u32 ttl, u32 err, struct reply *reply)
+{
+ struct deferred_reply_callback *d = mm_calloc(1, sizeof(*d));
+
+ if (!d) {
+ event_warn("%s: Couldn't allocate space for deferred callback.",
+ __func__);
+ return;
+ }
+
+ ASSERT_LOCKED(req->base);
+
+ d->request_type = req->request_type;
+ d->user_callback = req->user_callback;
+ d->ttl = ttl;
+ d->err = err;
+ if (reply) {
+ d->have_reply = 1;
+ memcpy(&d->reply, reply, sizeof(struct reply));
+ }
+
+ if (req->handle) {
+ req->handle->pending_cb = 1;
+ d->handle = req->handle;
+ }
+
+ event_deferred_cb_init_(
+ &d->deferred,
+ event_get_priority(&req->timeout_event),
+ reply_run_callback,
+ req->user_pointer);
+ event_deferred_cb_schedule_(
+ req->base->event_base,
+ &d->deferred);
+}
+
+/* this processes a parsed reply packet */
+static void
+reply_handle(struct request *const req, u16 flags, u32 ttl, struct reply *reply) {
+ int error;
+ char addrbuf[128];
+ static const int error_codes[] = {
+ DNS_ERR_FORMAT, DNS_ERR_SERVERFAILED, DNS_ERR_NOTEXIST,
+ DNS_ERR_NOTIMPL, DNS_ERR_REFUSED
+ };
+
+ ASSERT_LOCKED(req->base);
+ ASSERT_VALID_REQUEST(req);
+
+ if (flags & 0x020f || !reply || !reply->have_answer) {
+ /* there was an error */
+ if (flags & 0x0200) {
+ error = DNS_ERR_TRUNCATED;
+ } else if (flags & 0x000f) {
+ u16 error_code = (flags & 0x000f) - 1;
+ if (error_code > 4) {
+ error = DNS_ERR_UNKNOWN;
+ } else {
+ error = error_codes[error_code];
+ }
+ } else if (reply && !reply->have_answer) {
+ error = DNS_ERR_NODATA;
+ } else {
+ error = DNS_ERR_UNKNOWN;
+ }
+
+ switch (error) {
+ case DNS_ERR_NOTIMPL:
+ case DNS_ERR_REFUSED:
+ /* we regard these errors as marking a bad nameserver */
+ if (req->reissue_count < req->base->global_max_reissues) {
+ char msg[64];
+ evutil_snprintf(msg, sizeof(msg), "Bad response %d (%s)",
+ error, evdns_err_to_string(error));
+ nameserver_failed(req->ns, msg);
+ if (!request_reissue(req)) return;
+ }
+ break;
+ case DNS_ERR_SERVERFAILED:
+ /* rcode 2 (servfailed) sometimes means "we
+ * are broken" and sometimes (with some binds)
+ * means "that request was very confusing."
+ * Treat this as a timeout, not a failure.
+ */
+			log(EVDNS_LOG_DEBUG, "Got a SERVERFAILED from nameserver "
+ "at %s; will allow the request to time out.",
+ evutil_format_sockaddr_port_(
+ (struct sockaddr *)&req->ns->address,
+ addrbuf, sizeof(addrbuf)));
+ /* Call the timeout function */
+ evdns_request_timeout_callback(0, 0, req);
+ return;
+ default:
+ /* we got a good reply from the nameserver: it is up. */
+ if (req->handle == req->ns->probe_request) {
+ /* Avoid double-free */
+ req->ns->probe_request = NULL;
+ }
+
+ nameserver_up(req->ns);
+ }
+
+ if (req->handle->search_state &&
+ req->request_type != TYPE_PTR) {
+ /* if we have a list of domains to search in,
+ * try the next one */
+ if (!search_try_next(req->handle)) {
+ /* a new request was issued so this
+ * request is finished and */
+ /* the user callback will be made when
+ * that request (or a */
+ /* child of it) finishes. */
+ return;
+ }
+ }
+
+ /* all else failed. Pass the failure up */
+ reply_schedule_callback(req, ttl, error, NULL);
+ request_finished(req, &REQ_HEAD(req->base, req->trans_id), 1);
+ } else {
+ /* all ok, tell the user */
+ reply_schedule_callback(req, ttl, 0, reply);
+ if (req->handle == req->ns->probe_request)
+ req->ns->probe_request = NULL; /* Avoid double-free */
+ nameserver_up(req->ns);
+ request_finished(req, &REQ_HEAD(req->base, req->trans_id), 1);
+ }
+}
+
+static int
+name_parse(u8 *packet, int length, int *idx, char *name_out, int name_out_len) {
+ int name_end = -1;
+ int j = *idx;
+ int ptr_count = 0;
+#define GET32(x) do { if (j + 4 > length) goto err; memcpy(&t32_, packet + j, 4); j += 4; x = ntohl(t32_); } while (0)
+#define GET16(x) do { if (j + 2 > length) goto err; memcpy(&t_, packet + j, 2); j += 2; x = ntohs(t_); } while (0)
+#define GET8(x) do { if (j >= length) goto err; x = packet[j++]; } while (0)
+
+ char *cp = name_out;
+ const char *const end = name_out + name_out_len;
+
+ /* Normally, names are a series of length prefixed strings terminated */
+	/* with a length of 0 (the lengths are u8's < 64). */
+ /* However, the length can start with a pair of 1 bits and that */
+ /* means that the next 14 bits are a pointer within the current */
+ /* packet. */
+
+ for (;;) {
+ u8 label_len;
+ GET8(label_len);
+ if (!label_len) break;
+ if (label_len & 0xc0) {
+ u8 ptr_low;
+ GET8(ptr_low);
+ if (name_end < 0) name_end = j;
+ j = (((int)label_len & 0x3f) << 8) + ptr_low;
+ /* Make sure that the target offset is in-bounds. */
+ if (j < 0 || j >= length) return -1;
+ /* If we've jumped more times than there are characters in the
+ * message, we must have a loop. */
+ if (++ptr_count > length) return -1;
+ continue;
+ }
+ if (label_len > 63) return -1;
+ if (cp != name_out) {
+ if (cp + 1 >= end) return -1;
+ *cp++ = '.';
+ }
+ if (cp + label_len >= end) return -1;
+ if (j + label_len > length) return -1;
+ memcpy(cp, packet + j, label_len);
+ cp += label_len;
+ j += label_len;
+ }
+ if (cp >= end) return -1;
+ *cp = '\0';
+ if (name_end < 0)
+ *idx = j;
+ else
+ *idx = name_end;
+ return 0;
+ err:
+ return -1;
+}
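+
+/* Editorial example (not part of the original patch): the wire format that
+ * name_parse() decodes.  A name is a run of length-prefixed labels ended by
+ * a zero length, and any label position may instead hold a two-byte
+ * compression pointer whose top two bits are set:
+ *
+ *   03 'w' 'w' 'w'  07 'e' 'x' 'a' 'm' 'p' 'l' 'e'  03 'c' 'o' 'm'  00
+ *       -> "www.example.com"
+ *   c0 0c
+ *       -> pointer to packet offset 0x0c (the 14 bits after the two flag
+ *          bits), typically the question name just past the 12-byte header.
+ *
+ * The ptr_count check above rejects packets whose pointers form a loop. */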
+
+/* parses a raw request from a nameserver */
+static int
+reply_parse(struct evdns_base *base, u8 *packet, int length) {
+ int j = 0, k = 0; /* index into packet */
+ u16 t_; /* used by the macros */
+ u32 t32_; /* used by the macros */
+ char tmp_name[256], cmp_name[256]; /* used by the macros */
+ int name_matches = 0;
+
+ u16 trans_id, questions, answers, authority, additional, datalength;
+ u16 flags = 0;
+ u32 ttl, ttl_r = 0xffffffff;
+ struct reply reply;
+ struct request *req = NULL;
+ unsigned int i;
+
+ ASSERT_LOCKED(base);
+
+ GET16(trans_id);
+ GET16(flags);
+ GET16(questions);
+ GET16(answers);
+ GET16(authority);
+ GET16(additional);
+ (void) authority; /* suppress "unused variable" warnings. */
+ (void) additional; /* suppress "unused variable" warnings. */
+
+ req = request_find_from_trans_id(base, trans_id);
+ if (!req) return -1;
+ EVUTIL_ASSERT(req->base == base);
+
+ memset(&reply, 0, sizeof(reply));
+
+ /* If it's not an answer, it doesn't correspond to any request. */
+ if (!(flags & 0x8000)) return -1; /* must be an answer */
+ if ((flags & 0x020f) && (flags & 0x020f) != DNS_ERR_NOTEXIST) {
+ /* there was an error and it's not NXDOMAIN */
+ goto err;
+ }
+ /* if (!answers) return; */ /* must have an answer of some form */
+
+ /* This macro skips a name in the DNS reply. */
+#define SKIP_NAME \
+ do { tmp_name[0] = '\0'; \
+ if (name_parse(packet, length, &j, tmp_name, \
+ sizeof(tmp_name))<0) \
+ goto err; \
+ } while (0)
+
+ reply.type = req->request_type;
+
+ /* skip over each question in the reply */
+ for (i = 0; i < questions; ++i) {
+ /* the question looks like
+ * <label:name><u16:type><u16:class>
+ */
+ tmp_name[0] = '\0';
+ cmp_name[0] = '\0';
+ k = j;
+ if (name_parse(packet, length, &j, tmp_name, sizeof(tmp_name)) < 0)
+ goto err;
+ if (name_parse(req->request, req->request_len, &k,
+ cmp_name, sizeof(cmp_name))<0)
+ goto err;
+ if (!base->global_randomize_case) {
+ if (strcmp(tmp_name, cmp_name) == 0)
+ name_matches = 1;
+ } else {
+ if (evutil_ascii_strcasecmp(tmp_name, cmp_name) == 0)
+ name_matches = 1;
+ }
+
+ j += 4;
+ if (j > length)
+ goto err;
+ }
+
+ if (!name_matches)
+ goto err;
+
+ /* now we have the answer section which looks like
+ * <label:name><u16:type><u16:class><u32:ttl><u16:len><data...>
+ */
+
+ for (i = 0; i < answers; ++i) {
+ u16 type, class;
+
+ SKIP_NAME;
+ GET16(type);
+ GET16(class);
+ GET32(ttl);
+ GET16(datalength);
+
+ if (type == TYPE_A && class == CLASS_INET) {
+ int addrcount, addrtocopy;
+ if (req->request_type != TYPE_A) {
+ j += datalength; continue;
+ }
+ if ((datalength & 3) != 0) /* not an even number of As. */
+ goto err;
+ addrcount = datalength >> 2;
+ addrtocopy = MIN(MAX_V4_ADDRS - reply.data.a.addrcount, (unsigned)addrcount);
+
+ ttl_r = MIN(ttl_r, ttl);
+			/* we only record the first MAX_V4_ADDRS addresses. */
+ if (j + 4*addrtocopy > length) goto err;
+ memcpy(&reply.data.a.addresses[reply.data.a.addrcount],
+ packet + j, 4*addrtocopy);
+ j += 4*addrtocopy;
+ reply.data.a.addrcount += addrtocopy;
+ reply.have_answer = 1;
+ if (reply.data.a.addrcount == MAX_V4_ADDRS) break;
+ } else if (type == TYPE_PTR && class == CLASS_INET) {
+ if (req->request_type != TYPE_PTR) {
+ j += datalength; continue;
+ }
+ if (name_parse(packet, length, &j, reply.data.ptr.name,
+ sizeof(reply.data.ptr.name))<0)
+ goto err;
+ ttl_r = MIN(ttl_r, ttl);
+ reply.have_answer = 1;
+ break;
+ } else if (type == TYPE_CNAME) {
+ char cname[HOST_NAME_MAX];
+ if (!req->put_cname_in_ptr || *req->put_cname_in_ptr) {
+ j += datalength; continue;
+ }
+ if (name_parse(packet, length, &j, cname,
+ sizeof(cname))<0)
+ goto err;
+ *req->put_cname_in_ptr = mm_strdup(cname);
+ } else if (type == TYPE_AAAA && class == CLASS_INET) {
+ int addrcount, addrtocopy;
+ if (req->request_type != TYPE_AAAA) {
+ j += datalength; continue;
+ }
+ if ((datalength & 15) != 0) /* not an even number of AAAAs. */
+ goto err;
+ addrcount = datalength >> 4; /* each address is 16 bytes long */
+ addrtocopy = MIN(MAX_V6_ADDRS - reply.data.aaaa.addrcount, (unsigned)addrcount);
+ ttl_r = MIN(ttl_r, ttl);
+
+			/* we only record the first MAX_V6_ADDRS addresses. */
+ if (j + 16*addrtocopy > length) goto err;
+ memcpy(&reply.data.aaaa.addresses[reply.data.aaaa.addrcount],
+ packet + j, 16*addrtocopy);
+ reply.data.aaaa.addrcount += addrtocopy;
+ j += 16*addrtocopy;
+ reply.have_answer = 1;
+ if (reply.data.aaaa.addrcount == MAX_V6_ADDRS) break;
+ } else {
+ /* skip over any other type of resource */
+ j += datalength;
+ }
+ }
+
+ if (!reply.have_answer) {
+ for (i = 0; i < authority; ++i) {
+ u16 type, class;
+ SKIP_NAME;
+ GET16(type);
+ GET16(class);
+ GET32(ttl);
+ GET16(datalength);
+ if (type == TYPE_SOA && class == CLASS_INET) {
+ u32 serial, refresh, retry, expire, minimum;
+ SKIP_NAME;
+ SKIP_NAME;
+ GET32(serial);
+ GET32(refresh);
+ GET32(retry);
+ GET32(expire);
+ GET32(minimum);
+ (void)expire;
+ (void)retry;
+ (void)refresh;
+ (void)serial;
+ ttl_r = MIN(ttl_r, ttl);
+ ttl_r = MIN(ttl_r, minimum);
+ } else {
+ /* skip over any other type of resource */
+ j += datalength;
+ }
+ }
+ }
+
+ if (ttl_r == 0xffffffff)
+ ttl_r = 0;
+
+ reply_handle(req, flags, ttl_r, &reply);
+ return 0;
+ err:
+ if (req)
+ reply_handle(req, flags, 0, NULL);
+ return -1;
+}
+
+/* Parse a raw request (packet,length) sent to a nameserver port (port) from */
+/* a DNS client (addr,addrlen), and if it's well-formed, call the corresponding */
+/* callback. */
+static int
+request_parse(u8 *packet, int length, struct evdns_server_port *port, struct sockaddr *addr, ev_socklen_t addrlen)
+{
+ int j = 0; /* index into packet */
+ u16 t_; /* used by the macros */
+ char tmp_name[256]; /* used by the macros */
+
+ int i;
+ u16 trans_id, flags, questions, answers, authority, additional;
+ struct server_request *server_req = NULL;
+
+ ASSERT_LOCKED(port);
+
+ /* Get the header fields */
+ GET16(trans_id);
+ GET16(flags);
+ GET16(questions);
+ GET16(answers);
+ GET16(authority);
+ GET16(additional);
+ (void)answers;
+ (void)additional;
+ (void)authority;
+
+ if (flags & 0x8000) return -1; /* Must not be an answer. */
+ flags &= 0x0110; /* Only RD and CD get preserved. */
+
+ server_req = mm_malloc(sizeof(struct server_request));
+ if (server_req == NULL) return -1;
+ memset(server_req, 0, sizeof(struct server_request));
+
+ server_req->trans_id = trans_id;
+ memcpy(&server_req->addr, addr, addrlen);
+ server_req->addrlen = addrlen;
+
+ server_req->base.flags = flags;
+ server_req->base.nquestions = 0;
+ server_req->base.questions = mm_calloc(sizeof(struct evdns_server_question *), questions);
+ if (server_req->base.questions == NULL)
+ goto err;
+
+ for (i = 0; i < questions; ++i) {
+ u16 type, class;
+ struct evdns_server_question *q;
+ int namelen;
+ if (name_parse(packet, length, &j, tmp_name, sizeof(tmp_name))<0)
+ goto err;
+ GET16(type);
+ GET16(class);
+ namelen = (int)strlen(tmp_name);
+ q = mm_malloc(sizeof(struct evdns_server_question) + namelen);
+ if (!q)
+ goto err;
+ q->type = type;
+ q->dns_question_class = class;
+ memcpy(q->name, tmp_name, namelen+1);
+ server_req->base.questions[server_req->base.nquestions++] = q;
+ }
+
+ /* Ignore answers, authority, and additional. */
+
+ server_req->port = port;
+ port->refcnt++;
+
+ /* Only standard queries are supported. */
+ if (flags & 0x7800) {
+ evdns_server_request_respond(&(server_req->base), DNS_ERR_NOTIMPL);
+ return -1;
+ }
+
+ port->user_callback(&(server_req->base), port->user_data);
+
+ return 0;
+err:
+ if (server_req) {
+ if (server_req->base.questions) {
+ for (i = 0; i < server_req->base.nquestions; ++i)
+ mm_free(server_req->base.questions[i]);
+ mm_free(server_req->base.questions);
+ }
+ mm_free(server_req);
+ }
+ return -1;
+
+#undef SKIP_NAME
+#undef GET32
+#undef GET16
+#undef GET8
+}
+
+
+void
+evdns_set_transaction_id_fn(ev_uint16_t (*fn)(void))
+{
+}
+
+void
+evdns_set_random_bytes_fn(void (*fn)(char *, size_t))
+{
+}
+
+/* Try to choose a strong transaction id which isn't already in flight */
+static u16
+transaction_id_pick(struct evdns_base *base) {
+ ASSERT_LOCKED(base);
+ for (;;) {
+ u16 trans_id;
+ evutil_secure_rng_get_bytes(&trans_id, sizeof(trans_id));
+
+ if (trans_id == 0xffff) continue;
+ /* now check to see if that id is already inflight */
+ if (request_find_from_trans_id(base, trans_id) == NULL)
+ return trans_id;
+ }
+}
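+
+/* Editorial note (not part of the original patch): ids come from the secure
+ * RNG rather than a counter so an off-path attacker cannot easily predict
+ * them and forge replies; 0xffff is skipped, presumably because it is
+ * reserved as a sentinel value elsewhere in this code. */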
+
+/* choose a nameserver to use. This function will try to ignore */
+/* nameservers which we think are down and load balance across the rest */
+/* by rotating base->server_head each time. */
+static struct nameserver *
+nameserver_pick(struct evdns_base *base) {
+ struct nameserver *started_at = base->server_head, *picked;
+ ASSERT_LOCKED(base);
+ if (!base->server_head) return NULL;
+
+ /* if we don't have any good nameservers then there's no */
+ /* point in trying to find one. */
+ if (!base->global_good_nameservers) {
+ base->server_head = base->server_head->next;
+ return base->server_head;
+ }
+
+ /* remember that nameservers are in a circular list */
+ for (;;) {
+ if (base->server_head->state) {
+ /* we think this server is currently good */
+ picked = base->server_head;
+ base->server_head = base->server_head->next;
+ return picked;
+ }
+
+ base->server_head = base->server_head->next;
+ if (base->server_head == started_at) {
+ /* all the nameservers seem to be down */
+ /* so we just return this one and hope for the */
+ /* best */
+ EVUTIL_ASSERT(base->global_good_nameservers == 0);
+ picked = base->server_head;
+ base->server_head = base->server_head->next;
+ return picked;
+ }
+ }
+}
+
+/* this is called when a nameserver socket is ready for reading */
+static void
+nameserver_read(struct nameserver *ns) {
+ struct sockaddr_storage ss;
+ ev_socklen_t addrlen = sizeof(ss);
+ u8 packet[1500];
+ char addrbuf[128];
+ ASSERT_LOCKED(ns->base);
+
+ for (;;) {
+ const int r = recvfrom(ns->socket, (void*)packet,
+ sizeof(packet), 0,
+ (struct sockaddr*)&ss, &addrlen);
+ if (r < 0) {
+ int err = evutil_socket_geterror(ns->socket);
+ if (EVUTIL_ERR_RW_RETRIABLE(err))
+ return;
+ nameserver_failed(ns,
+ evutil_socket_error_to_string(err));
+ return;
+ }
+ if (evutil_sockaddr_cmp((struct sockaddr*)&ss,
+ (struct sockaddr*)&ns->address, 0)) {
+ log(EVDNS_LOG_WARN, "Address mismatch on received "
+ "DNS packet. Apparent source was %s",
+ evutil_format_sockaddr_port_(
+ (struct sockaddr *)&ss,
+ addrbuf, sizeof(addrbuf)));
+ return;
+ }
+
+ ns->timedout = 0;
+ reply_parse(ns->base, packet, r);
+ }
+}
+
+/* Read a packet from a DNS client on a server port s, parse it, and */
+/* act accordingly. */
+static void
+server_port_read(struct evdns_server_port *s) {
+ u8 packet[1500];
+ struct sockaddr_storage addr;
+ ev_socklen_t addrlen;
+ int r;
+ ASSERT_LOCKED(s);
+
+ for (;;) {
+ addrlen = sizeof(struct sockaddr_storage);
+ r = recvfrom(s->socket, (void*)packet, sizeof(packet), 0,
+ (struct sockaddr*) &addr, &addrlen);
+ if (r < 0) {
+ int err = evutil_socket_geterror(s->socket);
+ if (EVUTIL_ERR_RW_RETRIABLE(err))
+ return;
+ log(EVDNS_LOG_WARN,
+ "Error %s (%d) while reading request.",
+ evutil_socket_error_to_string(err), err);
+ return;
+ }
+ request_parse(packet, r, s, (struct sockaddr*) &addr, addrlen);
+ }
+}
+
+/* Try to write all pending replies on a given DNS server port. */
+static void
+server_port_flush(struct evdns_server_port *port)
+{
+ struct server_request *req = port->pending_replies;
+ ASSERT_LOCKED(port);
+ while (req) {
+ int r = sendto(port->socket, req->response, (int)req->response_len, 0,
+ (struct sockaddr*) &req->addr, (ev_socklen_t)req->addrlen);
+ if (r < 0) {
+ int err = evutil_socket_geterror(port->socket);
+ if (EVUTIL_ERR_RW_RETRIABLE(err))
+ return;
+ log(EVDNS_LOG_WARN, "Error %s (%d) while writing response to port; dropping", evutil_socket_error_to_string(err), err);
+ }
+ if (server_request_free(req)) {
+ /* we released the last reference to req->port. */
+ return;
+ } else {
+ EVUTIL_ASSERT(req != port->pending_replies);
+ req = port->pending_replies;
+ }
+ }
+
+ /* We have no more pending requests; stop listening for 'writable' events. */
+ (void) event_del(&port->event);
+ event_assign(&port->event, port->event_base,
+ port->socket, EV_READ | EV_PERSIST,
+ server_port_ready_callback, port);
+
+ if (event_add(&port->event, NULL) < 0) {
+ log(EVDNS_LOG_WARN, "Error from libevent when adding event for DNS server.");
+ /* ???? Do more? */
+ }
+}
+
+/* Set whether we are waiting for the ability to write to this server. */
+/* If waiting is true then we ask libevent for EV_WRITE events; otherwise */
+/* we stop requesting them. */
+static void
+nameserver_write_waiting(struct nameserver *ns, char waiting) {
+ ASSERT_LOCKED(ns->base);
+ if (ns->write_waiting == waiting) return;
+
+ ns->write_waiting = waiting;
+ (void) event_del(&ns->event);
+ event_assign(&ns->event, ns->base->event_base,
+ ns->socket, EV_READ | (waiting ? EV_WRITE : 0) | EV_PERSIST,
+ nameserver_ready_callback, ns);
+ if (event_add(&ns->event, NULL) < 0) {
+ char addrbuf[128];
+ log(EVDNS_LOG_WARN, "Error from libevent when adding event for %s",
+ evutil_format_sockaddr_port_(
+ (struct sockaddr *)&ns->address,
+ addrbuf, sizeof(addrbuf)));
+ /* ???? Do more? */
+ }
+}
+
+/* a callback function. Called by libevent when the kernel says that */
+/* a nameserver socket is ready for writing or reading */
+static void
+nameserver_ready_callback(evutil_socket_t fd, short events, void *arg) {
+ struct nameserver *ns = (struct nameserver *) arg;
+ (void)fd;
+
+ EVDNS_LOCK(ns->base);
+ if (events & EV_WRITE) {
+ ns->choked = 0;
+ if (!evdns_transmit(ns->base)) {
+ nameserver_write_waiting(ns, 0);
+ }
+ }
+ if (events & EV_READ) {
+ nameserver_read(ns);
+ }
+ EVDNS_UNLOCK(ns->base);
+}
+
+/* a callback function. Called by libevent when the kernel says that */
+/* a server socket is ready for writing or reading. */
+static void
+server_port_ready_callback(evutil_socket_t fd, short events, void *arg) {
+ struct evdns_server_port *port = (struct evdns_server_port *) arg;
+ (void) fd;
+
+ EVDNS_LOCK(port);
+ if (events & EV_WRITE) {
+ port->choked = 0;
+ server_port_flush(port);
+ }
+ if (events & EV_READ) {
+ server_port_read(port);
+ }
+ EVDNS_UNLOCK(port);
+}
+
+/* This is an inefficient representation; only use it via the dnslabel_table_*
+ * functions, so that it can be safely replaced with something smarter later. */
+#define MAX_LABELS 128
+/* Structures used to implement name compression */
+struct dnslabel_entry { char *v; off_t pos; };
+struct dnslabel_table {
+ int n_labels; /* number of current entries */
+ /* map from name to position in message */
+ struct dnslabel_entry labels[MAX_LABELS];
+};
+
+/* Initialize dnslabel_table. */
+static void
+dnslabel_table_init(struct dnslabel_table *table)
+{
+ table->n_labels = 0;
+}
+
+/* Free all storage held by table, but not the table itself. */
+static void
+dnslabel_clear(struct dnslabel_table *table)
+{
+ int i;
+ for (i = 0; i < table->n_labels; ++i)
+ mm_free(table->labels[i].v);
+ table->n_labels = 0;
+}
+
+/* return the position of the label in the current message, or -1 if the label */
+/* hasn't been used yet. */
+static int
+dnslabel_table_get_pos(const struct dnslabel_table *table, const char *label)
+{
+ int i;
+ for (i = 0; i < table->n_labels; ++i) {
+ if (!strcmp(label, table->labels[i].v))
+ return table->labels[i].pos;
+ }
+ return -1;
+}
+
+/* remember that we've used the label at position pos */
+static int
+dnslabel_table_add(struct dnslabel_table *table, const char *label, off_t pos)
+{
+ char *v;
+ int p;
+ if (table->n_labels == MAX_LABELS)
+ return (-1);
+ v = mm_strdup(label);
+ if (v == NULL)
+ return (-1);
+ p = table->n_labels++;
+ table->labels[p].v = v;
+ table->labels[p].pos = pos;
+
+ return (0);
+}
+
+/* Converts a string to a length-prefixed set of DNS labels, starting */
+/* at buf[j]. name and buf must not overlap. name_len should be the length */
+/* of name. table is optional, and is used for compression. */
+/* */
+/* Input: abc.def */
+/* Output: <3>abc<3>def<0> */
+/* */
+/* Returns the first index after the encoded name, or negative on error. */
+/* -1 label was > 63 bytes */
+/* -2 name too long to fit in buffer. */
+/* */
+static off_t
+dnsname_to_labels(u8 *const buf, size_t buf_len, off_t j,
+ const char *name, const size_t name_len,
+ struct dnslabel_table *table) {
+ const char *end = name + name_len;
+ int ref = 0;
+ u16 t_;
+
+#define APPEND16(x) do { \
+ if (j + 2 > (off_t)buf_len) \
+ goto overflow; \
+ t_ = htons(x); \
+ memcpy(buf + j, &t_, 2); \
+ j += 2; \
+ } while (0)
+#define APPEND32(x) do { \
+ if (j + 4 > (off_t)buf_len) \
+ goto overflow; \
+ t32_ = htonl(x); \
+ memcpy(buf + j, &t32_, 4); \
+ j += 4; \
+ } while (0)
+
+ if (name_len > 255) return -2;
+
+ for (;;) {
+ const char *const start = name;
+ if (table && (ref = dnslabel_table_get_pos(table, name)) >= 0) {
+ APPEND16(ref | 0xc000);
+ return j;
+ }
+ name = strchr(name, '.');
+ if (!name) {
+ const size_t label_len = end - start;
+ if (label_len > 63) return -1;
+ if ((size_t)(j+label_len+1) > buf_len) return -2;
+ if (table) dnslabel_table_add(table, start, j);
+ buf[j++] = (ev_uint8_t)label_len;
+
+ memcpy(buf + j, start, label_len);
+ j += (int) label_len;
+ break;
+ } else {
+ /* append length of the label. */
+ const size_t label_len = name - start;
+ if (label_len > 63) return -1;
+ if ((size_t)(j+label_len+1) > buf_len) return -2;
+ if (table) dnslabel_table_add(table, start, j);
+ buf[j++] = (ev_uint8_t)label_len;
+
+ memcpy(buf + j, start, label_len);
+ j += (int) label_len;
+ /* hop over the '.' */
+ name++;
+ }
+ }
+
+ /* the labels must be terminated by a 0. */
+ /* It's possible that the name ended in a . */
+ /* in which case the zero is already there */
+ if (!j || buf[j-1]) buf[j++] = 0;
+ return j;
+ overflow:
+ return (-2);
+}
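+
+/* Illustrative note (added, not part of the upstream source): the compression
+ * table turns a repeated suffix into a two-byte pointer instead of repeating
+ * its labels.  For example, if "www.example.com" has already been written into
+ * the same message, encoding "mail.example.com" with the same table produces
+ * <4>mail followed by a 16-bit pointer (0xc000 | offset of "example.com"),
+ * because dnslabel_table_get_pos() finds the stored suffix and APPEND16()
+ * emits the reference. */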
+
+/* Finds the length of a dns request for a DNS name of the given */
+/* length. The actual request may be smaller than the value returned */
+/* here */
+static size_t
+evdns_request_len(const size_t name_len) {
+ return 96 + /* length of the DNS standard header */
+ name_len + 2 +
+ 4; /* space for the resource type */
+}
+
+/* build a dns request packet into buf. buf should be at least as long */
+/* as evdns_request_len told you it should be. */
+/* */
+/* Returns the amount of space used. Negative on error. */
+static int
+evdns_request_data_build(const char *const name, const size_t name_len,
+ const u16 trans_id, const u16 type, const u16 class,
+ u8 *const buf, size_t buf_len) {
+ off_t j = 0; /* current offset into buf */
+ u16 t_; /* used by the macros */
+
+ APPEND16(trans_id);
+ APPEND16(0x0100); /* standard query, recursion desired */
+ APPEND16(1); /* one question */
+ APPEND16(0); /* no answers */
+ APPEND16(0); /* no authority */
+ APPEND16(0); /* no additional */
+
+ j = dnsname_to_labels(buf, buf_len, j, name, name_len, NULL);
+ if (j < 0) {
+ return (int)j;
+ }
+
+ APPEND16(type);
+ APPEND16(class);
+
+ return (int)j;
+ overflow:
+ return (-1);
+}
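+
+/* Illustrative note (added, not part of the upstream source): for a query for
+ * "example.com" with type TYPE_A and class CLASS_INET, the buffer built above
+ * contains, in order:
+ *
+ *     trans_id, 0x0100, 1, 0, 0, 0        six 16-bit header words
+ *     <7>example<3>com<0>                 the encoded question name
+ *     0x0001, 0x0001                      TYPE_A, CLASS_INET
+ */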
+
+/* exported function */
+struct evdns_server_port *
+evdns_add_server_port_with_base(struct event_base *base, evutil_socket_t socket, int flags, evdns_request_callback_fn_type cb, void *user_data)
+{
+ struct evdns_server_port *port;
+ if (flags)
+ return NULL; /* flags not yet implemented */
+ if (!(port = mm_malloc(sizeof(struct evdns_server_port))))
+ return NULL;
+ memset(port, 0, sizeof(struct evdns_server_port));
+
+
+ port->socket = socket;
+ port->refcnt = 1;
+ port->choked = 0;
+ port->closing = 0;
+ port->user_callback = cb;
+ port->user_data = user_data;
+ port->pending_replies = NULL;
+ port->event_base = base;
+
+ event_assign(&port->event, port->event_base,
+ port->socket, EV_READ | EV_PERSIST,
+ server_port_ready_callback, port);
+ if (event_add(&port->event, NULL) < 0) {
+ mm_free(port);
+ return NULL;
+ }
+ EVTHREAD_ALLOC_LOCK(port->lock, EVTHREAD_LOCKTYPE_RECURSIVE);
+ return port;
+}
+
+struct evdns_server_port *
+evdns_add_server_port(evutil_socket_t socket, int flags, evdns_request_callback_fn_type cb, void *user_data)
+{
+ return evdns_add_server_port_with_base(NULL, socket, flags, cb, user_data);
+}
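+
+/* Usage sketch (added, illustrative only): a caller normally binds its own
+ * nonblocking UDP socket and hands it to evdns_add_server_port_with_base(),
+ * roughly like this (listen_addr and my_request_cb are the caller's own):
+ *
+ *     evutil_socket_t sock = socket(AF_INET, SOCK_DGRAM, 0);
+ *     evutil_make_socket_nonblocking(sock);
+ *     bind(sock, (struct sockaddr *)&listen_addr, sizeof(listen_addr));
+ *     struct evdns_server_port *port =
+ *         evdns_add_server_port_with_base(base, sock, 0, my_request_cb, NULL);
+ */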
+
+/* exported function */
+void
+evdns_close_server_port(struct evdns_server_port *port)
+{
+ EVDNS_LOCK(port);
+ if (--port->refcnt == 0) {
+ EVDNS_UNLOCK(port);
+ server_port_free(port);
+ } else {
+ port->closing = 1;
+ }
+}
+
+/* exported function */
+int
+evdns_server_request_add_reply(struct evdns_server_request *req_, int section, const char *name, int type, int class, int ttl, int datalen, int is_name, const char *data)
+{
+ struct server_request *req = TO_SERVER_REQUEST(req_);
+ struct server_reply_item **itemp, *item;
+ int *countp;
+ int result = -1;
+
+ EVDNS_LOCK(req->port);
+ if (req->response) /* have we already answered? */
+ goto done;
+
+ switch (section) {
+ case EVDNS_ANSWER_SECTION:
+ itemp = &req->answer;
+ countp = &req->n_answer;
+ break;
+ case EVDNS_AUTHORITY_SECTION:
+ itemp = &req->authority;
+ countp = &req->n_authority;
+ break;
+ case EVDNS_ADDITIONAL_SECTION:
+ itemp = &req->additional;
+ countp = &req->n_additional;
+ break;
+ default:
+ goto done;
+ }
+ while (*itemp) {
+ itemp = &((*itemp)->next);
+ }
+ item = mm_malloc(sizeof(struct server_reply_item));
+ if (!item)
+ goto done;
+ item->next = NULL;
+ if (!(item->name = mm_strdup(name))) {
+ mm_free(item);
+ goto done;
+ }
+ item->type = type;
+ item->dns_question_class = class;
+ item->ttl = ttl;
+ item->is_name = is_name != 0;
+ item->datalen = 0;
+ item->data = NULL;
+ if (data) {
+ if (item->is_name) {
+ if (!(item->data = mm_strdup(data))) {
+ mm_free(item->name);
+ mm_free(item);
+ goto done;
+ }
+ item->datalen = (u16)-1;
+ } else {
+ if (!(item->data = mm_malloc(datalen))) {
+ mm_free(item->name);
+ mm_free(item);
+ goto done;
+ }
+ item->datalen = datalen;
+ memcpy(item->data, data, datalen);
+ }
+ }
+
+ *itemp = item;
+ ++(*countp);
+ result = 0;
+done:
+ EVDNS_UNLOCK(req->port);
+ return result;
+}
+
+/* exported function */
+int
+evdns_server_request_add_a_reply(struct evdns_server_request *req, const char *name, int n, const void *addrs, int ttl)
+{
+ return evdns_server_request_add_reply(
+ req, EVDNS_ANSWER_SECTION, name, TYPE_A, CLASS_INET,
+ ttl, n*4, 0, addrs);
+}
+
+/* exported function */
+int
+evdns_server_request_add_aaaa_reply(struct evdns_server_request *req, const char *name, int n, const void *addrs, int ttl)
+{
+ return evdns_server_request_add_reply(
+ req, EVDNS_ANSWER_SECTION, name, TYPE_AAAA, CLASS_INET,
+ ttl, n*16, 0, addrs);
+}
+
+/* exported function */
+int
+evdns_server_request_add_ptr_reply(struct evdns_server_request *req, struct in_addr *in, const char *inaddr_name, const char *hostname, int ttl)
+{
+ u32 a;
+ char buf[32];
+ if (in && inaddr_name)
+ return -1;
+ else if (!in && !inaddr_name)
+ return -1;
+ if (in) {
+ a = ntohl(in->s_addr);
+ evutil_snprintf(buf, sizeof(buf), "%d.%d.%d.%d.in-addr.arpa",
+ (int)(u8)((a )&0xff),
+ (int)(u8)((a>>8 )&0xff),
+ (int)(u8)((a>>16)&0xff),
+ (int)(u8)((a>>24)&0xff));
+ inaddr_name = buf;
+ }
+ return evdns_server_request_add_reply(
+ req, EVDNS_ANSWER_SECTION, inaddr_name, TYPE_PTR, CLASS_INET,
+ ttl, -1, 1, hostname);
+}
+
+/* exported function */
+int
+evdns_server_request_add_cname_reply(struct evdns_server_request *req, const char *name, const char *cname, int ttl)
+{
+ return evdns_server_request_add_reply(
+ req, EVDNS_ANSWER_SECTION, name, TYPE_CNAME, CLASS_INET,
+ ttl, -1, 1, cname);
+}
+
+/* exported function */
+void
+evdns_server_request_set_flags(struct evdns_server_request *exreq, int flags)
+{
+ struct server_request *req = TO_SERVER_REQUEST(exreq);
+ req->base.flags &= ~(EVDNS_FLAGS_AA|EVDNS_FLAGS_RD);
+ req->base.flags |= flags;
+}
+
+static int
+evdns_server_request_format_response(struct server_request *req, int err)
+{
+ unsigned char buf[1500];
+ size_t buf_len = sizeof(buf);
+ off_t j = 0, r;
+ u16 t_;
+ u32 t32_;
+ int i;
+ u16 flags;
+ struct dnslabel_table table;
+
+ if (err < 0 || err > 15) return -1;
+
+ /* Set response bit and error code; copy OPCODE and RD fields from
+ * question; copy RA and AA if set by caller. */
+ flags = req->base.flags;
+ flags |= (0x8000 | err);
+
+ dnslabel_table_init(&table);
+ APPEND16(req->trans_id);
+ APPEND16(flags);
+ APPEND16(req->base.nquestions);
+ APPEND16(req->n_answer);
+ APPEND16(req->n_authority);
+ APPEND16(req->n_additional);
+
+ /* Add questions. */
+ for (i=0; i < req->base.nquestions; ++i) {
+ const char *s = req->base.questions[i]->name;
+ j = dnsname_to_labels(buf, buf_len, j, s, strlen(s), &table);
+ if (j < 0) {
+ dnslabel_clear(&table);
+ return (int) j;
+ }
+ APPEND16(req->base.questions[i]->type);
+ APPEND16(req->base.questions[i]->dns_question_class);
+ }
+
+ /* Add answer, authority, and additional sections. */
+ for (i=0; i<3; ++i) {
+ struct server_reply_item *item;
+ if (i==0)
+ item = req->answer;
+ else if (i==1)
+ item = req->authority;
+ else
+ item = req->additional;
+ while (item) {
+ r = dnsname_to_labels(buf, buf_len, j, item->name, strlen(item->name), &table);
+ if (r < 0)
+ goto overflow;
+ j = r;
+
+ APPEND16(item->type);
+ APPEND16(item->dns_question_class);
+ APPEND32(item->ttl);
+ if (item->is_name) {
+ off_t len_idx = j, name_start;
+ j += 2;
+ name_start = j;
+ r = dnsname_to_labels(buf, buf_len, j, item->data, strlen(item->data), &table);
+ if (r < 0)
+ goto overflow;
+ j = r;
+ t_ = htons( (short) (j-name_start) );
+ memcpy(buf+len_idx, &t_, 2);
+ } else {
+ APPEND16(item->datalen);
+ if (j+item->datalen > (off_t)buf_len)
+ goto overflow;
+ memcpy(buf+j, item->data, item->datalen);
+ j += item->datalen;
+ }
+ item = item->next;
+ }
+ }
+
+ if (j > 512) {
+overflow:
+ j = 512;
+ buf[2] |= 0x02; /* set the truncated bit. */
+ }
+
+ req->response_len = j;
+
+ if (!(req->response = mm_malloc(req->response_len))) {
+ server_request_free_answers(req);
+ dnslabel_clear(&table);
+ return (-1);
+ }
+ memcpy(req->response, buf, req->response_len);
+ server_request_free_answers(req);
+ dnslabel_clear(&table);
+ return (0);
+}
+
+/* exported function */
+int
+evdns_server_request_respond(struct evdns_server_request *req_, int err)
+{
+ struct server_request *req = TO_SERVER_REQUEST(req_);
+ struct evdns_server_port *port = req->port;
+ int r = -1;
+
+ EVDNS_LOCK(port);
+ if (!req->response) {
+ if ((r = evdns_server_request_format_response(req, err))<0)
+ goto done;
+ }
+
+ r = sendto(port->socket, req->response, (int)req->response_len, 0,
+ (struct sockaddr*) &req->addr, (ev_socklen_t)req->addrlen);
+ if (r<0) {
+ int sock_err = evutil_socket_geterror(port->socket);
+ if (EVUTIL_ERR_RW_RETRIABLE(sock_err))
+ goto done;
+
+ if (port->pending_replies) {
+ req->prev_pending = port->pending_replies->prev_pending;
+ req->next_pending = port->pending_replies;
+ req->prev_pending->next_pending =
+ req->next_pending->prev_pending = req;
+ } else {
+ req->prev_pending = req->next_pending = req;
+ port->pending_replies = req;
+ port->choked = 1;
+
+ (void) event_del(&port->event);
+ event_assign(&port->event, port->event_base, port->socket, (port->closing?0:EV_READ) | EV_WRITE | EV_PERSIST, server_port_ready_callback, port);
+
+ if (event_add(&port->event, NULL) < 0) {
+ log(EVDNS_LOG_WARN, "Error from libevent when adding event for DNS server");
+ }
+
+ }
+
+ r = 1;
+ goto done;
+ }
+ if (server_request_free(req)) {
+ r = 0;
+ goto done;
+ }
+
+ if (port->pending_replies)
+ server_port_flush(port);
+
+ r = 0;
+done:
+ EVDNS_UNLOCK(port);
+ return r;
+}
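+
+/* Usage sketch (added, illustrative only): inside the callback passed to
+ * evdns_add_server_port_with_base(), a reply is typically built with one of
+ * the evdns_server_request_add_*_reply() helpers and then sent:
+ *
+ *     ev_uint32_t addr = htonl(0x7f000001);   // example value: 127.0.0.1
+ *     evdns_server_request_add_a_reply(req, req->questions[0]->name,
+ *         1, &addr, 300);
+ *     evdns_server_request_respond(req, 0);   // 0 == no error
+ */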
+
+/* Free all storage held by RRs in req. */
+static void
+server_request_free_answers(struct server_request *req)
+{
+ struct server_reply_item *victim, *next, **list;
+ int i;
+ for (i = 0; i < 3; ++i) {
+ if (i==0)
+ list = &req->answer;
+ else if (i==1)
+ list = &req->authority;
+ else
+ list = &req->additional;
+
+ victim = *list;
+ while (victim) {
+ next = victim->next;
+ mm_free(victim->name);
+ if (victim->data)
+ mm_free(victim->data);
+ mm_free(victim);
+ victim = next;
+ }
+ *list = NULL;
+ }
+}
+
+/* Free all storage held by req, and remove links to it. */
+/* return true iff we just wound up freeing the server_port. */
+static int
+server_request_free(struct server_request *req)
+{
+ int i, rc=1, lock=0;
+ if (req->base.questions) {
+ for (i = 0; i < req->base.nquestions; ++i)
+ mm_free(req->base.questions[i]);
+ mm_free(req->base.questions);
+ }
+
+ if (req->port) {
+ EVDNS_LOCK(req->port);
+ lock=1;
+ if (req->port->pending_replies == req) {
+ if (req->next_pending && req->next_pending != req)
+ req->port->pending_replies = req->next_pending;
+ else
+ req->port->pending_replies = NULL;
+ }
+ rc = --req->port->refcnt;
+ }
+
+ if (req->response) {
+ mm_free(req->response);
+ }
+
+ server_request_free_answers(req);
+
+ if (req->next_pending && req->next_pending != req) {
+ req->next_pending->prev_pending = req->prev_pending;
+ req->prev_pending->next_pending = req->next_pending;
+ }
+
+ if (rc == 0) {
+ EVDNS_UNLOCK(req->port); /* ????? nickm */
+ server_port_free(req->port);
+ mm_free(req);
+ return (1);
+ }
+ if (lock)
+ EVDNS_UNLOCK(req->port);
+ mm_free(req);
+ return (0);
+}
+
+/* Free all storage held by an evdns_server_port. Only called when the refcount reaches zero. */
+static void
+server_port_free(struct evdns_server_port *port)
+{
+ EVUTIL_ASSERT(port);
+ EVUTIL_ASSERT(!port->refcnt);
+ EVUTIL_ASSERT(!port->pending_replies);
+ if (port->socket > 0) {
+ evutil_closesocket(port->socket);
+ port->socket = -1;
+ }
+ (void) event_del(&port->event);
+ event_debug_unassign(&port->event);
+ EVTHREAD_FREE_LOCK(port->lock, EVTHREAD_LOCKTYPE_RECURSIVE);
+ mm_free(port);
+}
+
+/* exported function */
+int
+evdns_server_request_drop(struct evdns_server_request *req_)
+{
+ struct server_request *req = TO_SERVER_REQUEST(req_);
+ server_request_free(req);
+ return 0;
+}
+
+/* exported function */
+int
+evdns_server_request_get_requesting_addr(struct evdns_server_request *req_, struct sockaddr *sa, int addr_len)
+{
+ struct server_request *req = TO_SERVER_REQUEST(req_);
+ if (addr_len < (int)req->addrlen)
+ return -1;
+ memcpy(sa, &(req->addr), req->addrlen);
+ return req->addrlen;
+}
+
+#undef APPEND16
+#undef APPEND32
+
+/* this is a libevent callback function which is called when a request */
+/* has timed out. */
+static void
+evdns_request_timeout_callback(evutil_socket_t fd, short events, void *arg) {
+ struct request *const req = (struct request *) arg;
+ struct evdns_base *base = req->base;
+
+ (void) fd;
+ (void) events;
+
+ log(EVDNS_LOG_DEBUG, "Request %p timed out", arg);
+ EVDNS_LOCK(base);
+
+ if (req->tx_count >= req->base->global_max_retransmits) {
+ struct nameserver *ns = req->ns;
+ /* this request has failed */
+ log(EVDNS_LOG_DEBUG, "Giving up on request %p; tx_count==%d",
+ arg, req->tx_count);
+ reply_schedule_callback(req, 0, DNS_ERR_TIMEOUT, NULL);
+
+ request_finished(req, &REQ_HEAD(req->base, req->trans_id), 1);
+ nameserver_failed(ns, "request timed out.");
+ } else {
+ /* retransmit it */
+ log(EVDNS_LOG_DEBUG, "Retransmitting request %p; tx_count==%d",
+ arg, req->tx_count);
+ (void) evtimer_del(&req->timeout_event);
+ request_swap_ns(req, nameserver_pick(base));
+ evdns_request_transmit(req);
+
+ req->ns->timedout++;
+ if (req->ns->timedout > req->base->global_max_nameserver_timeout) {
+ req->ns->timedout = 0;
+ nameserver_failed(req->ns, "request timed out.");
+ }
+ }
+
+ EVDNS_UNLOCK(base);
+}
+
+/* try to send a request to a given server. */
+/* */
+/* return: */
+/* 0 ok */
+/* 1 temporary failure */
+/* 2 other failure */
+static int
+evdns_request_transmit_to(struct request *req, struct nameserver *server) {
+ int r;
+ ASSERT_LOCKED(req->base);
+ ASSERT_VALID_REQUEST(req);
+
+ if (server->requests_inflight == 1 &&
+ req->base->disable_when_inactive &&
+ event_add(&server->event, NULL) < 0) {
+ return 1;
+ }
+
+ r = sendto(server->socket, (void*)req->request, req->request_len, 0,
+ (struct sockaddr *)&server->address, server->addrlen);
+ if (r < 0) {
+ int err = evutil_socket_geterror(server->socket);
+ if (EVUTIL_ERR_RW_RETRIABLE(err))
+ return 1;
+ nameserver_failed(req->ns, evutil_socket_error_to_string(err));
+ return 2;
+ } else if (r != (int)req->request_len) {
+ return 1; /* short write */
+ } else {
+ return 0;
+ }
+}
+
+/* try to send a request, updating the fields of the request */
+/* as needed */
+/* */
+/* return: */
+/* 0 ok */
+/* 1 failed */
+static int
+evdns_request_transmit(struct request *req) {
+ int retcode = 0, r;
+
+ ASSERT_LOCKED(req->base);
+ ASSERT_VALID_REQUEST(req);
+ /* if we fail to send this packet then this flag marks it */
+ /* for evdns_transmit */
+ req->transmit_me = 1;
+ EVUTIL_ASSERT(req->trans_id != 0xffff);
+
+ if (!req->ns)
+ {
+ /* unable to transmit request if no nameservers */
+ return 1;
+ }
+
+ if (req->ns->choked) {
+ /* don't bother trying to write to a socket */
+ /* which we have had EAGAIN from */
+ return 1;
+ }
+
+ r = evdns_request_transmit_to(req, req->ns);
+ switch (r) {
+ case 1:
+ /* temp failure */
+ req->ns->choked = 1;
+ nameserver_write_waiting(req->ns, 1);
+ return 1;
+ case 2:
+ /* failed to transmit the request entirely. */
+ retcode = 1;
+ /* fall through: we'll set a timeout, which will time out,
+ * and make us retransmit the request anyway. */
+ default:
+ /* all ok */
+ log(EVDNS_LOG_DEBUG,
+ "Setting timeout for request %p, sent to nameserver %p", req, req->ns);
+ if (evtimer_add(&req->timeout_event, &req->base->global_timeout) < 0) {
+ log(EVDNS_LOG_WARN,
+ "Error from libevent when adding timer for request %p",
+ req);
+ /* ???? Do more? */
+ }
+ req->tx_count++;
+ req->transmit_me = 0;
+ return retcode;
+ }
+}
+
+static void
+nameserver_probe_callback(int result, char type, int count, int ttl, void *addresses, void *arg) {
+ struct nameserver *const ns = (struct nameserver *) arg;
+ (void) type;
+ (void) count;
+ (void) ttl;
+ (void) addresses;
+
+ if (result == DNS_ERR_CANCEL) {
+ /* We canceled this request because the nameserver came up
+ * for some other reason. Do not change our opinion about
+ * the nameserver. */
+ return;
+ }
+
+ EVDNS_LOCK(ns->base);
+ ns->probe_request = NULL;
+ if (result == DNS_ERR_NONE || result == DNS_ERR_NOTEXIST) {
+ /* this is a good reply */
+ nameserver_up(ns);
+ } else {
+ nameserver_probe_failed(ns);
+ }
+ EVDNS_UNLOCK(ns->base);
+}
+
+static void
+nameserver_send_probe(struct nameserver *const ns) {
+ struct evdns_request *handle;
+ struct request *req;
+ char addrbuf[128];
+ /* here we need to send a probe to a given nameserver */
+ /* in the hope that it is up now. */
+
+ ASSERT_LOCKED(ns->base);
+ log(EVDNS_LOG_DEBUG, "Sending probe to %s",
+ evutil_format_sockaddr_port_(
+ (struct sockaddr *)&ns->address,
+ addrbuf, sizeof(addrbuf)));
+ handle = mm_calloc(1, sizeof(*handle));
+ if (!handle) return;
+ req = request_new(ns->base, handle, TYPE_A, "google.com", DNS_QUERY_NO_SEARCH, nameserver_probe_callback, ns);
+ if (!req) {
+ mm_free(handle);
+ return;
+ }
+ ns->probe_request = handle;
+ /* we force this into the inflight queue no matter what */
+ request_trans_id_set(req, transaction_id_pick(ns->base));
+ req->ns = ns;
+ request_submit(req);
+}
+
+/* returns: */
+/* 0 didn't try to transmit anything */
+/* 1 tried to transmit something */
+static int
+evdns_transmit(struct evdns_base *base) {
+ char did_try_to_transmit = 0;
+ int i;
+
+ ASSERT_LOCKED(base);
+ for (i = 0; i < base->n_req_heads; ++i) {
+ if (base->req_heads[i]) {
+ struct request *const started_at = base->req_heads[i], *req = started_at;
+ /* first transmit all the requests which are currently waiting */
+ do {
+ if (req->transmit_me) {
+ did_try_to_transmit = 1;
+ evdns_request_transmit(req);
+ }
+
+ req = req->next;
+ } while (req != started_at);
+ }
+ }
+
+ return did_try_to_transmit;
+}
+
+/* exported function */
+int
+evdns_base_count_nameservers(struct evdns_base *base)
+{
+ const struct nameserver *server;
+ int n = 0;
+
+ EVDNS_LOCK(base);
+ server = base->server_head;
+ if (!server)
+ goto done;
+ do {
+ ++n;
+ server = server->next;
+ } while (server != base->server_head);
+done:
+ EVDNS_UNLOCK(base);
+ return n;
+}
+
+int
+evdns_count_nameservers(void)
+{
+ return evdns_base_count_nameservers(current_base);
+}
+
+/* exported function */
+int
+evdns_base_clear_nameservers_and_suspend(struct evdns_base *base)
+{
+ struct nameserver *server, *started_at;
+ int i;
+
+ EVDNS_LOCK(base);
+ server = base->server_head;
+ started_at = base->server_head;
+ if (!server) {
+ EVDNS_UNLOCK(base);
+ return 0;
+ }
+ while (1) {
+ struct nameserver *next = server->next;
+ (void) event_del(&server->event);
+ if (evtimer_initialized(&server->timeout_event))
+ (void) evtimer_del(&server->timeout_event);
+ if (server->probe_request) {
+ evdns_cancel_request(server->base, server->probe_request);
+ server->probe_request = NULL;
+ }
+ if (server->socket >= 0)
+ evutil_closesocket(server->socket);
+ mm_free(server);
+ if (next == started_at)
+ break;
+ server = next;
+ }
+ base->server_head = NULL;
+ base->global_good_nameservers = 0;
+
+ for (i = 0; i < base->n_req_heads; ++i) {
+ struct request *req, *req_started_at;
+ req = req_started_at = base->req_heads[i];
+ while (req) {
+ struct request *next = req->next;
+ req->tx_count = req->reissue_count = 0;
+ req->ns = NULL;
+ /* ???? What to do about searches? */
+ (void) evtimer_del(&req->timeout_event);
+ req->trans_id = 0;
+ req->transmit_me = 0;
+
+ base->global_requests_waiting++;
+ evdns_request_insert(req, &base->req_waiting_head);
+ /* We want to insert these suspended elements at the front of
+ * the waiting queue, since they were pending before any of
+ * the waiting entries were added. This is a circular list,
+ * so we can just shift the start back by one. */
+ base->req_waiting_head = base->req_waiting_head->prev;
+
+ if (next == req_started_at)
+ break;
+ req = next;
+ }
+ base->req_heads[i] = NULL;
+ }
+
+ base->global_requests_inflight = 0;
+
+ EVDNS_UNLOCK(base);
+ return 0;
+}
+
+int
+evdns_clear_nameservers_and_suspend(void)
+{
+ return evdns_base_clear_nameservers_and_suspend(current_base);
+}
+
+
+/* exported function */
+int
+evdns_base_resume(struct evdns_base *base)
+{
+ EVDNS_LOCK(base);
+ evdns_requests_pump_waiting_queue(base);
+ EVDNS_UNLOCK(base);
+
+ return 0;
+}
+
+int
+evdns_resume(void)
+{
+ return evdns_base_resume(current_base);
+}
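+
+/* Usage sketch (added, illustrative only): the suspend/resume pair lets a
+ * caller swap nameservers without losing lookups that are already in flight:
+ *
+ *     evdns_base_clear_nameservers_and_suspend(base);
+ *     evdns_base_nameserver_ip_add(base, "192.0.2.1");
+ *     evdns_base_resume(base);
+ *
+ * the suspend call above parks in-flight requests on the waiting queue, and
+ * evdns_requests_pump_waiting_queue() re-submits them when resume is called. */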
+
+static int
+evdns_nameserver_add_impl_(struct evdns_base *base, const struct sockaddr *address, int addrlen) {
+ /* first check to see if we already have this nameserver */
+
+ const struct nameserver *server = base->server_head, *const started_at = base->server_head;
+ struct nameserver *ns;
+ int err = 0;
+ char addrbuf[128];
+
+ ASSERT_LOCKED(base);
+ if (server) {
+ do {
+ if (!evutil_sockaddr_cmp((struct sockaddr*)&server->address, address, 1)) return 3;
+ server = server->next;
+ } while (server != started_at);
+ }
+ if (addrlen > (int)sizeof(ns->address)) {
+ log(EVDNS_LOG_DEBUG, "Addrlen %d too long.", (int)addrlen);
+ return 2;
+ }
+
+ ns = (struct nameserver *) mm_malloc(sizeof(struct nameserver));
+ if (!ns) return -1;
+
+ memset(ns, 0, sizeof(struct nameserver));
+ ns->base = base;
+
+ evtimer_assign(&ns->timeout_event, ns->base->event_base, nameserver_prod_callback, ns);
+
+ ns->socket = evutil_socket_(address->sa_family,
+ SOCK_DGRAM|EVUTIL_SOCK_NONBLOCK|EVUTIL_SOCK_CLOEXEC, 0);
+ if (ns->socket < 0) { err = 1; goto out1; }
+
+ if (base->global_outgoing_addrlen &&
+ !evutil_sockaddr_is_loopback_(address)) {
+ if (bind(ns->socket,
+ (struct sockaddr*)&base->global_outgoing_address,
+ base->global_outgoing_addrlen) < 0) {
+ log(EVDNS_LOG_WARN,"Couldn't bind to outgoing address");
+ err = 2;
+ goto out2;
+ }
+ }
+
+ memcpy(&ns->address, address, addrlen);
+ ns->addrlen = addrlen;
+ ns->state = 1;
+ event_assign(&ns->event, ns->base->event_base, ns->socket,
+ EV_READ | EV_PERSIST, nameserver_ready_callback, ns);
+ if (!base->disable_when_inactive && event_add(&ns->event, NULL) < 0) {
+ err = 2;
+ goto out2;
+ }
+
+ log(EVDNS_LOG_DEBUG, "Added nameserver %s as %p",
+ evutil_format_sockaddr_port_(address, addrbuf, sizeof(addrbuf)), ns);
+
+ /* insert this nameserver into the list of them */
+ if (!base->server_head) {
+ ns->next = ns->prev = ns;
+ base->server_head = ns;
+ } else {
+ ns->next = base->server_head->next;
+ ns->prev = base->server_head;
+ base->server_head->next = ns;
+ ns->next->prev = ns;
+ }
+
+ base->global_good_nameservers++;
+
+ return 0;
+
+out2:
+ evutil_closesocket(ns->socket);
+out1:
+ event_debug_unassign(&ns->event);
+ mm_free(ns);
+ log(EVDNS_LOG_WARN, "Unable to add nameserver %s: error %d",
+ evutil_format_sockaddr_port_(address, addrbuf, sizeof(addrbuf)), err);
+ return err;
+}
+
+/* exported function */
+int
+evdns_base_nameserver_add(struct evdns_base *base, unsigned long int address)
+{
+ struct sockaddr_in sin;
+ int res;
+ memset(&sin, 0, sizeof(sin));
+ sin.sin_addr.s_addr = address;
+ sin.sin_port = htons(53);
+ sin.sin_family = AF_INET;
+ EVDNS_LOCK(base);
+ res = evdns_nameserver_add_impl_(base, (struct sockaddr*)&sin, sizeof(sin));
+ EVDNS_UNLOCK(base);
+ return res;
+}
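+
+/* Illustrative note (added, not part of the upstream source): 'address' is an
+ * IPv4 address in network byte order, e.g. the value returned by
+ * inet_addr("10.0.0.1").  evdns_base_nameserver_ip_add() below is the more
+ * flexible, string-based interface. */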
+
+int
+evdns_nameserver_add(unsigned long int address) {
+ if (!current_base)
+ current_base = evdns_base_new(NULL, 0);
+ return evdns_base_nameserver_add(current_base, address);
+}
+
+static void
+sockaddr_setport(struct sockaddr *sa, ev_uint16_t port)
+{
+ if (sa->sa_family == AF_INET) {
+ ((struct sockaddr_in *)sa)->sin_port = htons(port);
+ } else if (sa->sa_family == AF_INET6) {
+ ((struct sockaddr_in6 *)sa)->sin6_port = htons(port);
+ }
+}
+
+static ev_uint16_t
+sockaddr_getport(struct sockaddr *sa)
+{
+ if (sa->sa_family == AF_INET) {
+ return ntohs(((struct sockaddr_in *)sa)->sin_port);
+ } else if (sa->sa_family == AF_INET6) {
+ return ntohs(((struct sockaddr_in6 *)sa)->sin6_port);
+ } else {
+ return 0;
+ }
+}
+
+/* exported function */
+int
+evdns_base_nameserver_ip_add(struct evdns_base *base, const char *ip_as_string) {
+ struct sockaddr_storage ss;
+ struct sockaddr *sa;
+ int len = sizeof(ss);
+ int res;
+ if (evutil_parse_sockaddr_port(ip_as_string, (struct sockaddr *)&ss,
+ &len)) {
+ log(EVDNS_LOG_WARN, "Unable to parse nameserver address %s",
+ ip_as_string);
+ return 4;
+ }
+ sa = (struct sockaddr *) &ss;
+ if (sockaddr_getport(sa) == 0)
+ sockaddr_setport(sa, 53);
+
+ EVDNS_LOCK(base);
+ res = evdns_nameserver_add_impl_(base, sa, len);
+ EVDNS_UNLOCK(base);
+ return res;
+}
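+
+/* Illustrative note (added, not part of the upstream source): the string is
+ * parsed with evutil_parse_sockaddr_port(), so both a bare address and an
+ * address:port pair should be accepted, e.g.
+ *
+ *     evdns_base_nameserver_ip_add(base, "127.0.0.1");
+ *     evdns_base_nameserver_ip_add(base, "10.0.0.1:5353");
+ *
+ * when no port is given, the code above falls back to port 53. */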
+
+int
+evdns_nameserver_ip_add(const char *ip_as_string) {
+ if (!current_base)
+ current_base = evdns_base_new(NULL, 0);
+ return evdns_base_nameserver_ip_add(current_base, ip_as_string);
+}
+
+int
+evdns_base_nameserver_sockaddr_add(struct evdns_base *base,
+ const struct sockaddr *sa, ev_socklen_t len, unsigned flags)
+{
+ int res;
+ EVUTIL_ASSERT(base);
+ EVDNS_LOCK(base);
+ res = evdns_nameserver_add_impl_(base, sa, len);
+ EVDNS_UNLOCK(base);
+ return res;
+}
+
+int
+evdns_base_get_nameserver_addr(struct evdns_base *base, int idx,
+ struct sockaddr *sa, ev_socklen_t len)
+{
+ int result = -1;
+ int i;
+ struct nameserver *server;
+ EVDNS_LOCK(base);
+ server = base->server_head;
+ for (i = 0; i < idx && server; ++i, server = server->next) {
+ if (server->next == base->server_head)
+ goto done;
+ }
+ if (! server)
+ goto done;
+
+ if (server->addrlen > len) {
+ result = (int) server->addrlen;
+ goto done;
+ }
+
+ memcpy(sa, &server->address, server->addrlen);
+ result = (int) server->addrlen;
+done:
+ EVDNS_UNLOCK(base);
+ return result;
+}
+
+/* remove from the queue */
+static void
+evdns_request_remove(struct request *req, struct request **head)
+{
+ ASSERT_LOCKED(req->base);
+ ASSERT_VALID_REQUEST(req);
+
+#if 0
+ {
+ struct request *ptr;
+ int found = 0;
+ EVUTIL_ASSERT(*head != NULL);
+
+ ptr = *head;
+ do {
+ if (ptr == req) {
+ found = 1;
+ break;
+ }
+ ptr = ptr->next;
+ } while (ptr != *head);
+ EVUTIL_ASSERT(found);
+
+ EVUTIL_ASSERT(req->next);
+ }
+#endif
+
+ if (req->next == req) {
+ /* only item in the list */
+ *head = NULL;
+ } else {
+ req->next->prev = req->prev;
+ req->prev->next = req->next;
+ if (*head == req) *head = req->next;
+ }
+ req->next = req->prev = NULL;
+}
+
+/* insert into the tail of the queue */
+static void
+evdns_request_insert(struct request *req, struct request **head) {
+ ASSERT_LOCKED(req->base);
+ ASSERT_VALID_REQUEST(req);
+ if (!*head) {
+ *head = req;
+ req->next = req->prev = req;
+ return;
+ }
+
+ req->prev = (*head)->prev;
+ req->prev->next = req;
+ req->next = *head;
+ (*head)->prev = req;
+}
+
+static int
+string_num_dots(const char *s) {
+ int count = 0;
+ while ((s = strchr(s, '.'))) {
+ s++;
+ count++;
+ }
+ return count;
+}
+
+static struct request *
+request_new(struct evdns_base *base, struct evdns_request *handle, int type,
+ const char *name, int flags, evdns_callback_type callback,
+ void *user_ptr) {
+
+ const char issuing_now =
+ (base->global_requests_inflight < base->global_max_requests_inflight) ? 1 : 0;
+
+ const size_t name_len = strlen(name);
+ const size_t request_max_len = evdns_request_len(name_len);
+ const u16 trans_id = issuing_now ? transaction_id_pick(base) : 0xffff;
+ /* the request data is allocated in a single block with the header */
+ struct request *const req =
+ mm_malloc(sizeof(struct request) + request_max_len);
+ int rlen;
+ char namebuf[256];
+ (void) flags;
+
+ ASSERT_LOCKED(base);
+
+ if (!req) return NULL;
+
+ if (name_len >= sizeof(namebuf)) {
+ mm_free(req);
+ return NULL;
+ }
+
+ memset(req, 0, sizeof(struct request));
+ req->base = base;
+
+ evtimer_assign(&req->timeout_event, req->base->event_base, evdns_request_timeout_callback, req);
+
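+ /* (Descriptive note, added.) This implements "0x20" case randomization:
+ * the case of each letter in the query name is chosen at random.  A real
+ * server echoes the name back exactly as sent, so a forged reply must
+ * guess the casing as well as the transaction id. */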
+ if (base->global_randomize_case) {
+ unsigned i;
+ char randbits[(sizeof(namebuf)+7)/8];
+ strlcpy(namebuf, name, sizeof(namebuf));
+ evutil_secure_rng_get_bytes(randbits, (name_len+7)/8);
+ for (i = 0; i < name_len; ++i) {
+ if (EVUTIL_ISALPHA_(namebuf[i])) {
+ if ((randbits[i >> 3] & (1<<(i & 7))))
+ namebuf[i] |= 0x20;
+ else
+ namebuf[i] &= ~0x20;
+ }
+ }
+ name = namebuf;
+ }
+
+ /* request data lives just after the header */
+ req->request = ((u8 *) req) + sizeof(struct request);
+ /* denotes that the request data shouldn't be free()ed */
+ req->request_appended = 1;
+ rlen = evdns_request_data_build(name, name_len, trans_id,
+ type, CLASS_INET, req->request, request_max_len);
+ if (rlen < 0)
+ goto err1;
+
+ req->request_len = rlen;
+ req->trans_id = trans_id;
+ req->tx_count = 0;
+ req->request_type = type;
+ req->user_pointer = user_ptr;
+ req->user_callback = callback;
+ req->ns = issuing_now ? nameserver_pick(base) : NULL;
+ req->next = req->prev = NULL;
+ req->handle = handle;
+ if (handle) {
+ handle->current_req = req;
+ handle->base = base;
+ }
+
+ return req;
+err1:
+ mm_free(req);
+ return NULL;
+}
+
+static void
+request_submit(struct request *const req) {
+ struct evdns_base *base = req->base;
+ ASSERT_LOCKED(base);
+ ASSERT_VALID_REQUEST(req);
+ if (req->ns) {
+ /* if it has a nameserver assigned then this is going */
+ /* straight into the inflight queue */
+ evdns_request_insert(req, &REQ_HEAD(base, req->trans_id));
+
+ base->global_requests_inflight++;
+ req->ns->requests_inflight++;
+
+ evdns_request_transmit(req);
+ } else {
+ evdns_request_insert(req, &base->req_waiting_head);
+ base->global_requests_waiting++;
+ }
+}
+
+/* exported function */
+void
+evdns_cancel_request(struct evdns_base *base, struct evdns_request *handle)
+{
+ struct request *req;
+
+ if (!handle->current_req)
+ return;
+
+ if (!base) {
+ /* This redundancy is silly; can we fix it? (Not for 2.0) XXXX */
+ base = handle->base;
+ if (!base)
+ base = handle->current_req->base;
+ }
+
+ EVDNS_LOCK(base);
+ if (handle->pending_cb) {
+ EVDNS_UNLOCK(base);
+ return;
+ }
+
+ req = handle->current_req;
+ ASSERT_VALID_REQUEST(req);
+
+ reply_schedule_callback(req, 0, DNS_ERR_CANCEL, NULL);
+ if (req->ns) {
+ /* remove from inflight queue */
+ request_finished(req, &REQ_HEAD(base, req->trans_id), 1);
+ } else {
+ /* remove from global_waiting head */
+ request_finished(req, &base->req_waiting_head, 1);
+ }
+ EVDNS_UNLOCK(base);
+}
+
+/* exported function */
+struct evdns_request *
+evdns_base_resolve_ipv4(struct evdns_base *base, const char *name, int flags,
+ evdns_callback_type callback, void *ptr) {
+ struct evdns_request *handle;
+ struct request *req;
+ log(EVDNS_LOG_DEBUG, "Resolve requested for %s", name);
+ handle = mm_calloc(1, sizeof(*handle));
+ if (handle == NULL)
+ return NULL;
+ EVDNS_LOCK(base);
+ if (flags & DNS_QUERY_NO_SEARCH) {
+ req =
+ request_new(base, handle, TYPE_A, name, flags,
+ callback, ptr);
+ if (req)
+ request_submit(req);
+ } else {
+ search_request_new(base, handle, TYPE_A, name, flags,
+ callback, ptr);
+ }
+ if (handle->current_req == NULL) {
+ mm_free(handle);
+ handle = NULL;
+ }
+ EVDNS_UNLOCK(base);
+ return handle;
+}
+
+int evdns_resolve_ipv4(const char *name, int flags,
+ evdns_callback_type callback, void *ptr)
+{
+ return evdns_base_resolve_ipv4(current_base, name, flags, callback, ptr)
+ ? 0 : -1;
+}
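+
+/* Usage sketch (added, illustrative only): a typical forward lookup passes a
+ * callback with the evdns_callback_type signature and reads the packed
+ * answer array:
+ *
+ *     static void resolve_cb(int result, char type, int count, int ttl,
+ *         void *addresses, void *arg) {
+ *         if (result == DNS_ERR_NONE && type == DNS_IPv4_A && count > 0) {
+ *             // 'addresses' points at 'count' 4-byte IPv4 addresses
+ *         }
+ *     }
+ *     ...
+ *     evdns_base_resolve_ipv4(base, "www.example.com", 0, resolve_cb, NULL);
+ */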
+
+
+/* exported function */
+struct evdns_request *
+evdns_base_resolve_ipv6(struct evdns_base *base,
+ const char *name, int flags,
+ evdns_callback_type callback, void *ptr)
+{
+ struct evdns_request *handle;
+ struct request *req;
+ log(EVDNS_LOG_DEBUG, "Resolve requested for %s", name);
+ handle = mm_calloc(1, sizeof(*handle));
+ if (handle == NULL)
+ return NULL;
+ EVDNS_LOCK(base);
+ if (flags & DNS_QUERY_NO_SEARCH) {
+ req = request_new(base, handle, TYPE_AAAA, name, flags,
+ callback, ptr);
+ if (req)
+ request_submit(req);
+ } else {
+ search_request_new(base, handle, TYPE_AAAA, name, flags,
+ callback, ptr);
+ }
+ if (handle->current_req == NULL) {
+ mm_free(handle);
+ handle = NULL;
+ }
+ EVDNS_UNLOCK(base);
+ return handle;
+}
+
+int evdns_resolve_ipv6(const char *name, int flags,
+ evdns_callback_type callback, void *ptr) {
+ return evdns_base_resolve_ipv6(current_base, name, flags, callback, ptr)
+ ? 0 : -1;
+}
+
+struct evdns_request *
+evdns_base_resolve_reverse(struct evdns_base *base, const struct in_addr *in, int flags, evdns_callback_type callback, void *ptr) {
+ char buf[32];
+ struct evdns_request *handle;
+ struct request *req;
+ u32 a;
+ EVUTIL_ASSERT(in);
+ a = ntohl(in->s_addr);
+ evutil_snprintf(buf, sizeof(buf), "%d.%d.%d.%d.in-addr.arpa",
+ (int)(u8)((a )&0xff),
+ (int)(u8)((a>>8 )&0xff),
+ (int)(u8)((a>>16)&0xff),
+ (int)(u8)((a>>24)&0xff));
+ handle = mm_calloc(1, sizeof(*handle));
+ if (handle == NULL)
+ return NULL;
+ log(EVDNS_LOG_DEBUG, "Resolve requested for %s (reverse)", buf);
+ EVDNS_LOCK(base);
+ req = request_new(base, handle, TYPE_PTR, buf, flags, callback, ptr);
+ if (req)
+ request_submit(req);
+ if (handle->current_req == NULL) {
+ mm_free(handle);
+ handle = NULL;
+ }
+ EVDNS_UNLOCK(base);
+ return (handle);
+}
+
+int evdns_resolve_reverse(const struct in_addr *in, int flags, evdns_callback_type callback, void *ptr) {
+ return evdns_base_resolve_reverse(current_base, in, flags, callback, ptr)
+ ? 0 : -1;
+}
+
+struct evdns_request *
+evdns_base_resolve_reverse_ipv6(struct evdns_base *base, const struct in6_addr *in, int flags, evdns_callback_type callback, void *ptr) {
+ /* 32 nybbles, 32 periods, "ip6.arpa", NUL. */
+ char buf[73];
+ char *cp;
+ struct evdns_request *handle;
+ struct request *req;
+ int i;
+ EVUTIL_ASSERT(in);
+ cp = buf;
+ for (i=15; i >= 0; --i) {
+ u8 byte = in->s6_addr[i];
+ *cp++ = "0123456789abcdef"[byte & 0x0f];
+ *cp++ = '.';
+ *cp++ = "0123456789abcdef"[byte >> 4];
+ *cp++ = '.';
+ }
+ EVUTIL_ASSERT(cp + strlen("ip6.arpa") < buf+sizeof(buf));
+ memcpy(cp, "ip6.arpa", strlen("ip6.arpa")+1);
+ handle = mm_calloc(1, sizeof(*handle));
+ if (handle == NULL)
+ return NULL;
+ log(EVDNS_LOG_DEBUG, "Resolve requested for %s (reverse)", buf);
+ EVDNS_LOCK(base);
+ req = request_new(base, handle, TYPE_PTR, buf, flags, callback, ptr);
+ if (req)
+ request_submit(req);
+ if (handle->current_req == NULL) {
+ mm_free(handle);
+ handle = NULL;
+ }
+ EVDNS_UNLOCK(base);
+ return (handle);
+}
+
+int evdns_resolve_reverse_ipv6(const struct in6_addr *in, int flags, evdns_callback_type callback, void *ptr) {
+ return evdns_base_resolve_reverse_ipv6(current_base, in, flags, callback, ptr)
+ ? 0 : -1;
+}
+
+/* ================================================================= */
+/* Search support */
+/* */
+/* the libc resolver has support for searching a number of domains */
+/* to find a name. If nothing else then it takes the single domain */
+/* from the gethostname() call. */
+/* */
+/* It can also be configured via the domain and search options in a */
+/* resolv.conf. */
+/* */
+/* The ndots option controls how many dots it takes for the resolver */
+/* to decide that a name is non-local and so try a raw lookup first. */
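+/* */
+/* Illustrative example (added): with a single search domain "example.com" */
+/* and ndots=1, a lookup for "www" (zero dots, fewer than ndots) is first */
+/* tried as "www.example.com" and only then raw as "www"; a lookup for */
+/* "www.foo.org" (two dots, >= ndots) is tried raw first, falling back to */
+/* the search list if that fails. */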
+
+struct search_domain {
+ int len;
+ struct search_domain *next;
+ /* the text string is appended to this structure */
+};
+
+struct search_state {
+ int refcount;
+ int ndots;
+ int num_domains;
+ struct search_domain *head;
+};
+
+static void
+search_state_decref(struct search_state *const state) {
+ if (!state) return;
+ state->refcount--;
+ if (!state->refcount) {
+ struct search_domain *next, *dom;
+ for (dom = state->head; dom; dom = next) {
+ next = dom->next;
+ mm_free(dom);
+ }
+ mm_free(state);
+ }
+}
+
+static struct search_state *
+search_state_new(void) {
+ struct search_state *state = (struct search_state *) mm_malloc(sizeof(struct search_state));
+ if (!state) return NULL;
+ memset(state, 0, sizeof(struct search_state));
+ state->refcount = 1;
+ state->ndots = 1;
+
+ return state;
+}
+
+static void
+search_postfix_clear(struct evdns_base *base) {
+ search_state_decref(base->global_search_state);
+
+ base->global_search_state = search_state_new();
+}
+
+/* exported function */
+void
+evdns_base_search_clear(struct evdns_base *base)
+{
+ EVDNS_LOCK(base);
+ search_postfix_clear(base);
+ EVDNS_UNLOCK(base);
+}
+
+void
+evdns_search_clear(void) {
+ evdns_base_search_clear(current_base);
+}
+
+static void
+search_postfix_add(struct evdns_base *base, const char *domain) {
+ size_t domain_len;
+ struct search_domain *sdomain;
+ while (domain[0] == '.') domain++;
+ domain_len = strlen(domain);
+
+ ASSERT_LOCKED(base);
+ if (!base->global_search_state) base->global_search_state = search_state_new();
+ if (!base->global_search_state) return;
+ base->global_search_state->num_domains++;
+
+ sdomain = (struct search_domain *) mm_malloc(sizeof(struct search_domain) + domain_len);
+ if (!sdomain) return;
+ memcpy( ((u8 *) sdomain) + sizeof(struct search_domain), domain, domain_len);
+ sdomain->next = base->global_search_state->head;
+ sdomain->len = (int) domain_len;
+
+ base->global_search_state->head = sdomain;
+}
+
+/* reverse the order of members in the postfix list. This is needed because, */
+/* when parsing resolv.conf, we push elements in the wrong order. */
+static void
+search_reverse(struct evdns_base *base) {
+ struct search_domain *cur, *prev = NULL, *next;
+ ASSERT_LOCKED(base);
+ cur = base->global_search_state->head;
+ while (cur) {
+ next = cur->next;
+ cur->next = prev;
+ prev = cur;
+ cur = next;
+ }
+
+ base->global_search_state->head = prev;
+}
+
+/* exported function */
+void
+evdns_base_search_add(struct evdns_base *base, const char *domain) {
+ EVDNS_LOCK(base);
+ search_postfix_add(base, domain);
+ EVDNS_UNLOCK(base);
+}
+void
+evdns_search_add(const char *domain) {
+ evdns_base_search_add(current_base, domain);
+}
+
+/* exported function */
+void
+evdns_base_search_ndots_set(struct evdns_base *base, const int ndots) {
+ EVDNS_LOCK(base);
+ if (!base->global_search_state) base->global_search_state = search_state_new();
+ if (base->global_search_state)
+ base->global_search_state->ndots = ndots;
+ EVDNS_UNLOCK(base);
+}
+void
+evdns_search_ndots_set(const int ndots) {
+ evdns_base_search_ndots_set(current_base, ndots);
+}
+
+static void
+search_set_from_hostname(struct evdns_base *base) {
+ char hostname[HOST_NAME_MAX + 1], *domainname;
+
+ ASSERT_LOCKED(base);
+ search_postfix_clear(base);
+ if (gethostname(hostname, sizeof(hostname))) return;
+ domainname = strchr(hostname, '.');
+ if (!domainname) return;
+ search_postfix_add(base, domainname);
+}
+
+/* warning: returns malloced string */
+static char *
+search_make_new(const struct search_state *const state, int n, const char *const base_name) {
+ const size_t base_len = strlen(base_name);
+ const char need_to_append_dot = base_name[base_len - 1] == '.' ? 0 : 1;
+ struct search_domain *dom;
+
+ for (dom = state->head; dom; dom = dom->next) {
+ if (!n--) {
+ /* this is the postfix we want */
+ /* the actual postfix string is kept at the end of the structure */
+ const u8 *const postfix = ((u8 *) dom) + sizeof(struct search_domain);
+ const int postfix_len = dom->len;
+ char *const newname = (char *) mm_malloc(base_len + need_to_append_dot + postfix_len + 1);
+ if (!newname) return NULL;
+ memcpy(newname, base_name, base_len);
+ if (need_to_append_dot) newname[base_len] = '.';
+ memcpy(newname + base_len + need_to_append_dot, postfix, postfix_len);
+ newname[base_len + need_to_append_dot + postfix_len] = 0;
+ return newname;
+ }
+ }
+
+ /* we ran off the end of the list and still didn't find the requested string */
+ EVUTIL_ASSERT(0);
+ return NULL; /* unreachable; stops warnings in some compilers. */
+}
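+
+/* Illustrative example (added): with base_name "www" and a postfix of
+ * "example.com", the returned string is "www.example.com"; if base_name
+ * already ends in '.', no extra dot is inserted. */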
+
+static struct request *
+search_request_new(struct evdns_base *base, struct evdns_request *handle,
+ int type, const char *const name, int flags,
+ evdns_callback_type user_callback, void *user_arg) {
+ ASSERT_LOCKED(base);
+ EVUTIL_ASSERT(type == TYPE_A || type == TYPE_AAAA);
+ EVUTIL_ASSERT(handle->current_req == NULL);
+ if ( ((flags & DNS_QUERY_NO_SEARCH) == 0) &&
+ base->global_search_state &&
+ base->global_search_state->num_domains) {
+ /* we have some domains to search */
+ struct request *req;
+ if (string_num_dots(name) >= base->global_search_state->ndots) {
+ req = request_new(base, handle, type, name, flags, user_callback, user_arg);
+ if (!req) return NULL;
+ handle->search_index = -1;
+ } else {
+ char *const new_name = search_make_new(base->global_search_state, 0, name);
+ if (!new_name) return NULL;
+ req = request_new(base, handle, type, new_name, flags, user_callback, user_arg);
+ mm_free(new_name);
+ if (!req) return NULL;
+ handle->search_index = 0;
+ }
+ EVUTIL_ASSERT(handle->search_origname == NULL);
+ handle->search_origname = mm_strdup(name);
+ if (handle->search_origname == NULL) {
+ /* XXX Should we dealloc req? If yes, how? */
+ if (req)
+ mm_free(req);
+ return NULL;
+ }
+ handle->search_state = base->global_search_state;
+ handle->search_flags = flags;
+ base->global_search_state->refcount++;
+ request_submit(req);
+ return req;
+ } else {
+ struct request *const req = request_new(base, handle, type, name, flags, user_callback, user_arg);
+ if (!req) return NULL;
+ request_submit(req);
+ return req;
+ }
+}
+
+/* this is called when a request has failed to find a name. We need to check */
+/* if it is part of a search and, if so, try the next name in the list */
+/* returns: */
+/* 0 another request has been submitted */
+/* 1 no more requests needed */
+static int
+search_try_next(struct evdns_request *const handle) {
+ struct request *req = handle->current_req;
+ struct evdns_base *base = req->base;
+ struct request *newreq;
+ ASSERT_LOCKED(base);
+ if (handle->search_state) {
+ /* it is part of a search */
+ char *new_name;
+ handle->search_index++;
+ if (handle->search_index >= handle->search_state->num_domains) {
+ /* no more postfixes to try; however, we may need to try */
+ /* this name without a postfix */
+ if (string_num_dots(handle->search_origname) < handle->search_state->ndots) {
+ /* yep, we need to try it raw */
+ newreq = request_new(base, NULL, req->request_type, handle->search_origname, handle->search_flags, req->user_callback, req->user_pointer);
+ log(EVDNS_LOG_DEBUG, "Search: trying raw query %s", handle->search_origname);
+ if (newreq) {
+ search_request_finished(handle);
+ goto submit_next;
+ }
+ }
+ return 1;
+ }
+
+ new_name = search_make_new(handle->search_state, handle->search_index, handle->search_origname);
+ if (!new_name) return 1;
+ log(EVDNS_LOG_DEBUG, "Search: now trying %s (%d)", new_name, handle->search_index);
+ newreq = request_new(base, NULL, req->request_type, new_name, handle->search_flags, req->user_callback, req->user_pointer);
+ mm_free(new_name);
+ if (!newreq) return 1;
+ goto submit_next;
+ }
+ return 1;
+
+submit_next:
+ request_finished(req, &REQ_HEAD(req->base, req->trans_id), 0);
+ handle->current_req = newreq;
+ newreq->handle = handle;
+ request_submit(newreq);
+ return 0;
+}
+
+static void
+search_request_finished(struct evdns_request *const handle) {
+ ASSERT_LOCKED(handle->current_req->base);
+ if (handle->search_state) {
+ search_state_decref(handle->search_state);
+ handle->search_state = NULL;
+ }
+ if (handle->search_origname) {
+ mm_free(handle->search_origname);
+ handle->search_origname = NULL;
+ }
+}
+
+/* ================================================================= */
+/* Parsing resolv.conf files */
+
+static void
+evdns_resolv_set_defaults(struct evdns_base *base, int flags) {
+ /* if the file isn't found then we assume a local resolver */
+ ASSERT_LOCKED(base);
+ if (flags & DNS_OPTION_SEARCH) search_set_from_hostname(base);
+ if (flags & DNS_OPTION_NAMESERVERS) evdns_base_nameserver_ip_add(base,"127.0.0.1");
+}
+
+#ifndef EVENT__HAVE_STRTOK_R
+static char *
+strtok_r(char *s, const char *delim, char **state) {
+ char *cp, *start;
+ start = cp = s ? s : *state;
+ if (!cp)
+ return NULL;
+ while (*cp && !strchr(delim, *cp))
+ ++cp;
+ if (!*cp) {
+ if (cp == start)
+ return NULL;
+ *state = NULL;
+ return start;
+ } else {
+ *cp++ = '\0';
+ *state = cp;
+ return start;
+ }
+}
+#endif
+
+/* helper version of atoi which returns -1 on error */
+static int
+strtoint(const char *const str)
+{
+ char *endptr;
+ const int r = strtol(str, &endptr, 10);
+ if (*endptr) return -1;
+ return r;
+}
+
+/* Parse a number of seconds into a timeval; return -1 on error. */
+static int
+evdns_strtotimeval(const char *const str, struct timeval *out)
+{
+ double d;
+ char *endptr;
+ d = strtod(str, &endptr);
+ if (*endptr) return -1;
+ if (d < 0) return -1;
+ out->tv_sec = (int) d;
+ out->tv_usec = (int) ((d - (int) d)*1000000);
+ if (out->tv_sec == 0 && out->tv_usec < 1000) /* less than 1 msec */
+ return -1;
+ return 0;
+}
+
+/* helper version of atoi that returns -1 on error and clips to bounds. */
+static int
+strtoint_clipped(const char *const str, int min, int max)
+{
+ int r = strtoint(str);
+ if (r == -1)
+ return r;
+ else if (r<min)
+ return min;
+ else if (r>max)
+ return max;
+ else
+ return r;
+}
+
+static int
+evdns_base_set_max_requests_inflight(struct evdns_base *base, int maxinflight)
+{
+ int old_n_heads = base->n_req_heads, n_heads;
+ struct request **old_heads = base->req_heads, **new_heads, *req;
+ int i;
+
+ ASSERT_LOCKED(base);
+ if (maxinflight < 1)
+ maxinflight = 1;
+ n_heads = (maxinflight+4) / 5;
+ EVUTIL_ASSERT(n_heads > 0);
+ new_heads = mm_calloc(n_heads, sizeof(struct request*));
+ if (!new_heads)
+ return (-1);
+ if (old_heads) {
+ for (i = 0; i < old_n_heads; ++i) {
+ while (old_heads[i]) {
+ req = old_heads[i];
+ evdns_request_remove(req, &old_heads[i]);
+ evdns_request_insert(req, &new_heads[req->trans_id % n_heads]);
+ }
+ }
+ mm_free(old_heads);
+ }
+ base->req_heads = new_heads;
+ base->n_req_heads = n_heads;
+ base->global_max_requests_inflight = maxinflight;
+ return (0);
+}
+
+/* exported function */
+int
+evdns_base_set_option(struct evdns_base *base,
+ const char *option, const char *val)
+{
+ int res;
+ EVDNS_LOCK(base);
+ res = evdns_base_set_option_impl(base, option, val, DNS_OPTIONS_ALL);
+ EVDNS_UNLOCK(base);
+ return res;
+}
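+
+/* Usage sketch (added, illustrative only): options are name/value strings,
+ * accepted with or without the trailing colon, e.g.
+ *
+ *     evdns_base_set_option(base, "timeout:", "3.0");
+ *     evdns_base_set_option(base, "max-inflight:", "128");
+ *     evdns_base_set_option(base, "randomize-case:", "0");
+ *
+ * the recognized names are the ones handled in evdns_base_set_option_impl()
+ * below. */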
+
+static inline int
+str_matches_option(const char *s1, const char *optionname)
+{
+ /* Option names are given as "option:". We accept either 'option' in
+ * s1, or 'option:randomjunk'. The latter form is to implement the
+ * resolv.conf parser. */
+ size_t optlen = strlen(optionname);
+ size_t slen = strlen(s1);
+ if (slen == optlen || slen == optlen - 1)
+ return !strncmp(s1, optionname, slen);
+ else if (slen > optlen)
+ return !strncmp(s1, optionname, optlen);
+ else
+ return 0;
+}
+
+static int
+evdns_base_set_option_impl(struct evdns_base *base,
+ const char *option, const char *val, int flags)
+{
+ ASSERT_LOCKED(base);
+ if (str_matches_option(option, "ndots:")) {
+ const int ndots = strtoint(val);
+ if (ndots == -1) return -1;
+ if (!(flags & DNS_OPTION_SEARCH)) return 0;
+ log(EVDNS_LOG_DEBUG, "Setting ndots to %d", ndots);
+ if (!base->global_search_state) base->global_search_state = search_state_new();
+ if (!base->global_search_state) return -1;
+ base->global_search_state->ndots = ndots;
+ } else if (str_matches_option(option, "timeout:")) {
+ struct timeval tv;
+ if (evdns_strtotimeval(val, &tv) == -1) return -1;
+ if (!(flags & DNS_OPTION_MISC)) return 0;
+ log(EVDNS_LOG_DEBUG, "Setting timeout to %s", val);
+ memcpy(&base->global_timeout, &tv, sizeof(struct timeval));
+ } else if (str_matches_option(option, "getaddrinfo-allow-skew:")) {
+ struct timeval tv;
+ if (evdns_strtotimeval(val, &tv) == -1) return -1;
+ if (!(flags & DNS_OPTION_MISC)) return 0;
+ log(EVDNS_LOG_DEBUG, "Setting getaddrinfo-allow-skew to %s",
+ val);
+ memcpy(&base->global_getaddrinfo_allow_skew, &tv,
+ sizeof(struct timeval));
+ } else if (str_matches_option(option, "max-timeouts:")) {
+ const int maxtimeout = strtoint_clipped(val, 1, 255);
+ if (maxtimeout == -1) return -1;
+ if (!(flags & DNS_OPTION_MISC)) return 0;
+ log(EVDNS_LOG_DEBUG, "Setting maximum allowed timeouts to %d",
+ maxtimeout);
+ base->global_max_nameserver_timeout = maxtimeout;
+ } else if (str_matches_option(option, "max-inflight:")) {
+ const int maxinflight = strtoint_clipped(val, 1, 65000);
+ if (maxinflight == -1) return -1;
+ if (!(flags & DNS_OPTION_MISC)) return 0;
+ log(EVDNS_LOG_DEBUG, "Setting maximum inflight requests to %d",
+ maxinflight);
+ evdns_base_set_max_requests_inflight(base, maxinflight);
+ } else if (str_matches_option(option, "attempts:")) {
+ int retries = strtoint(val);
+ if (retries == -1) return -1;
+ if (retries > 255) retries = 255;
+ if (!(flags & DNS_OPTION_MISC)) return 0;
+ log(EVDNS_LOG_DEBUG, "Setting retries to %d", retries);
+ base->global_max_retransmits = retries;
+ } else if (str_matches_option(option, "randomize-case:")) {
+ int randcase = strtoint(val);
+ if (!(flags & DNS_OPTION_MISC)) return 0;
+ base->global_randomize_case = randcase;
+ } else if (str_matches_option(option, "bind-to:")) {
+ /* XXX This only applies to successive nameservers, not
+ * to already-configured ones. We might want to fix that. */
+ int len = sizeof(base->global_outgoing_address);
+ if (!(flags & DNS_OPTION_NAMESERVERS)) return 0;
+ if (evutil_parse_sockaddr_port(val,
+ (struct sockaddr*)&base->global_outgoing_address, &len))
+ return -1;
+ base->global_outgoing_addrlen = len;
+ } else if (str_matches_option(option, "initial-probe-timeout:")) {
+ struct timeval tv;
+ if (evdns_strtotimeval(val, &tv) == -1) return -1;
+ if (tv.tv_sec > 3600)
+ tv.tv_sec = 3600;
+ if (!(flags & DNS_OPTION_MISC)) return 0;
+ log(EVDNS_LOG_DEBUG, "Setting initial probe timeout to %s",
+ val);
+ memcpy(&base->global_nameserver_probe_initial_timeout, &tv,
+ sizeof(tv));
+ }
+ return 0;
+}
+
+int
+evdns_set_option(const char *option, const char *val, int flags)
+{
+ if (!current_base)
+ current_base = evdns_base_new(NULL, 0);
+ return evdns_base_set_option(current_base, option, val);
+}
+
+static void
+resolv_conf_parse_line(struct evdns_base *base, char *const start, int flags) {
+ char *strtok_state;
+ static const char *const delims = " \t";
+#define NEXT_TOKEN strtok_r(NULL, delims, &strtok_state)
+
+
+ char *const first_token = strtok_r(start, delims, &strtok_state);
+ ASSERT_LOCKED(base);
+ if (!first_token) return;
+
+ if (!strcmp(first_token, "nameserver") && (flags & DNS_OPTION_NAMESERVERS)) {
+ const char *const nameserver = NEXT_TOKEN;
+
+ if (nameserver)
+ evdns_base_nameserver_ip_add(base, nameserver);
+ } else if (!strcmp(first_token, "domain") && (flags & DNS_OPTION_SEARCH)) {
+ const char *const domain = NEXT_TOKEN;
+ if (domain) {
+ search_postfix_clear(base);
+ search_postfix_add(base, domain);
+ }
+ } else if (!strcmp(first_token, "search") && (flags & DNS_OPTION_SEARCH)) {
+ const char *domain;
+ search_postfix_clear(base);
+
+ while ((domain = NEXT_TOKEN)) {
+ search_postfix_add(base, domain);
+ }
+ search_reverse(base);
+ } else if (!strcmp(first_token, "options")) {
+ const char *option;
+ while ((option = NEXT_TOKEN)) {
+ const char *val = strchr(option, ':');
+ evdns_base_set_option_impl(base, option, val ? val+1 : "", flags);
+ }
+ }
+#undef NEXT_TOKEN
+}
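+
+/* Illustrative sketch (not part of the upstream source): a resolv.conf
+ * fragment that exercises each branch of the parser above:
+ *
+ *	nameserver 192.0.2.1
+ *	domain example.com
+ *	search example.com example.net
+ *	options ndots:2 timeout:3 attempts:2
+ */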
+
+/* exported function */
+/* returns: */
+/* 0 no errors */
+/* 1 failed to open file */
+/* 2 failed to stat file */
+/* 3 file too large */
+/* 4 out of memory */
+/* 5 short read from file */
+/* 6 no nameservers listed in the file; 127.0.0.1 used as a default */
+int
+evdns_base_resolv_conf_parse(struct evdns_base *base, int flags, const char *const filename) {
+ int res;
+ EVDNS_LOCK(base);
+ res = evdns_base_resolv_conf_parse_impl(base, flags, filename);
+ EVDNS_UNLOCK(base);
+ return res;
+}
+
+static char *
+evdns_get_default_hosts_filename(void)
+{
+#ifdef _WIN32
+ /* Windows is a little coy about where it puts its configuration
+ * files. Sure, they're _usually_ in C:\windows\system32, but
+ * there's no reason in principle they couldn't be in
+ * W:\hoboken chicken emergency\
+ */
+ char path[MAX_PATH+1];
+ static const char hostfile[] = "\\drivers\\etc\\hosts";
+ char *path_out;
+ size_t len_out;
+
+ if (! SHGetSpecialFolderPathA(NULL, path, CSIDL_SYSTEM, 0))
+ return NULL;
+ len_out = strlen(path)+strlen(hostfile)+1;
+	path_out = mm_malloc(len_out);
+	if (!path_out)
+		return NULL;
+	evutil_snprintf(path_out, len_out, "%s%s", path, hostfile);
+ return path_out;
+#else
+ return mm_strdup("/etc/hosts");
+#endif
+}
+
+static int
+evdns_base_resolv_conf_parse_impl(struct evdns_base *base, int flags, const char *const filename) {
+ size_t n;
+ char *resolv;
+ char *start;
+ int err = 0;
+
+ log(EVDNS_LOG_DEBUG, "Parsing resolv.conf file %s", filename);
+
+ if (flags & DNS_OPTION_HOSTSFILE) {
+ char *fname = evdns_get_default_hosts_filename();
+ evdns_base_load_hosts(base, fname);
+ if (fname)
+ mm_free(fname);
+ }
+
+ if ((err = evutil_read_file_(filename, &resolv, &n, 0)) < 0) {
+ if (err == -1) {
+ /* No file. */
+ evdns_resolv_set_defaults(base, flags);
+ return 1;
+ } else {
+ return 2;
+ }
+ }
+
+ start = resolv;
+ for (;;) {
+ char *const newline = strchr(start, '\n');
+ if (!newline) {
+ resolv_conf_parse_line(base, start, flags);
+ break;
+ } else {
+ *newline = 0;
+ resolv_conf_parse_line(base, start, flags);
+ start = newline + 1;
+ }
+ }
+
+ if (!base->server_head && (flags & DNS_OPTION_NAMESERVERS)) {
+ /* no nameservers were configured. */
+ evdns_base_nameserver_ip_add(base, "127.0.0.1");
+ err = 6;
+ }
+ if (flags & DNS_OPTION_SEARCH && (!base->global_search_state || base->global_search_state->num_domains == 0)) {
+ search_set_from_hostname(base);
+ }
+
+ mm_free(resolv);
+ return err;
+}
+
+int
+evdns_resolv_conf_parse(int flags, const char *const filename) {
+ if (!current_base)
+ current_base = evdns_base_new(NULL, 0);
+ return evdns_base_resolv_conf_parse(current_base, flags, filename);
+}
+
+
+#ifdef _WIN32
+/* Add multiple nameservers from a space-or-comma-separated list. */
+static int
+evdns_nameserver_ip_add_line(struct evdns_base *base, const char *ips) {
+ const char *addr;
+ char *buf;
+ int r;
+ ASSERT_LOCKED(base);
+ while (*ips) {
+ while (isspace(*ips) || *ips == ',' || *ips == '\t')
+ ++ips;
+ addr = ips;
+ while (isdigit(*ips) || *ips == '.' || *ips == ':' ||
+ *ips=='[' || *ips==']')
+ ++ips;
+ buf = mm_malloc(ips-addr+1);
+ if (!buf) return 4;
+ memcpy(buf, addr, ips-addr);
+ buf[ips-addr] = '\0';
+ r = evdns_base_nameserver_ip_add(base, buf);
+ mm_free(buf);
+ if (r) return r;
+ }
+ return 0;
+}
+
+typedef DWORD(WINAPI *GetNetworkParams_fn_t)(FIXED_INFO *, DWORD*);
+
+/* Use the windows GetNetworkParams interface in iphlpapi.dll to */
+/* figure out what our nameservers are. */
+static int
+load_nameservers_with_getnetworkparams(struct evdns_base *base)
+{
+ /* Based on MSDN examples and inspection of c-ares code. */
+ FIXED_INFO *fixed;
+ HMODULE handle = 0;
+ ULONG size = sizeof(FIXED_INFO);
+ void *buf = NULL;
+ int status = 0, r, added_any;
+ IP_ADDR_STRING *ns;
+ GetNetworkParams_fn_t fn;
+
+ ASSERT_LOCKED(base);
+ if (!(handle = evutil_load_windows_system_library_(
+ TEXT("iphlpapi.dll")))) {
+ log(EVDNS_LOG_WARN, "Could not open iphlpapi.dll");
+ status = -1;
+ goto done;
+ }
+ if (!(fn = (GetNetworkParams_fn_t) GetProcAddress(handle, "GetNetworkParams"))) {
+ log(EVDNS_LOG_WARN, "Could not get address of function.");
+ status = -1;
+ goto done;
+ }
+
+ buf = mm_malloc(size);
+ if (!buf) { status = 4; goto done; }
+ fixed = buf;
+ r = fn(fixed, &size);
+ if (r != ERROR_SUCCESS && r != ERROR_BUFFER_OVERFLOW) {
+ status = -1;
+ goto done;
+ }
+ if (r != ERROR_SUCCESS) {
+ mm_free(buf);
+ buf = mm_malloc(size);
+ if (!buf) { status = 4; goto done; }
+ fixed = buf;
+ r = fn(fixed, &size);
+ if (r != ERROR_SUCCESS) {
+ log(EVDNS_LOG_DEBUG, "fn() failed.");
+ status = -1;
+ goto done;
+ }
+ }
+
+ EVUTIL_ASSERT(fixed);
+ added_any = 0;
+ ns = &(fixed->DnsServerList);
+ while (ns) {
+ r = evdns_nameserver_ip_add_line(base, ns->IpAddress.String);
+ if (r) {
+ log(EVDNS_LOG_DEBUG,"Could not add nameserver %s to list,error: %d",
+ (ns->IpAddress.String),(int)GetLastError());
+ status = r;
+ } else {
+ ++added_any;
+ log(EVDNS_LOG_DEBUG,"Successfully added %s as nameserver",ns->IpAddress.String);
+ }
+
+ ns = ns->Next;
+ }
+
+ if (!added_any) {
+ log(EVDNS_LOG_DEBUG, "No nameservers added.");
+ if (status == 0)
+ status = -1;
+ } else {
+ status = 0;
+ }
+
+ done:
+ if (buf)
+ mm_free(buf);
+ if (handle)
+ FreeLibrary(handle);
+ return status;
+}
+
+static int
+config_nameserver_from_reg_key(struct evdns_base *base, HKEY key, const TCHAR *subkey)
+{
+ char *buf;
+ DWORD bufsz = 0, type = 0;
+ int status = 0;
+
+ ASSERT_LOCKED(base);
+ if (RegQueryValueEx(key, subkey, 0, &type, NULL, &bufsz)
+ != ERROR_MORE_DATA)
+ return -1;
+ if (!(buf = mm_malloc(bufsz)))
+ return -1;
+
+ if (RegQueryValueEx(key, subkey, 0, &type, (LPBYTE)buf, &bufsz)
+ == ERROR_SUCCESS && bufsz > 1) {
+ status = evdns_nameserver_ip_add_line(base,buf);
+ }
+
+ mm_free(buf);
+ return status;
+}
+
+#define SERVICES_KEY TEXT("System\\CurrentControlSet\\Services\\")
+#define WIN_NS_9X_KEY SERVICES_KEY TEXT("VxD\\MSTCP")
+#define WIN_NS_NT_KEY SERVICES_KEY TEXT("Tcpip\\Parameters")
+
+static int
+load_nameservers_from_registry(struct evdns_base *base)
+{
+ int found = 0;
+ int r;
+#define TRY(k, name) \
+ if (!found && config_nameserver_from_reg_key(base,k,TEXT(name)) == 0) { \
+ log(EVDNS_LOG_DEBUG,"Found nameservers in %s/%s",#k,name); \
+ found = 1; \
+ } else if (!found) { \
+ log(EVDNS_LOG_DEBUG,"Didn't find nameservers in %s/%s", \
+ #k,#name); \
+ }
+
+ ASSERT_LOCKED(base);
+
+ if (((int)GetVersion()) > 0) { /* NT */
+ HKEY nt_key = 0, interfaces_key = 0;
+
+ if (RegOpenKeyEx(HKEY_LOCAL_MACHINE, WIN_NS_NT_KEY, 0,
+ KEY_READ, &nt_key) != ERROR_SUCCESS) {
+ log(EVDNS_LOG_DEBUG,"Couldn't open nt key, %d",(int)GetLastError());
+ return -1;
+ }
+ r = RegOpenKeyEx(nt_key, TEXT("Interfaces"), 0,
+ KEY_QUERY_VALUE|KEY_ENUMERATE_SUB_KEYS,
+ &interfaces_key);
+ if (r != ERROR_SUCCESS) {
+ log(EVDNS_LOG_DEBUG,"Couldn't open interfaces key, %d",(int)GetLastError());
+ return -1;
+ }
+ TRY(nt_key, "NameServer");
+ TRY(nt_key, "DhcpNameServer");
+ TRY(interfaces_key, "NameServer");
+ TRY(interfaces_key, "DhcpNameServer");
+ RegCloseKey(interfaces_key);
+ RegCloseKey(nt_key);
+ } else {
+ HKEY win_key = 0;
+ if (RegOpenKeyEx(HKEY_LOCAL_MACHINE, WIN_NS_9X_KEY, 0,
+ KEY_READ, &win_key) != ERROR_SUCCESS) {
+ log(EVDNS_LOG_DEBUG, "Couldn't open registry key, %d", (int)GetLastError());
+ return -1;
+ }
+ TRY(win_key, "NameServer");
+ RegCloseKey(win_key);
+ }
+
+ if (found == 0) {
+ log(EVDNS_LOG_WARN,"Didn't find any nameservers.");
+ }
+
+ return found ? 0 : -1;
+#undef TRY
+}
+
+int
+evdns_base_config_windows_nameservers(struct evdns_base *base)
+{
+ int r;
+ char *fname;
+ if (base == NULL)
+ base = current_base;
+ if (base == NULL)
+ return -1;
+ EVDNS_LOCK(base);
+ fname = evdns_get_default_hosts_filename();
+ log(EVDNS_LOG_DEBUG, "Loading hosts entries from %s", fname);
+ evdns_base_load_hosts(base, fname);
+ if (fname)
+ mm_free(fname);
+
+ if (load_nameservers_with_getnetworkparams(base) == 0) {
+ EVDNS_UNLOCK(base);
+ return 0;
+ }
+ r = load_nameservers_from_registry(base);
+
+ EVDNS_UNLOCK(base);
+ return r;
+}
+
+int
+evdns_config_windows_nameservers(void)
+{
+ if (!current_base) {
+ current_base = evdns_base_new(NULL, 1);
+ return current_base == NULL ? -1 : 0;
+ } else {
+ return evdns_base_config_windows_nameservers(current_base);
+ }
+}
+#endif
+
+struct evdns_base *
+evdns_base_new(struct event_base *event_base, int flags)
+{
+ struct evdns_base *base;
+
+ if (evutil_secure_rng_init() < 0) {
+ log(EVDNS_LOG_WARN, "Unable to seed random number generator; "
+ "DNS can't run.");
+ return NULL;
+ }
+
+ /* Give the evutil library a hook into its evdns-enabled
+ * functionality. We can't just call evdns_getaddrinfo directly or
+ * else libevent-core will depend on libevent-extras. */
+ evutil_set_evdns_getaddrinfo_fn_(evdns_getaddrinfo);
+
+ base = mm_malloc(sizeof(struct evdns_base));
+ if (base == NULL)
+ return (NULL);
+ memset(base, 0, sizeof(struct evdns_base));
+ base->req_waiting_head = NULL;
+
+ EVTHREAD_ALLOC_LOCK(base->lock, EVTHREAD_LOCKTYPE_RECURSIVE);
+ EVDNS_LOCK(base);
+
+ /* Set max requests inflight and allocate req_heads. */
+ base->req_heads = NULL;
+
+ evdns_base_set_max_requests_inflight(base, 64);
+
+ base->server_head = NULL;
+ base->event_base = event_base;
+ base->global_good_nameservers = base->global_requests_inflight =
+ base->global_requests_waiting = 0;
+
+ base->global_timeout.tv_sec = 5;
+ base->global_timeout.tv_usec = 0;
+ base->global_max_reissues = 1;
+ base->global_max_retransmits = 3;
+ base->global_max_nameserver_timeout = 3;
+ base->global_search_state = NULL;
+ base->global_randomize_case = 1;
+ base->global_getaddrinfo_allow_skew.tv_sec = 3;
+ base->global_getaddrinfo_allow_skew.tv_usec = 0;
+ base->global_nameserver_probe_initial_timeout.tv_sec = 10;
+ base->global_nameserver_probe_initial_timeout.tv_usec = 0;
+
+ TAILQ_INIT(&base->hostsdb);
+
+#define EVDNS_BASE_ALL_FLAGS (0x8001)
+ if (flags & ~EVDNS_BASE_ALL_FLAGS) {
+ flags = EVDNS_BASE_INITIALIZE_NAMESERVERS;
+ log(EVDNS_LOG_WARN,
+ "Unrecognized flag passed to evdns_base_new(). Assuming "
+ "you meant EVDNS_BASE_INITIALIZE_NAMESERVERS.");
+ }
+#undef EVDNS_BASE_ALL_FLAGS
+
+ if (flags & EVDNS_BASE_INITIALIZE_NAMESERVERS) {
+ int r;
+#ifdef _WIN32
+ r = evdns_base_config_windows_nameservers(base);
+#else
+ r = evdns_base_resolv_conf_parse(base, DNS_OPTIONS_ALL, "/etc/resolv.conf");
+#endif
+ if (r == -1) {
+ evdns_base_free_and_unlock(base, 0);
+ return NULL;
+ }
+ }
+ if (flags & EVDNS_BASE_DISABLE_WHEN_INACTIVE) {
+ base->disable_when_inactive = 1;
+ }
+
+ EVDNS_UNLOCK(base);
+ return base;
+}
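+
+/* Illustrative sketch (not part of the upstream source): typical setup
+ * through the public headers <event2/event.h> and <event2/dns.h>:
+ *
+ *	struct event_base *base = event_base_new();
+ *	struct evdns_base *dns =
+ *	    evdns_base_new(base, EVDNS_BASE_INITIALIZE_NAMESERVERS);
+ *	if (!dns)
+ *		... no system nameservers could be configured ...
+ *
+ * Passing 0 instead of EVDNS_BASE_INITIALIZE_NAMESERVERS skips the
+ * resolv.conf / registry probing above; nameservers can then be added
+ * explicitly with evdns_base_nameserver_ip_add(). */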
+
+int
+evdns_init(void)
+{
+ struct evdns_base *base = evdns_base_new(NULL, 1);
+ if (base) {
+ current_base = base;
+ return 0;
+ } else {
+ return -1;
+ }
+}
+
+const char *
+evdns_err_to_string(int err)
+{
+ switch (err) {
+ case DNS_ERR_NONE: return "no error";
+ case DNS_ERR_FORMAT: return "misformatted query";
+ case DNS_ERR_SERVERFAILED: return "server failed";
+ case DNS_ERR_NOTEXIST: return "name does not exist";
+ case DNS_ERR_NOTIMPL: return "query not implemented";
+ case DNS_ERR_REFUSED: return "refused";
+
+ case DNS_ERR_TRUNCATED: return "reply truncated or ill-formed";
+ case DNS_ERR_UNKNOWN: return "unknown";
+ case DNS_ERR_TIMEOUT: return "request timed out";
+ case DNS_ERR_SHUTDOWN: return "dns subsystem shut down";
+ case DNS_ERR_CANCEL: return "dns request canceled";
+ case DNS_ERR_NODATA: return "no records in the reply";
+ default: return "[Unknown error code]";
+ }
+}
+
+static void
+evdns_nameserver_free(struct nameserver *server)
+{
+ if (server->socket >= 0)
+ evutil_closesocket(server->socket);
+ (void) event_del(&server->event);
+ event_debug_unassign(&server->event);
+ if (server->state == 0)
+ (void) event_del(&server->timeout_event);
+ if (server->probe_request) {
+ evdns_cancel_request(server->base, server->probe_request);
+ server->probe_request = NULL;
+ }
+ event_debug_unassign(&server->timeout_event);
+ mm_free(server);
+}
+
+static void
+evdns_base_free_and_unlock(struct evdns_base *base, int fail_requests)
+{
+ struct nameserver *server, *server_next;
+ struct search_domain *dom, *dom_next;
+ int i;
+
+ /* Requires that we hold the lock. */
+
+ /* TODO(nickm) we might need to refcount here. */
+
+ for (i = 0; i < base->n_req_heads; ++i) {
+ while (base->req_heads[i]) {
+ if (fail_requests)
+ reply_schedule_callback(base->req_heads[i], 0, DNS_ERR_SHUTDOWN, NULL);
+ request_finished(base->req_heads[i], &REQ_HEAD(base, base->req_heads[i]->trans_id), 1);
+ }
+ }
+ while (base->req_waiting_head) {
+ if (fail_requests)
+ reply_schedule_callback(base->req_waiting_head, 0, DNS_ERR_SHUTDOWN, NULL);
+ request_finished(base->req_waiting_head, &base->req_waiting_head, 1);
+ }
+ base->global_requests_inflight = base->global_requests_waiting = 0;
+
+ for (server = base->server_head; server; server = server_next) {
+ server_next = server->next;
+ evdns_nameserver_free(server);
+ if (server_next == base->server_head)
+ break;
+ }
+ base->server_head = NULL;
+ base->global_good_nameservers = 0;
+
+ if (base->global_search_state) {
+ for (dom = base->global_search_state->head; dom; dom = dom_next) {
+ dom_next = dom->next;
+ mm_free(dom);
+ }
+ mm_free(base->global_search_state);
+ base->global_search_state = NULL;
+ }
+
+ {
+ struct hosts_entry *victim;
+ while ((victim = TAILQ_FIRST(&base->hostsdb))) {
+ TAILQ_REMOVE(&base->hostsdb, victim, next);
+ mm_free(victim);
+ }
+ }
+
+ mm_free(base->req_heads);
+
+ EVDNS_UNLOCK(base);
+ EVTHREAD_FREE_LOCK(base->lock, EVTHREAD_LOCKTYPE_RECURSIVE);
+
+ mm_free(base);
+}
+
+void
+evdns_base_free(struct evdns_base *base, int fail_requests)
+{
+ EVDNS_LOCK(base);
+ evdns_base_free_and_unlock(base, fail_requests);
+}
+
+void
+evdns_base_clear_host_addresses(struct evdns_base *base)
+{
+ struct hosts_entry *victim;
+ EVDNS_LOCK(base);
+ while ((victim = TAILQ_FIRST(&base->hostsdb))) {
+ TAILQ_REMOVE(&base->hostsdb, victim, next);
+ mm_free(victim);
+ }
+ EVDNS_UNLOCK(base);
+}
+
+void
+evdns_shutdown(int fail_requests)
+{
+ if (current_base) {
+ struct evdns_base *b = current_base;
+ current_base = NULL;
+ evdns_base_free(b, fail_requests);
+ }
+ evdns_log_fn = NULL;
+}
+
+static int
+evdns_base_parse_hosts_line(struct evdns_base *base, char *line)
+{
+ char *strtok_state;
+ static const char *const delims = " \t";
+ char *const addr = strtok_r(line, delims, &strtok_state);
+ char *hostname, *hash;
+ struct sockaddr_storage ss;
+ int socklen = sizeof(ss);
+ ASSERT_LOCKED(base);
+
+#define NEXT_TOKEN strtok_r(NULL, delims, &strtok_state)
+
+ if (!addr || *addr == '#')
+ return 0;
+
+ memset(&ss, 0, sizeof(ss));
+ if (evutil_parse_sockaddr_port(addr, (struct sockaddr*)&ss, &socklen)<0)
+ return -1;
+ if (socklen > (int)sizeof(struct sockaddr_in6))
+ return -1;
+
+ if (sockaddr_getport((struct sockaddr*)&ss))
+ return -1;
+
+ while ((hostname = NEXT_TOKEN)) {
+ struct hosts_entry *he;
+ size_t namelen;
+ if ((hash = strchr(hostname, '#'))) {
+ if (hash == hostname)
+ return 0;
+ *hash = '\0';
+ }
+
+ namelen = strlen(hostname);
+
+ he = mm_calloc(1, sizeof(struct hosts_entry)+namelen);
+ if (!he)
+ return -1;
+ EVUTIL_ASSERT(socklen <= (int)sizeof(he->addr));
+ memcpy(&he->addr, &ss, socklen);
+ memcpy(he->hostname, hostname, namelen+1);
+ he->addrlen = socklen;
+
+ TAILQ_INSERT_TAIL(&base->hostsdb, he, next);
+
+ if (hash)
+ return 0;
+ }
+
+ return 0;
+#undef NEXT_TOKEN
+}
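+
+/* Illustrative sketch (not part of the upstream source): lines accepted by
+ * the parser above follow the usual hosts(5) layout, for example:
+ *
+ *	127.0.0.1	localhost localhost.localdomain	# loopback
+ *	::1		localhost
+ */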
+
+static int
+evdns_base_load_hosts_impl(struct evdns_base *base, const char *hosts_fname)
+{
+ char *str=NULL, *cp, *eol;
+ size_t len;
+ int err=0;
+
+ ASSERT_LOCKED(base);
+
+ if (hosts_fname == NULL ||
+ (err = evutil_read_file_(hosts_fname, &str, &len, 0)) < 0) {
+ char tmp[64];
+ strlcpy(tmp, "127.0.0.1 localhost", sizeof(tmp));
+ evdns_base_parse_hosts_line(base, tmp);
+ strlcpy(tmp, "::1 localhost", sizeof(tmp));
+ evdns_base_parse_hosts_line(base, tmp);
+ return err ? -1 : 0;
+ }
+
+	/* This will break early if there is a NUL in the hosts file.
+	 * Probably not a problem. */
+ cp = str;
+ for (;;) {
+ eol = strchr(cp, '\n');
+
+ if (eol) {
+ *eol = '\0';
+ evdns_base_parse_hosts_line(base, cp);
+ cp = eol+1;
+ } else {
+ evdns_base_parse_hosts_line(base, cp);
+ break;
+ }
+ }
+
+ mm_free(str);
+ return 0;
+}
+
+int
+evdns_base_load_hosts(struct evdns_base *base, const char *hosts_fname)
+{
+ int res;
+ if (!base)
+ base = current_base;
+ EVDNS_LOCK(base);
+ res = evdns_base_load_hosts_impl(base, hosts_fname);
+ EVDNS_UNLOCK(base);
+ return res;
+}
+
+/* A single request for a getaddrinfo, either v4 or v6. */
+struct getaddrinfo_subrequest {
+ struct evdns_request *r;
+ ev_uint32_t type;
+};
+
+/* State data used to implement an in-progress getaddrinfo. */
+struct evdns_getaddrinfo_request {
+ struct evdns_base *evdns_base;
+ /* Copy of the modified 'hints' data that we'll use to build
+ * answers. */
+ struct evutil_addrinfo hints;
+ /* The callback to invoke when we're done */
+ evdns_getaddrinfo_cb user_cb;
+ /* User-supplied data to give to the callback. */
+ void *user_data;
+ /* The port to use when building sockaddrs. */
+ ev_uint16_t port;
+ /* The sub_request for an A record (if any) */
+ struct getaddrinfo_subrequest ipv4_request;
+ /* The sub_request for an AAAA record (if any) */
+ struct getaddrinfo_subrequest ipv6_request;
+
+ /* The cname result that we were told (if any) */
+ char *cname_result;
+
+ /* If we have one request answered and one request still inflight,
+ * then this field holds the answer from the first request... */
+ struct evutil_addrinfo *pending_result;
+ /* And this event is a timeout that will tell us to cancel the second
+ * request if it's taking a long time. */
+ struct event timeout;
+
+ /* And this field holds the error code from the first request... */
+ int pending_error;
+ /* If this is set, the user canceled this request. */
+ unsigned user_canceled : 1;
+ /* If this is set, the user can no longer cancel this request; we're
+ * just waiting for the free. */
+ unsigned request_done : 1;
+};
+
+/* Convert an evdns error to the equivalent getaddrinfo error. */
+static int
+evdns_err_to_getaddrinfo_err(int e1)
+{
+ /* XXX Do this better! */
+ if (e1 == DNS_ERR_NONE)
+ return 0;
+ else if (e1 == DNS_ERR_NOTEXIST)
+ return EVUTIL_EAI_NONAME;
+ else
+ return EVUTIL_EAI_FAIL;
+}
+
+/* Return the more informative of two getaddrinfo errors. */
+static int
+getaddrinfo_merge_err(int e1, int e2)
+{
+ /* XXXX be cleverer here. */
+ if (e1 == 0)
+ return e2;
+ else
+ return e1;
+}
+
+static void
+free_getaddrinfo_request(struct evdns_getaddrinfo_request *data)
+{
+	/* DO NOT CALL this if either of the requests is pending.  Only once
+	 * both callbacks have been invoked is it safe to free the request. */
+ if (data->pending_result)
+ evutil_freeaddrinfo(data->pending_result);
+ if (data->cname_result)
+ mm_free(data->cname_result);
+ event_del(&data->timeout);
+ mm_free(data);
+ return;
+}
+
+static void
+add_cname_to_reply(struct evdns_getaddrinfo_request *data,
+ struct evutil_addrinfo *ai)
+{
+ if (data->cname_result && ai) {
+ ai->ai_canonname = data->cname_result;
+ data->cname_result = NULL;
+ }
+}
+
+/* Callback: invoked when one request in a mixed-format A/AAAA getaddrinfo
+ * request has finished, but the other one took too long to answer. Pass
+ * along the answer we got, and cancel the other request.
+ */
+static void
+evdns_getaddrinfo_timeout_cb(evutil_socket_t fd, short what, void *ptr)
+{
+ int v4_timedout = 0, v6_timedout = 0;
+ struct evdns_getaddrinfo_request *data = ptr;
+
+ /* Cancel any pending requests, and note which one */
+ if (data->ipv4_request.r) {
+ /* XXXX This does nothing if the request's callback is already
+ * running (pending_cb is set). */
+ evdns_cancel_request(NULL, data->ipv4_request.r);
+ v4_timedout = 1;
+ EVDNS_LOCK(data->evdns_base);
+ ++data->evdns_base->getaddrinfo_ipv4_timeouts;
+ EVDNS_UNLOCK(data->evdns_base);
+ }
+ if (data->ipv6_request.r) {
+ /* XXXX This does nothing if the request's callback is already
+ * running (pending_cb is set). */
+ evdns_cancel_request(NULL, data->ipv6_request.r);
+ v6_timedout = 1;
+ EVDNS_LOCK(data->evdns_base);
+ ++data->evdns_base->getaddrinfo_ipv6_timeouts;
+ EVDNS_UNLOCK(data->evdns_base);
+ }
+
+ /* We only use this timeout callback when we have an answer for
+ * one address. */
+ EVUTIL_ASSERT(!v4_timedout || !v6_timedout);
+
+ /* Report the outcome of the other request that didn't time out. */
+ if (data->pending_result) {
+ add_cname_to_reply(data, data->pending_result);
+ data->user_cb(0, data->pending_result, data->user_data);
+ data->pending_result = NULL;
+ } else {
+ int e = data->pending_error;
+ if (!e)
+ e = EVUTIL_EAI_AGAIN;
+ data->user_cb(e, NULL, data->user_data);
+ }
+
+ data->user_cb = NULL; /* prevent double-call if evdns callbacks are
+ * in-progress. XXXX It would be better if this
+ * weren't necessary. */
+
+ if (!v4_timedout && !v6_timedout) {
+ /* should be impossible? XXXX */
+ free_getaddrinfo_request(data);
+ }
+}
+
+static int
+evdns_getaddrinfo_set_timeout(struct evdns_base *evdns_base,
+ struct evdns_getaddrinfo_request *data)
+{
+ return event_add(&data->timeout, &evdns_base->global_getaddrinfo_allow_skew);
+}
+
+static inline int
+evdns_result_is_answer(int result)
+{
+ return (result != DNS_ERR_NOTIMPL && result != DNS_ERR_REFUSED &&
+ result != DNS_ERR_SERVERFAILED && result != DNS_ERR_CANCEL);
+}
+
+static void
+evdns_getaddrinfo_gotresolve(int result, char type, int count,
+ int ttl, void *addresses, void *arg)
+{
+ int i;
+ struct getaddrinfo_subrequest *req = arg;
+ struct getaddrinfo_subrequest *other_req;
+ struct evdns_getaddrinfo_request *data;
+
+ struct evutil_addrinfo *res;
+
+ struct sockaddr_in sin;
+ struct sockaddr_in6 sin6;
+ struct sockaddr *sa;
+ int socklen, addrlen;
+ void *addrp;
+ int err;
+ int user_canceled;
+
+ EVUTIL_ASSERT(req->type == DNS_IPv4_A || req->type == DNS_IPv6_AAAA);
+ if (req->type == DNS_IPv4_A) {
+ data = EVUTIL_UPCAST(req, struct evdns_getaddrinfo_request, ipv4_request);
+ other_req = &data->ipv6_request;
+ } else {
+ data = EVUTIL_UPCAST(req, struct evdns_getaddrinfo_request, ipv6_request);
+ other_req = &data->ipv4_request;
+ }
+
+ /** Called from evdns_base_free() with @fail_requests == 1 */
+ if (result != DNS_ERR_SHUTDOWN) {
+ EVDNS_LOCK(data->evdns_base);
+ if (evdns_result_is_answer(result)) {
+ if (req->type == DNS_IPv4_A)
+ ++data->evdns_base->getaddrinfo_ipv4_answered;
+ else
+ ++data->evdns_base->getaddrinfo_ipv6_answered;
+ }
+ user_canceled = data->user_canceled;
+ if (other_req->r == NULL)
+ data->request_done = 1;
+ EVDNS_UNLOCK(data->evdns_base);
+ } else {
+ data->evdns_base = NULL;
+ user_canceled = data->user_canceled;
+ }
+
+ req->r = NULL;
+
+ if (result == DNS_ERR_CANCEL && ! user_canceled) {
+		/* Internal cancel request from timeout or internal error.
+		 * We already answered the user. */
+ if (other_req->r == NULL)
+ free_getaddrinfo_request(data);
+ return;
+ }
+
+ if (data->user_cb == NULL) {
+ /* We already answered. XXXX This shouldn't be needed; see
+ * comments in evdns_getaddrinfo_timeout_cb */
+ free_getaddrinfo_request(data);
+ return;
+ }
+
+ if (result == DNS_ERR_NONE) {
+ if (count == 0)
+ err = EVUTIL_EAI_NODATA;
+ else
+ err = 0;
+ } else {
+ err = evdns_err_to_getaddrinfo_err(result);
+ }
+
+ if (err) {
+ /* Looks like we got an error. */
+ if (other_req->r) {
+ /* The other request is still working; maybe it will
+ * succeed. */
+ /* XXXX handle failure from set_timeout */
+ if (result != DNS_ERR_SHUTDOWN) {
+ evdns_getaddrinfo_set_timeout(data->evdns_base, data);
+ }
+ data->pending_error = err;
+ return;
+ }
+
+ if (user_canceled) {
+ data->user_cb(EVUTIL_EAI_CANCEL, NULL, data->user_data);
+ } else if (data->pending_result) {
+ /* If we have an answer waiting, and we weren't
+ * canceled, ignore this error. */
+ add_cname_to_reply(data, data->pending_result);
+ data->user_cb(0, data->pending_result, data->user_data);
+ data->pending_result = NULL;
+ } else {
+ if (data->pending_error)
+ err = getaddrinfo_merge_err(err,
+ data->pending_error);
+ data->user_cb(err, NULL, data->user_data);
+ }
+ free_getaddrinfo_request(data);
+ return;
+ } else if (user_canceled) {
+ if (other_req->r) {
+			/* The other request is still working; let it hit this
+			 * callback and report the EVUTIL_EAI_CANCEL failure
+			 * to the user. */
+ return;
+ }
+ data->user_cb(EVUTIL_EAI_CANCEL, NULL, data->user_data);
+ free_getaddrinfo_request(data);
+ return;
+ }
+
+ /* Looks like we got some answers. We should turn them into addrinfos
+ * and then either queue those or return them all. */
+ EVUTIL_ASSERT(type == DNS_IPv4_A || type == DNS_IPv6_AAAA);
+
+ if (type == DNS_IPv4_A) {
+ memset(&sin, 0, sizeof(sin));
+ sin.sin_family = AF_INET;
+ sin.sin_port = htons(data->port);
+
+ sa = (struct sockaddr *)&sin;
+ socklen = sizeof(sin);
+ addrlen = 4;
+ addrp = &sin.sin_addr.s_addr;
+ } else {
+ memset(&sin6, 0, sizeof(sin6));
+ sin6.sin6_family = AF_INET6;
+ sin6.sin6_port = htons(data->port);
+
+ sa = (struct sockaddr *)&sin6;
+ socklen = sizeof(sin6);
+ addrlen = 16;
+ addrp = &sin6.sin6_addr.s6_addr;
+ }
+
+ res = NULL;
+ for (i=0; i < count; ++i) {
+ struct evutil_addrinfo *ai;
+ memcpy(addrp, ((char*)addresses)+i*addrlen, addrlen);
+ ai = evutil_new_addrinfo_(sa, socklen, &data->hints);
+ if (!ai) {
+ if (other_req->r) {
+ evdns_cancel_request(NULL, other_req->r);
+ }
+ data->user_cb(EVUTIL_EAI_MEMORY, NULL, data->user_data);
+ if (res)
+ evutil_freeaddrinfo(res);
+
+ if (other_req->r == NULL)
+ free_getaddrinfo_request(data);
+ return;
+ }
+ res = evutil_addrinfo_append_(res, ai);
+ }
+
+ if (other_req->r) {
+ /* The other request is still in progress; wait for it */
+ /* XXXX handle failure from set_timeout */
+ evdns_getaddrinfo_set_timeout(data->evdns_base, data);
+ data->pending_result = res;
+ return;
+ } else {
+ /* The other request is done or never started; append its
+ * results (if any) and return them. */
+ if (data->pending_result) {
+ if (req->type == DNS_IPv4_A)
+ res = evutil_addrinfo_append_(res,
+ data->pending_result);
+ else
+ res = evutil_addrinfo_append_(
+ data->pending_result, res);
+ data->pending_result = NULL;
+ }
+
+ /* Call the user callback. */
+ add_cname_to_reply(data, res);
+ data->user_cb(0, res, data->user_data);
+
+ /* Free data. */
+ free_getaddrinfo_request(data);
+ }
+}
+
+static struct hosts_entry *
+find_hosts_entry(struct evdns_base *base, const char *hostname,
+ struct hosts_entry *find_after)
+{
+ struct hosts_entry *e;
+
+ if (find_after)
+ e = TAILQ_NEXT(find_after, next);
+ else
+ e = TAILQ_FIRST(&base->hostsdb);
+
+ for (; e; e = TAILQ_NEXT(e, next)) {
+ if (!evutil_ascii_strcasecmp(e->hostname, hostname))
+ return e;
+ }
+ return NULL;
+}
+
+static int
+evdns_getaddrinfo_fromhosts(struct evdns_base *base,
+ const char *nodename, struct evutil_addrinfo *hints, ev_uint16_t port,
+ struct evutil_addrinfo **res)
+{
+ int n_found = 0;
+ struct hosts_entry *e;
+ struct evutil_addrinfo *ai=NULL;
+ int f = hints->ai_family;
+
+ EVDNS_LOCK(base);
+ for (e = find_hosts_entry(base, nodename, NULL); e;
+ e = find_hosts_entry(base, nodename, e)) {
+ struct evutil_addrinfo *ai_new;
+ ++n_found;
+ if ((e->addr.sa.sa_family == AF_INET && f == PF_INET6) ||
+ (e->addr.sa.sa_family == AF_INET6 && f == PF_INET))
+ continue;
+ ai_new = evutil_new_addrinfo_(&e->addr.sa, e->addrlen, hints);
+ if (!ai_new) {
+ n_found = 0;
+ goto out;
+ }
+ sockaddr_setport(ai_new->ai_addr, port);
+ ai = evutil_addrinfo_append_(ai, ai_new);
+ }
+out:
+	EVDNS_UNLOCK(base);
+ if (n_found) {
+ /* Note that we return an empty answer if we found entries for
+ * this hostname but none were of the right address type. */
+ *res = ai;
+ return 0;
+ } else {
+ if (ai)
+ evutil_freeaddrinfo(ai);
+ return -1;
+ }
+}
+
+struct evdns_getaddrinfo_request *
+evdns_getaddrinfo(struct evdns_base *dns_base,
+ const char *nodename, const char *servname,
+ const struct evutil_addrinfo *hints_in,
+ evdns_getaddrinfo_cb cb, void *arg)
+{
+ struct evdns_getaddrinfo_request *data;
+ struct evutil_addrinfo hints;
+ struct evutil_addrinfo *res = NULL;
+ int err;
+ int port = 0;
+ int want_cname = 0;
+
+ if (!dns_base) {
+ dns_base = current_base;
+ if (!dns_base) {
+ log(EVDNS_LOG_WARN,
+ "Call to getaddrinfo_async with no "
+ "evdns_base configured.");
+ cb(EVUTIL_EAI_FAIL, NULL, arg); /* ??? better error? */
+ return NULL;
+ }
+ }
+
+ /* If we _must_ answer this immediately, do so. */
+ if ((hints_in && (hints_in->ai_flags & EVUTIL_AI_NUMERICHOST))) {
+ res = NULL;
+ err = evutil_getaddrinfo(nodename, servname, hints_in, &res);
+ cb(err, res, arg);
+ return NULL;
+ }
+
+ if (hints_in) {
+ memcpy(&hints, hints_in, sizeof(hints));
+ } else {
+ memset(&hints, 0, sizeof(hints));
+ hints.ai_family = PF_UNSPEC;
+ }
+
+ evutil_adjust_hints_for_addrconfig_(&hints);
+
+ /* Now try to see if we _can_ answer immediately. */
+ /* (It would be nice to do this by calling getaddrinfo directly, with
+	 * AI_NUMERICHOST, on platforms that have it, but we can't: there isn't
+ * a reliable way to distinguish the "that wasn't a numeric host!" case
+ * from any other EAI_NONAME cases.) */
+ err = evutil_getaddrinfo_common_(nodename, servname, &hints, &res, &port);
+ if (err != EVUTIL_EAI_NEED_RESOLVE) {
+ cb(err, res, arg);
+ return NULL;
+ }
+
+ /* If there is an entry in the hosts file, we should give it now. */
+ if (!evdns_getaddrinfo_fromhosts(dns_base, nodename, &hints, port, &res)) {
+ cb(0, res, arg);
+ return NULL;
+ }
+
+ /* Okay, things are serious now. We're going to need to actually
+ * launch a request.
+ */
+ data = mm_calloc(1,sizeof(struct evdns_getaddrinfo_request));
+ if (!data) {
+ cb(EVUTIL_EAI_MEMORY, NULL, arg);
+ return NULL;
+ }
+
+ memcpy(&data->hints, &hints, sizeof(data->hints));
+ data->port = (ev_uint16_t)port;
+ data->ipv4_request.type = DNS_IPv4_A;
+ data->ipv6_request.type = DNS_IPv6_AAAA;
+ data->user_cb = cb;
+ data->user_data = arg;
+ data->evdns_base = dns_base;
+
+ want_cname = (hints.ai_flags & EVUTIL_AI_CANONNAME);
+
+ /* If we are asked for a PF_UNSPEC address, we launch two requests in
+ * parallel: one for an A address and one for an AAAA address. We
+ * can't send just one request, since many servers only answer one
+ * question per DNS request.
+ *
+ * Once we have the answer to one request, we allow for a short
+ * timeout before we report it, to see if the other one arrives. If
+ * they both show up in time, then we report both the answers.
+ *
+ * If too many addresses of one type time out or fail, we should stop
+ * launching those requests. (XXX we don't do that yet.)
+ */
+
+ if (hints.ai_family != PF_INET6) {
+ log(EVDNS_LOG_DEBUG, "Sending request for %s on ipv4 as %p",
+ nodename, &data->ipv4_request);
+
+ data->ipv4_request.r = evdns_base_resolve_ipv4(dns_base,
+ nodename, 0, evdns_getaddrinfo_gotresolve,
+ &data->ipv4_request);
+ if (want_cname && data->ipv4_request.r)
+ data->ipv4_request.r->current_req->put_cname_in_ptr =
+ &data->cname_result;
+ }
+ if (hints.ai_family != PF_INET) {
+ log(EVDNS_LOG_DEBUG, "Sending request for %s on ipv6 as %p",
+ nodename, &data->ipv6_request);
+
+ data->ipv6_request.r = evdns_base_resolve_ipv6(dns_base,
+ nodename, 0, evdns_getaddrinfo_gotresolve,
+ &data->ipv6_request);
+ if (want_cname && data->ipv6_request.r)
+ data->ipv6_request.r->current_req->put_cname_in_ptr =
+ &data->cname_result;
+ }
+
+ evtimer_assign(&data->timeout, dns_base->event_base,
+ evdns_getaddrinfo_timeout_cb, data);
+
+ if (data->ipv4_request.r || data->ipv6_request.r) {
+ return data;
+ } else {
+ mm_free(data);
+ cb(EVUTIL_EAI_FAIL, NULL, arg);
+ return NULL;
+ }
+}
+
+void
+evdns_getaddrinfo_cancel(struct evdns_getaddrinfo_request *data)
+{
+ EVDNS_LOCK(data->evdns_base);
+ if (data->request_done) {
+ EVDNS_UNLOCK(data->evdns_base);
+ return;
+ }
+ event_del(&data->timeout);
+ data->user_canceled = 1;
+ if (data->ipv4_request.r)
+ evdns_cancel_request(data->evdns_base, data->ipv4_request.r);
+ if (data->ipv6_request.r)
+ evdns_cancel_request(data->evdns_base, data->ipv6_request.r);
+ EVDNS_UNLOCK(data->evdns_base);
+}
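+
+/* Illustrative sketch (not part of the upstream source) of the async
+ * getaddrinfo API implemented above; the callback signature matches
+ * evdns_getaddrinfo_cb from <event2/dns.h>:
+ *
+ *	static void
+ *	on_resolved(int err, struct evutil_addrinfo *res, void *arg)
+ *	{
+ *		if (err) {
+ *			printf("lookup failed: %s\n", evutil_gai_strerror(err));
+ *			return;
+ *		}
+ *		... connect to one of the addresses in res ...
+ *		evutil_freeaddrinfo(res);
+ *	}
+ *
+ *	struct evutil_addrinfo hints;
+ *	memset(&hints, 0, sizeof(hints));
+ *	hints.ai_family = PF_UNSPEC;
+ *	hints.ai_socktype = SOCK_STREAM;
+ *	evdns_getaddrinfo(dns, "www.example.com", "443", &hints,
+ *	    on_resolved, NULL);
+ *
+ * With ai_family == PF_UNSPEC this launches the parallel A/AAAA requests
+ * described in evdns_getaddrinfo() above. */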
diff --git a/libs/libevent/src/event-internal.h b/libs/libevent/src/event-internal.h
new file mode 100644
index 0000000000..66dcfc329c
--- /dev/null
+++ b/libs/libevent/src/event-internal.h
@@ -0,0 +1,479 @@
+/*
+ * Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
+ * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef EVENT_INTERNAL_H_INCLUDED_
+#define EVENT_INTERNAL_H_INCLUDED_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "event2/event-config.h"
+#include "evconfig-private.h"
+
+#include <time.h>
+#include <sys/queue.h>
+#include "event2/event_struct.h"
+#include "minheap-internal.h"
+#include "evsignal-internal.h"
+#include "mm-internal.h"
+#include "defer-internal.h"
+
+/* map union members back */
+
+/* mutually exclusive */
+#define ev_signal_next ev_.ev_signal.ev_signal_next
+#define ev_io_next ev_.ev_io.ev_io_next
+#define ev_io_timeout ev_.ev_io.ev_timeout
+
+/* used only by signals */
+#define ev_ncalls ev_.ev_signal.ev_ncalls
+#define ev_pncalls ev_.ev_signal.ev_pncalls
+
+#define ev_pri ev_evcallback.evcb_pri
+#define ev_flags ev_evcallback.evcb_flags
+#define ev_closure ev_evcallback.evcb_closure
+#define ev_callback ev_evcallback.evcb_cb_union.evcb_callback
+#define ev_arg ev_evcallback.evcb_arg
+
+/** @name Event closure codes
+
+ Possible values for evcb_closure in struct event_callback
+
+ @{
+ */
+/** A regular event. Uses the evcb_callback callback */
+#define EV_CLOSURE_EVENT 0
+/** A signal event. Uses the evcb_callback callback */
+#define EV_CLOSURE_EVENT_SIGNAL 1
+/** A persistent non-signal event. Uses the evcb_callback callback */
+#define EV_CLOSURE_EVENT_PERSIST 2
+/** A simple callback. Uses the evcb_selfcb callback. */
+#define EV_CLOSURE_CB_SELF 3
+/** A finalizing callback. Uses the evcb_cbfinalize callback. */
+#define EV_CLOSURE_CB_FINALIZE 4
+/** A finalizing event. Uses the evcb_evfinalize callback. */
+#define EV_CLOSURE_EVENT_FINALIZE 5
+/** A finalizing event that should get freed after. Uses the evcb_evfinalize
+ * callback. */
+#define EV_CLOSURE_EVENT_FINALIZE_FREE 6
+/** @} */
+
+/** Structure to define the backend of a given event_base. */
+struct eventop {
+ /** The name of this backend. */
+ const char *name;
+ /** Function to set up an event_base to use this backend. It should
+ * create a new structure holding whatever information is needed to
+ * run the backend, and return it. The returned pointer will get
+ * stored by event_init into the event_base.evbase field. On failure,
+ * this function should return NULL. */
+ void *(*init)(struct event_base *);
+ /** Enable reading/writing on a given fd or signal. 'events' will be
+ * the events that we're trying to enable: one or more of EV_READ,
+ * EV_WRITE, EV_SIGNAL, and EV_ET. 'old' will be those events that
+ * were enabled on this fd previously. 'fdinfo' will be a structure
+ * associated with the fd by the evmap; its size is defined by the
+ * fdinfo field below. It will be set to 0 the first time the fd is
+ * added. The function should return 0 on success and -1 on error.
+ */
+ int (*add)(struct event_base *, evutil_socket_t fd, short old, short events, void *fdinfo);
+ /** As "add", except 'events' contains the events we mean to disable. */
+ int (*del)(struct event_base *, evutil_socket_t fd, short old, short events, void *fdinfo);
+ /** Function to implement the core of an event loop. It must see which
+ added events are ready, and cause event_active to be called for each
+ active event (usually via event_io_active or such). It should
+ return 0 on success and -1 on error.
+ */
+ int (*dispatch)(struct event_base *, struct timeval *);
+ /** Function to clean up and free our data from the event_base. */
+ void (*dealloc)(struct event_base *);
+ /** Flag: set if we need to reinitialize the event base after we fork.
+ */
+ int need_reinit;
+ /** Bit-array of supported event_method_features that this backend can
+ * provide. */
+ enum event_method_feature features;
+ /** Length of the extra information we should record for each fd that
+ has one or more active events. This information is recorded
+ as part of the evmap entry for each fd, and passed as an argument
+ to the add and del functions above.
+ */
+ size_t fdinfo_len;
+};
+
+#ifdef _WIN32
+/* If we're on win32, then file descriptors are not nice low densely packed
+ integers. Instead, they are pointer-like windows handles, and we want to
+ use a hashtable instead of an array to map fds to events.
+*/
+#define EVMAP_USE_HT
+#endif
+
+/* #define HT_CACHE_HASH_VALS */
+
+#ifdef EVMAP_USE_HT
+#define HT_NO_CACHE_HASH_VALUES
+#include "ht-internal.h"
+struct event_map_entry;
+HT_HEAD(event_io_map, event_map_entry);
+#else
+#define event_io_map event_signal_map
+#endif
+
+/* Used to map signal numbers to a list of events. If EVMAP_USE_HT is not
+ defined, this structure is also used as event_io_map, which maps fds to a
+ list of events.
+*/
+struct event_signal_map {
+ /* An array of evmap_io * or of evmap_signal *; empty entries are
+ * set to NULL. */
+ void **entries;
+ /* The number of entries available in entries */
+ int nentries;
+};
+
+/* A list of events waiting on a given 'common' timeout value. Ordinarily,
+ * events waiting for a timeout wait on a minheap. Sometimes, however, a
+ * queue can be faster.
+ **/
+struct common_timeout_list {
+ /* List of events currently waiting in the queue. */
+ struct event_list events;
+ /* 'magic' timeval used to indicate the duration of events in this
+ * queue. */
+ struct timeval duration;
+ /* Event that triggers whenever one of the events in the queue is
+ * ready to activate */
+ struct event timeout_event;
+ /* The event_base that this timeout list is part of */
+ struct event_base *base;
+};
+
+/** Mask used to get the real tv_usec value from a common timeout. */
+#define COMMON_TIMEOUT_MICROSECONDS_MASK 0x000fffff
+
+struct event_change;
+
+/* List of 'changes' since the last call to eventop.dispatch. Only maintained
+ * if the backend is using changesets. */
+struct event_changelist {
+ struct event_change *changes;
+ int n_changes;
+ int changes_size;
+};
+
+#ifndef EVENT__DISABLE_DEBUG_MODE
+/* Global internal flag: set to one if debug mode is on. */
+extern int event_debug_mode_on_;
+#define EVENT_DEBUG_MODE_IS_ON() (event_debug_mode_on_)
+#else
+#define EVENT_DEBUG_MODE_IS_ON() (0)
+#endif
+
+TAILQ_HEAD(evcallback_list, event_callback);
+
+/* Sets up an event for processing once */
+struct event_once {
+ LIST_ENTRY(event_once) next_once;
+ struct event ev;
+
+ void (*cb)(evutil_socket_t, short, void *);
+ void *arg;
+};
+
+struct event_base {
+ /** Function pointers and other data to describe this event_base's
+ * backend. */
+ const struct eventop *evsel;
+ /** Pointer to backend-specific data. */
+ void *evbase;
+
+ /** List of changes to tell backend about at next dispatch. Only used
+ * by the O(1) backends. */
+ struct event_changelist changelist;
+
+ /** Function pointers used to describe the backend that this event_base
+ * uses for signals */
+ const struct eventop *evsigsel;
+	/** Data to implement the common signal handler code. */
+ struct evsig_info sig;
+
+ /** Number of virtual events */
+ int virtual_event_count;
+ /** Maximum number of virtual events active */
+ int virtual_event_count_max;
+ /** Number of total events added to this event_base */
+ int event_count;
+ /** Maximum number of total events added to this event_base */
+ int event_count_max;
+ /** Number of total events active in this event_base */
+ int event_count_active;
+ /** Maximum number of total events active in this event_base */
+ int event_count_active_max;
+
+ /** Set if we should terminate the loop once we're done processing
+ * events. */
+ int event_gotterm;
+ /** Set if we should terminate the loop immediately */
+ int event_break;
+ /** Set if we should start a new instance of the loop immediately. */
+ int event_continue;
+
+ /** The currently running priority of events */
+ int event_running_priority;
+
+ /** Set if we're running the event_base_loop function, to prevent
+ * reentrant invocation. */
+ int running_loop;
+
+ /** Set to the number of deferred_cbs we've made 'active' in the
+ * loop. This is a hack to prevent starvation; it would be smarter
+ * to just use event_config_set_max_dispatch_interval's max_callbacks
+ * feature */
+ int n_deferreds_queued;
+
+ /* Active event management. */
+ /** An array of nactivequeues queues for active event_callbacks (ones
+ * that have triggered, and whose callbacks need to be called). Low
+ * priority numbers are more important, and stall higher ones.
+ */
+ struct evcallback_list *activequeues;
+ /** The length of the activequeues array */
+ int nactivequeues;
+ /** A list of event_callbacks that should become active the next time
+ * we process events, but not this time. */
+ struct evcallback_list active_later_queue;
+
+ /* common timeout logic */
+
+ /** An array of common_timeout_list* for all of the common timeout
+ * values we know. */
+ struct common_timeout_list **common_timeout_queues;
+ /** The number of entries used in common_timeout_queues */
+ int n_common_timeouts;
+ /** The total size of common_timeout_queues. */
+ int n_common_timeouts_allocated;
+
+ /** Mapping from file descriptors to enabled (added) events */
+ struct event_io_map io;
+
+ /** Mapping from signal numbers to enabled (added) events. */
+ struct event_signal_map sigmap;
+
+ /** Priority queue of events with timeouts. */
+ struct min_heap timeheap;
+
+ /** Stored timeval: used to avoid calling gettimeofday/clock_gettime
+ * too often. */
+ struct timeval tv_cache;
+
+ struct evutil_monotonic_timer monotonic_timer;
+
+ /** Difference between internal time (maybe from clock_gettime) and
+ * gettimeofday. */
+ struct timeval tv_clock_diff;
+ /** Second in which we last updated tv_clock_diff, in monotonic time. */
+ time_t last_updated_clock_diff;
+
+#ifndef EVENT__DISABLE_THREAD_SUPPORT
+ /* threading support */
+ /** The thread currently running the event_loop for this base */
+ unsigned long th_owner_id;
+ /** A lock to prevent conflicting accesses to this event_base */
+ void *th_base_lock;
+ /** A condition that gets signalled when we're done processing an
+ * event with waiters on it. */
+ void *current_event_cond;
+ /** Number of threads blocking on current_event_cond. */
+ int current_event_waiters;
+#endif
+ /** The event whose callback is executing right now */
+ struct event_callback *current_event;
+
+#ifdef _WIN32
+ /** IOCP support structure, if IOCP is enabled. */
+ struct event_iocp_port *iocp;
+#endif
+
+ /** Flags that this base was configured with */
+ enum event_base_config_flag flags;
+
+ struct timeval max_dispatch_time;
+ int max_dispatch_callbacks;
+ int limit_callbacks_after_prio;
+
+ /* Notify main thread to wake up break, etc. */
+ /** True if the base already has a pending notify, and we don't need
+ * to add any more. */
+ int is_notify_pending;
+ /** A socketpair used by some th_notify functions to wake up the main
+ * thread. */
+ evutil_socket_t th_notify_fd[2];
+ /** An event used by some th_notify functions to wake up the main
+ * thread. */
+ struct event th_notify;
+ /** A function used to wake up the main thread from another thread. */
+ int (*th_notify_fn)(struct event_base *base);
+
+ /** Saved seed for weak random number generator. Some backends use
+ * this to produce fairness among sockets. Protected by th_base_lock. */
+ struct evutil_weakrand_state weakrand_seed;
+
+ /** List of event_onces that have not yet fired. */
+ LIST_HEAD(once_event_list, event_once) once_events;
+
+};
+
+struct event_config_entry {
+ TAILQ_ENTRY(event_config_entry) next;
+
+ const char *avoid_method;
+};
+
+/** Internal structure: describes the configuration we want for an event_base
+ * that we're about to allocate. */
+struct event_config {
+ TAILQ_HEAD(event_configq, event_config_entry) entries;
+
+ int n_cpus_hint;
+ struct timeval max_dispatch_interval;
+ int max_dispatch_callbacks;
+ int limit_callbacks_after_prio;
+ enum event_method_feature require_features;
+ enum event_base_config_flag flags;
+};
+
+/* Internal use only: Functions that might be missing from <sys/queue.h> */
+#ifndef TAILQ_FIRST
+#define TAILQ_FIRST(head) ((head)->tqh_first)
+#endif
+#ifndef TAILQ_END
+#define TAILQ_END(head) NULL
+#endif
+#ifndef TAILQ_NEXT
+#define TAILQ_NEXT(elm, field) ((elm)->field.tqe_next)
+#endif
+
+#ifndef TAILQ_FOREACH
+#define TAILQ_FOREACH(var, head, field) \
+ for ((var) = TAILQ_FIRST(head); \
+ (var) != TAILQ_END(head); \
+ (var) = TAILQ_NEXT(var, field))
+#endif
+
+#ifndef TAILQ_INSERT_BEFORE
+#define TAILQ_INSERT_BEFORE(listelm, elm, field) do { \
+ (elm)->field.tqe_prev = (listelm)->field.tqe_prev; \
+ (elm)->field.tqe_next = (listelm); \
+ *(listelm)->field.tqe_prev = (elm); \
+ (listelm)->field.tqe_prev = &(elm)->field.tqe_next; \
+} while (0)
+#endif
+
+#define N_ACTIVE_CALLBACKS(base) \
+ ((base)->event_count_active)
+
+int evsig_set_handler_(struct event_base *base, int evsignal,
+ void (*fn)(int));
+int evsig_restore_handler_(struct event_base *base, int evsignal);
+
+int event_add_nolock_(struct event *ev,
+ const struct timeval *tv, int tv_is_absolute);
+/** Argument for event_del_nolock_. Tells event_del not to block on the event
+ * if it's running in another thread. */
+#define EVENT_DEL_NOBLOCK 0
+/** Argument for event_del_nolock_. Tells event_del to block on the event
+ * if it's running in another thread, regardless of its value for EV_FINALIZE
+ */
+#define EVENT_DEL_BLOCK 1
+/** Argument for event_del_nolock_. Tells event_del to block on the event
+ * if it is running in another thread and it doesn't have EV_FINALIZE set.
+ */
+#define EVENT_DEL_AUTOBLOCK 2
+/** Argument for event_del_nolock_. Tells event_del to proceed even if the
+ * event is set up for finalization rather than for regular use. */
+#define EVENT_DEL_EVEN_IF_FINALIZING 3
+int event_del_nolock_(struct event *ev, int blocking);
+int event_remove_timer_nolock_(struct event *ev);
+
+void event_active_nolock_(struct event *ev, int res, short count);
+int event_callback_activate_(struct event_base *, struct event_callback *);
+int event_callback_activate_nolock_(struct event_base *, struct event_callback *);
+int event_callback_cancel_(struct event_base *base,
+ struct event_callback *evcb);
+
+void event_callback_finalize_nolock_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *));
+void event_callback_finalize_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *));
+int event_callback_finalize_many_(struct event_base *base, int n_cbs, struct event_callback **evcb, void (*cb)(struct event_callback *, void *));
+
+
+void event_active_later_(struct event *ev, int res);
+void event_active_later_nolock_(struct event *ev, int res);
+int event_callback_activate_later_nolock_(struct event_base *base,
+ struct event_callback *evcb);
+int event_callback_cancel_nolock_(struct event_base *base,
+ struct event_callback *evcb, int even_if_finalizing);
+void event_callback_init_(struct event_base *base,
+ struct event_callback *cb);
+
+/* FIXME document. */
+void event_base_add_virtual_(struct event_base *base);
+void event_base_del_virtual_(struct event_base *base);
+
+/** For debugging: unless assertions are disabled, verify the referential
+ integrity of the internal data structures of 'base'. This operation can
+ be expensive.
+
+ Returns on success; aborts on failure.
+*/
+void event_base_assert_ok_(struct event_base *base);
+void event_base_assert_ok_nolock_(struct event_base *base);
+
+
+/* Helper function: Call 'fn' exactly once for every inserted or active event in
+ * the event_base 'base'.
+ *
+ * If fn returns 0, continue on to the next event. Otherwise, return the same
+ * value that fn returned.
+ *
+ * Requires that 'base' be locked.
+ */
+int event_base_foreach_event_nolock_(struct event_base *base,
+ event_base_foreach_event_cb cb, void *arg);
+
+/* Cleanup function to reset debug mode during shutdown.
+ *
+ * Calling this function doesn't mean it'll be possible to re-enable
+ * debug mode if any events were added.
+ */
+void event_disable_debug_mode(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* EVENT_INTERNAL_H_INCLUDED_ */
diff --git a/libs/libevent/src/event.c b/libs/libevent/src/event.c
new file mode 100644
index 0000000000..503003e249
--- /dev/null
+++ b/libs/libevent/src/event.c
@@ -0,0 +1,3940 @@
+/*
+ * Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
+ * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "event2/event-config.h"
+#include "evconfig-private.h"
+
+#ifdef _WIN32
+#include <winsock2.h>
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+#undef WIN32_LEAN_AND_MEAN
+#endif
+#include <sys/types.h>
+#if !defined(_WIN32) && defined(EVENT__HAVE_SYS_TIME_H)
+#include <sys/time.h>
+#endif
+#include <sys/queue.h>
+#ifdef EVENT__HAVE_SYS_SOCKET_H
+#include <sys/socket.h>
+#endif
+#include <stdio.h>
+#include <stdlib.h>
+#ifdef EVENT__HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+#include <ctype.h>
+#include <errno.h>
+#include <signal.h>
+#include <string.h>
+#include <time.h>
+#include <limits.h>
+
+#include "event2/event.h"
+#include "event2/event_struct.h"
+#include "event2/event_compat.h"
+#include "event-internal.h"
+#include "defer-internal.h"
+#include "evthread-internal.h"
+#include "event2/thread.h"
+#include "event2/util.h"
+#include "log-internal.h"
+#include "evmap-internal.h"
+#include "iocp-internal.h"
+#include "changelist-internal.h"
+#define HT_NO_CACHE_HASH_VALUES
+#include "ht-internal.h"
+#include "util-internal.h"
+
+
+#ifdef EVENT__HAVE_WORKING_KQUEUE
+#include "kqueue-internal.h"
+#endif
+
+#ifdef EVENT__HAVE_EVENT_PORTS
+extern const struct eventop evportops;
+#endif
+#ifdef EVENT__HAVE_SELECT
+extern const struct eventop selectops;
+#endif
+#ifdef EVENT__HAVE_POLL
+extern const struct eventop pollops;
+#endif
+#ifdef EVENT__HAVE_EPOLL
+extern const struct eventop epollops;
+#endif
+#ifdef EVENT__HAVE_WORKING_KQUEUE
+extern const struct eventop kqops;
+#endif
+#ifdef EVENT__HAVE_DEVPOLL
+extern const struct eventop devpollops;
+#endif
+#ifdef _WIN32
+extern const struct eventop win32ops;
+#endif
+
+/* Array of backends in order of preference. */
+static const struct eventop *eventops[] = {
+#ifdef EVENT__HAVE_EVENT_PORTS
+ &evportops,
+#endif
+#ifdef EVENT__HAVE_WORKING_KQUEUE
+ &kqops,
+#endif
+#ifdef EVENT__HAVE_EPOLL
+ &epollops,
+#endif
+#ifdef EVENT__HAVE_DEVPOLL
+ &devpollops,
+#endif
+#ifdef EVENT__HAVE_POLL
+ &pollops,
+#endif
+#ifdef EVENT__HAVE_SELECT
+ &selectops,
+#endif
+#ifdef _WIN32
+ &win32ops,
+#endif
+ NULL
+};
+
+/* Global state; deprecated */
+struct event_base *event_global_current_base_ = NULL;
+#define current_base event_global_current_base_
+
+/* Global state */
+
+static void *event_self_cbarg_ptr_ = NULL;
+
+/* Prototypes */
+static void event_queue_insert_active(struct event_base *, struct event_callback *);
+static void event_queue_insert_active_later(struct event_base *, struct event_callback *);
+static void event_queue_insert_timeout(struct event_base *, struct event *);
+static void event_queue_insert_inserted(struct event_base *, struct event *);
+static void event_queue_remove_active(struct event_base *, struct event_callback *);
+static void event_queue_remove_active_later(struct event_base *, struct event_callback *);
+static void event_queue_remove_timeout(struct event_base *, struct event *);
+static void event_queue_remove_inserted(struct event_base *, struct event *);
+static void event_queue_make_later_events_active(struct event_base *base);
+
+static int evthread_make_base_notifiable_nolock_(struct event_base *base);
+static int event_del_(struct event *ev, int blocking);
+
+#ifdef USE_REINSERT_TIMEOUT
+/* This code seems buggy; only turn it on if we find out what the trouble is. */
+static void event_queue_reinsert_timeout(struct event_base *,struct event *, int was_common, int is_common, int old_timeout_idx);
+#endif
+
+static int event_haveevents(struct event_base *);
+
+static int event_process_active(struct event_base *);
+
+static int timeout_next(struct event_base *, struct timeval **);
+static void timeout_process(struct event_base *);
+
+static inline void event_signal_closure(struct event_base *, struct event *ev);
+static inline void event_persist_closure(struct event_base *, struct event *ev);
+
+static int evthread_notify_base(struct event_base *base);
+
+static void insert_common_timeout_inorder(struct common_timeout_list *ctl,
+ struct event *ev);
+
+#ifndef EVENT__DISABLE_DEBUG_MODE
+/* These functions implement a hashtable of which 'struct event *' structures
+ * have been setup or added. We don't want to trust the content of the struct
+ * event itself, since we're trying to work through cases where an event gets
+ * clobbered or freed. Instead, we keep a hashtable indexed by the pointer.
+ */
+
+struct event_debug_entry {
+ HT_ENTRY(event_debug_entry) node;
+ const struct event *ptr;
+ unsigned added : 1;
+};
+
+static inline unsigned
+hash_debug_entry(const struct event_debug_entry *e)
+{
+ /* We need to do this silliness to convince compilers that we
+ * honestly mean to cast e->ptr to an integer, and discard any
+ * part of it that doesn't fit in an unsigned.
+ */
+ unsigned u = (unsigned) ((ev_uintptr_t) e->ptr);
+ /* Our hashtable implementation is pretty sensitive to low bits,
+ * and every struct event is over 64 bytes in size, so we can
+ * just say >>6. */
+ return (u >> 6);
+}
+
+static inline int
+eq_debug_entry(const struct event_debug_entry *a,
+ const struct event_debug_entry *b)
+{
+ return a->ptr == b->ptr;
+}
+
+int event_debug_mode_on_ = 0;
+
+
+#if !defined(EVENT__DISABLE_THREAD_SUPPORT) && !defined(EVENT__DISABLE_DEBUG_MODE)
+/**
+ * @brief debug mode variable which is set for any function/structure that needs
+ * to be shared across threads (if thread support is enabled).
+ *
+ * When and if evthreads are initialized, this variable will be evaluated,
+ * and if set to something other than zero, this means the evthread setup
+ * functions were called out of order.
+ *
+ * See: "Locks and threading" in the documentation.
+ */
+int event_debug_created_threadable_ctx_ = 0;
+#endif
+
+/* Set if it's too late to enable event_debug_mode. */
+static int event_debug_mode_too_late = 0;
+#ifndef EVENT__DISABLE_THREAD_SUPPORT
+static void *event_debug_map_lock_ = NULL;
+#endif
+static HT_HEAD(event_debug_map, event_debug_entry) global_debug_map =
+ HT_INITIALIZER();
+
+HT_PROTOTYPE(event_debug_map, event_debug_entry, node, hash_debug_entry,
+ eq_debug_entry)
+HT_GENERATE(event_debug_map, event_debug_entry, node, hash_debug_entry,
+ eq_debug_entry, 0.5, mm_malloc, mm_realloc, mm_free)
+
+/* Macro: record that ev is now setup (that is, ready for an add) */
+#define event_debug_note_setup_(ev) do { \
+ if (event_debug_mode_on_) { \
+ struct event_debug_entry *dent,find; \
+ find.ptr = (ev); \
+ EVLOCK_LOCK(event_debug_map_lock_, 0); \
+ dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
+ if (dent) { \
+ dent->added = 0; \
+ } else { \
+ dent = mm_malloc(sizeof(*dent)); \
+ if (!dent) \
+ event_err(1, \
+ "Out of memory in debugging code"); \
+ dent->ptr = (ev); \
+ dent->added = 0; \
+ HT_INSERT(event_debug_map, &global_debug_map, dent); \
+ } \
+ EVLOCK_UNLOCK(event_debug_map_lock_, 0); \
+ } \
+ event_debug_mode_too_late = 1; \
+ } while (0)
+/* Macro: record that ev is no longer setup */
+#define event_debug_note_teardown_(ev) do { \
+ if (event_debug_mode_on_) { \
+ struct event_debug_entry *dent,find; \
+ find.ptr = (ev); \
+ EVLOCK_LOCK(event_debug_map_lock_, 0); \
+ dent = HT_REMOVE(event_debug_map, &global_debug_map, &find); \
+ if (dent) \
+ mm_free(dent); \
+ EVLOCK_UNLOCK(event_debug_map_lock_, 0); \
+ } \
+ event_debug_mode_too_late = 1; \
+ } while (0)
+/* Macro: record that ev is now added */
+#define event_debug_note_add_(ev) do { \
+ if (event_debug_mode_on_) { \
+ struct event_debug_entry *dent,find; \
+ find.ptr = (ev); \
+ EVLOCK_LOCK(event_debug_map_lock_, 0); \
+ dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
+ if (dent) { \
+ dent->added = 1; \
+ } else { \
+ event_errx(EVENT_ERR_ABORT_, \
+ "%s: noting an add on a non-setup event %p" \
+ " (events: 0x%x, fd: "EV_SOCK_FMT \
+ ", flags: 0x%x)", \
+ __func__, (ev), (ev)->ev_events, \
+ EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags); \
+ } \
+ EVLOCK_UNLOCK(event_debug_map_lock_, 0); \
+ } \
+ event_debug_mode_too_late = 1; \
+ } while (0)
+/* Macro: record that ev is no longer added */
+#define event_debug_note_del_(ev) do { \
+ if (event_debug_mode_on_) { \
+ struct event_debug_entry *dent,find; \
+ find.ptr = (ev); \
+ EVLOCK_LOCK(event_debug_map_lock_, 0); \
+ dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
+ if (dent) { \
+ dent->added = 0; \
+ } else { \
+ event_errx(EVENT_ERR_ABORT_, \
+ "%s: noting a del on a non-setup event %p" \
+ " (events: 0x%x, fd: "EV_SOCK_FMT \
+ ", flags: 0x%x)", \
+ __func__, (ev), (ev)->ev_events, \
+ EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags); \
+ } \
+ EVLOCK_UNLOCK(event_debug_map_lock_, 0); \
+ } \
+ event_debug_mode_too_late = 1; \
+ } while (0)
+/* Macro: assert that ev is setup (i.e., okay to add or inspect) */
+#define event_debug_assert_is_setup_(ev) do { \
+ if (event_debug_mode_on_) { \
+ struct event_debug_entry *dent,find; \
+ find.ptr = (ev); \
+ EVLOCK_LOCK(event_debug_map_lock_, 0); \
+ dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
+ if (!dent) { \
+ event_errx(EVENT_ERR_ABORT_, \
+ "%s called on a non-initialized event %p" \
+ " (events: 0x%x, fd: "EV_SOCK_FMT\
+ ", flags: 0x%x)", \
+ __func__, (ev), (ev)->ev_events, \
+ EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags); \
+ } \
+ EVLOCK_UNLOCK(event_debug_map_lock_, 0); \
+ } \
+ } while (0)
+/* Macro: assert that ev is not added (i.e., okay to tear down or set
+ * up again) */
+#define event_debug_assert_not_added_(ev) do { \
+ if (event_debug_mode_on_) { \
+ struct event_debug_entry *dent,find; \
+ find.ptr = (ev); \
+ EVLOCK_LOCK(event_debug_map_lock_, 0); \
+ dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
+ if (dent && dent->added) { \
+ event_errx(EVENT_ERR_ABORT_, \
+ "%s called on an already added event %p" \
+ " (events: 0x%x, fd: "EV_SOCK_FMT", " \
+ "flags: 0x%x)", \
+ __func__, (ev), (ev)->ev_events, \
+ EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags); \
+ } \
+ EVLOCK_UNLOCK(event_debug_map_lock_, 0); \
+ } \
+ } while (0)
+#else
+#define event_debug_note_setup_(ev) \
+ ((void)0)
+#define event_debug_note_teardown_(ev) \
+ ((void)0)
+#define event_debug_note_add_(ev) \
+ ((void)0)
+#define event_debug_note_del_(ev) \
+ ((void)0)
+#define event_debug_assert_is_setup_(ev) \
+ ((void)0)
+#define event_debug_assert_not_added_(ev) \
+ ((void)0)
+#endif
+
+#define EVENT_BASE_ASSERT_LOCKED(base) \
+ EVLOCK_ASSERT_LOCKED((base)->th_base_lock)
+
+/* How often (in seconds) do we check for changes in wall clock time relative
+ * to monotonic time? Set this to -1 for 'never.' */
+#define CLOCK_SYNC_INTERVAL 5
+
+/** Set 'tp' to the current time according to 'base'. We must hold the lock
+ * on 'base'. If there is a cached time, return it. Otherwise, use
+ * clock_gettime or gettimeofday as appropriate to find out the right time.
+ * Return 0 on success, -1 on failure.
+ */
+static int
+gettime(struct event_base *base, struct timeval *tp)
+{
+ EVENT_BASE_ASSERT_LOCKED(base);
+
+ if (base->tv_cache.tv_sec) {
+ *tp = base->tv_cache;
+ return (0);
+ }
+
+ if (evutil_gettime_monotonic_(&base->monotonic_timer, tp) == -1) {
+ return -1;
+ }
+
+ if (base->last_updated_clock_diff + CLOCK_SYNC_INTERVAL
+ < tp->tv_sec) {
+ struct timeval tv;
+ evutil_gettimeofday(&tv,NULL);
+ evutil_timersub(&tv, tp, &base->tv_clock_diff);
+ base->last_updated_clock_diff = tp->tv_sec;
+ }
+
+ return 0;
+}
+
+int
+event_base_gettimeofday_cached(struct event_base *base, struct timeval *tv)
+{
+ int r;
+ if (!base) {
+ base = current_base;
+ if (!current_base)
+ return evutil_gettimeofday(tv, NULL);
+ }
+
+ EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+ if (base->tv_cache.tv_sec == 0) {
+ r = evutil_gettimeofday(tv, NULL);
+ } else {
+ evutil_timeradd(&base->tv_cache, &base->tv_clock_diff, tv);
+ r = 0;
+ }
+ EVBASE_RELEASE_LOCK(base, th_base_lock);
+ return r;
+}
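+
+/* Illustrative usage sketch (not part of this file): inside a running
+ * callback, the cached time is a cheap way to get "roughly now" without a
+ * syscall.  The names 'base' and 'now' below are hypothetical.
+ *
+ *    struct timeval now;
+ *    event_base_gettimeofday_cached(base, &now);
+ */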
+
+/** Make 'base' have no current cached time. */
+static inline void
+clear_time_cache(struct event_base *base)
+{
+ base->tv_cache.tv_sec = 0;
+}
+
+/** Replace the cached time in 'base' with the current time. */
+static inline void
+update_time_cache(struct event_base *base)
+{
+ base->tv_cache.tv_sec = 0;
+ if (!(base->flags & EVENT_BASE_FLAG_NO_CACHE_TIME))
+ gettime(base, &base->tv_cache);
+}
+
+int
+event_base_update_cache_time(struct event_base *base)
+{
+
+ if (!base) {
+ base = current_base;
+ if (!current_base)
+ return -1;
+ }
+
+ EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+ if (base->running_loop)
+ update_time_cache(base);
+ EVBASE_RELEASE_LOCK(base, th_base_lock);
+ return 0;
+}
+
+static inline struct event *
+event_callback_to_event(struct event_callback *evcb)
+{
+ EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_INIT));
+ return EVUTIL_UPCAST(evcb, struct event, ev_evcallback);
+}
+
+static inline struct event_callback *
+event_to_event_callback(struct event *ev)
+{
+ return &ev->ev_evcallback;
+}
+
+struct event_base *
+event_init(void)
+{
+ struct event_base *base = event_base_new_with_config(NULL);
+
+ if (base == NULL) {
+ event_errx(1, "%s: Unable to construct event_base", __func__);
+ return NULL;
+ }
+
+ current_base = base;
+
+ return (base);
+}
+
+struct event_base *
+event_base_new(void)
+{
+ struct event_base *base = NULL;
+ struct event_config *cfg = event_config_new();
+ if (cfg) {
+ base = event_base_new_with_config(cfg);
+ event_config_free(cfg);
+ }
+ return base;
+}
+
+/** Return true iff 'method' is the name of a method that 'cfg' tells us to
+ * avoid. */
+static int
+event_config_is_avoided_method(const struct event_config *cfg,
+ const char *method)
+{
+ struct event_config_entry *entry;
+
+ TAILQ_FOREACH(entry, &cfg->entries, next) {
+ if (entry->avoid_method != NULL &&
+ strcmp(entry->avoid_method, method) == 0)
+ return (1);
+ }
+
+ return (0);
+}
+
+/** Return true iff 'method' is disabled according to the environment. */
+static int
+event_is_method_disabled(const char *name)
+{
+ char environment[64];
+ int i;
+
+ evutil_snprintf(environment, sizeof(environment), "EVENT_NO%s", name);
+ for (i = 8; environment[i] != '\0'; ++i)
+ environment[i] = EVUTIL_TOUPPER_(environment[i]);
+ /* Note that evutil_getenv_() ignores the environment entirely if
+ * we're setuid */
+ return (evutil_getenv_(environment) != NULL);
+}
+
+int
+event_base_get_features(const struct event_base *base)
+{
+ return base->evsel->features;
+}
+
+void
+event_enable_debug_mode(void)
+{
+#ifndef EVENT__DISABLE_DEBUG_MODE
+ if (event_debug_mode_on_)
+ event_errx(1, "%s was called twice!", __func__);
+ if (event_debug_mode_too_late)
+ event_errx(1, "%s must be called *before* creating any events "
+ "or event_bases",__func__);
+
+ event_debug_mode_on_ = 1;
+
+ HT_INIT(event_debug_map, &global_debug_map);
+#endif
+}
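+
+/* Illustrative usage sketch (not part of this file): debug mode must be
+ * enabled before any event or event_base is created, e.g. at the top of
+ * main().  The name 'base' is hypothetical.
+ *
+ *    event_enable_debug_mode();
+ *    base = event_base_new();
+ */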
+
+void
+event_disable_debug_mode(void)
+{
+#ifndef EVENT__DISABLE_DEBUG_MODE
+ struct event_debug_entry **ent, *victim;
+
+ EVLOCK_LOCK(event_debug_map_lock_, 0);
+ for (ent = HT_START(event_debug_map, &global_debug_map); ent; ) {
+ victim = *ent;
+ ent = HT_NEXT_RMV(event_debug_map, &global_debug_map, ent);
+ mm_free(victim);
+ }
+ HT_CLEAR(event_debug_map, &global_debug_map);
+ EVLOCK_UNLOCK(event_debug_map_lock_ , 0);
+
+ event_debug_mode_on_ = 0;
+#endif
+}
+
+struct event_base *
+event_base_new_with_config(const struct event_config *cfg)
+{
+ int i;
+ struct event_base *base;
+ int should_check_environment;
+
+#ifndef EVENT__DISABLE_DEBUG_MODE
+ event_debug_mode_too_late = 1;
+#endif
+
+ if ((base = mm_calloc(1, sizeof(struct event_base))) == NULL) {
+ event_warn("%s: calloc", __func__);
+ return NULL;
+ }
+
+ if (cfg)
+ base->flags = cfg->flags;
+
+ should_check_environment =
+ !(cfg && (cfg->flags & EVENT_BASE_FLAG_IGNORE_ENV));
+
+ {
+ struct timeval tmp;
+ int precise_time =
+ cfg && (cfg->flags & EVENT_BASE_FLAG_PRECISE_TIMER);
+ int flags;
+		if (should_check_environment && !precise_time) {
+			precise_time = evutil_getenv_("EVENT_PRECISE_TIMER") != NULL;
+			if (precise_time)
+				base->flags |= EVENT_BASE_FLAG_PRECISE_TIMER;
+		}
+ flags = precise_time ? EV_MONOT_PRECISE : 0;
+ evutil_configure_monotonic_time_(&base->monotonic_timer, flags);
+
+ gettime(base, &tmp);
+ }
+
+ min_heap_ctor_(&base->timeheap);
+
+ base->sig.ev_signal_pair[0] = -1;
+ base->sig.ev_signal_pair[1] = -1;
+ base->th_notify_fd[0] = -1;
+ base->th_notify_fd[1] = -1;
+
+ TAILQ_INIT(&base->active_later_queue);
+
+ evmap_io_initmap_(&base->io);
+ evmap_signal_initmap_(&base->sigmap);
+ event_changelist_init_(&base->changelist);
+
+ base->evbase = NULL;
+
+ if (cfg) {
+ memcpy(&base->max_dispatch_time,
+ &cfg->max_dispatch_interval, sizeof(struct timeval));
+ base->limit_callbacks_after_prio =
+ cfg->limit_callbacks_after_prio;
+ } else {
+ base->max_dispatch_time.tv_sec = -1;
+ base->limit_callbacks_after_prio = 1;
+ }
+ if (cfg && cfg->max_dispatch_callbacks >= 0) {
+ base->max_dispatch_callbacks = cfg->max_dispatch_callbacks;
+ } else {
+ base->max_dispatch_callbacks = INT_MAX;
+ }
+ if (base->max_dispatch_callbacks == INT_MAX &&
+ base->max_dispatch_time.tv_sec == -1)
+ base->limit_callbacks_after_prio = INT_MAX;
+
+ for (i = 0; eventops[i] && !base->evbase; i++) {
+ if (cfg != NULL) {
+ /* determine if this backend should be avoided */
+ if (event_config_is_avoided_method(cfg,
+ eventops[i]->name))
+ continue;
+ if ((eventops[i]->features & cfg->require_features)
+ != cfg->require_features)
+ continue;
+ }
+
+ /* also obey the environment variables */
+ if (should_check_environment &&
+ event_is_method_disabled(eventops[i]->name))
+ continue;
+
+ base->evsel = eventops[i];
+
+ base->evbase = base->evsel->init(base);
+ }
+
+ if (base->evbase == NULL) {
+ event_warnx("%s: no event mechanism available",
+ __func__);
+ base->evsel = NULL;
+ event_base_free(base);
+ return NULL;
+ }
+
+ if (evutil_getenv_("EVENT_SHOW_METHOD"))
+ event_msgx("libevent using: %s", base->evsel->name);
+
+ /* allocate a single active event queue */
+ if (event_base_priority_init(base, 1) < 0) {
+ event_base_free(base);
+ return NULL;
+ }
+
+ /* prepare for threading */
+
+#if !defined(EVENT__DISABLE_THREAD_SUPPORT) && !defined(EVENT__DISABLE_DEBUG_MODE)
+ event_debug_created_threadable_ctx_ = 1;
+#endif
+
+#ifndef EVENT__DISABLE_THREAD_SUPPORT
+ if (EVTHREAD_LOCKING_ENABLED() &&
+ (!cfg || !(cfg->flags & EVENT_BASE_FLAG_NOLOCK))) {
+ int r;
+ EVTHREAD_ALLOC_LOCK(base->th_base_lock, 0);
+ EVTHREAD_ALLOC_COND(base->current_event_cond);
+ r = evthread_make_base_notifiable(base);
+ if (r<0) {
+ event_warnx("%s: Unable to make base notifiable.", __func__);
+ event_base_free(base);
+ return NULL;
+ }
+ }
+#endif
+
+#ifdef _WIN32
+ if (cfg && (cfg->flags & EVENT_BASE_FLAG_STARTUP_IOCP))
+ event_base_start_iocp_(base, cfg->n_cpus_hint);
+#endif
+
+ return (base);
+}
+
+int
+event_base_start_iocp_(struct event_base *base, int n_cpus)
+{
+#ifdef _WIN32
+ if (base->iocp)
+ return 0;
+ base->iocp = event_iocp_port_launch_(n_cpus);
+ if (!base->iocp) {
+ event_warnx("%s: Couldn't launch IOCP", __func__);
+ return -1;
+ }
+ return 0;
+#else
+ return -1;
+#endif
+}
+
+void
+event_base_stop_iocp_(struct event_base *base)
+{
+#ifdef _WIN32
+ int rv;
+
+ if (!base->iocp)
+ return;
+ rv = event_iocp_shutdown_(base->iocp, -1);
+ EVUTIL_ASSERT(rv >= 0);
+ base->iocp = NULL;
+#endif
+}
+
+static int
+event_base_cancel_single_callback_(struct event_base *base,
+ struct event_callback *evcb,
+ int run_finalizers)
+{
+ int result = 0;
+
+ if (evcb->evcb_flags & EVLIST_INIT) {
+ struct event *ev = event_callback_to_event(evcb);
+ if (!(ev->ev_flags & EVLIST_INTERNAL)) {
+ event_del_(ev, EVENT_DEL_EVEN_IF_FINALIZING);
+ result = 1;
+ }
+ } else {
+ EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+ event_callback_cancel_nolock_(base, evcb, 1);
+ EVBASE_RELEASE_LOCK(base, th_base_lock);
+ result = 1;
+ }
+
+ if (run_finalizers && (evcb->evcb_flags & EVLIST_FINALIZING)) {
+ switch (evcb->evcb_closure) {
+ case EV_CLOSURE_EVENT_FINALIZE:
+ case EV_CLOSURE_EVENT_FINALIZE_FREE: {
+ struct event *ev = event_callback_to_event(evcb);
+ ev->ev_evcallback.evcb_cb_union.evcb_evfinalize(ev, ev->ev_arg);
+ if (evcb->evcb_closure == EV_CLOSURE_EVENT_FINALIZE_FREE)
+ mm_free(ev);
+ break;
+ }
+ case EV_CLOSURE_CB_FINALIZE:
+ evcb->evcb_cb_union.evcb_cbfinalize(evcb, evcb->evcb_arg);
+ break;
+ default:
+ break;
+ }
+ }
+ return result;
+}
+
+static int event_base_free_queues_(struct event_base *base, int run_finalizers)
+{
+ int deleted = 0, i;
+
+ for (i = 0; i < base->nactivequeues; ++i) {
+ struct event_callback *evcb, *next;
+ for (evcb = TAILQ_FIRST(&base->activequeues[i]); evcb; ) {
+ next = TAILQ_NEXT(evcb, evcb_active_next);
+ deleted += event_base_cancel_single_callback_(base, evcb, run_finalizers);
+ evcb = next;
+ }
+ }
+
+ {
+ struct event_callback *evcb;
+ while ((evcb = TAILQ_FIRST(&base->active_later_queue))) {
+ deleted += event_base_cancel_single_callback_(base, evcb, run_finalizers);
+ }
+ }
+
+ return deleted;
+}
+
+static void
+event_base_free_(struct event_base *base, int run_finalizers)
+{
+ int i, n_deleted=0;
+ struct event *ev;
+ /* XXXX grab the lock? If there is contention when one thread frees
+ * the base, then the contending thread will be very sad soon. */
+
+ /* event_base_free(NULL) is how to free the current_base if we
+ * made it with event_init and forgot to hold a reference to it. */
+ if (base == NULL && current_base)
+ base = current_base;
+ /* Don't actually free NULL. */
+ if (base == NULL) {
+ event_warnx("%s: no base to free", __func__);
+ return;
+ }
+ /* XXX(niels) - check for internal events first */
+
+#ifdef _WIN32
+ event_base_stop_iocp_(base);
+#endif
+
+ /* threading fds if we have them */
+ if (base->th_notify_fd[0] != -1) {
+ event_del(&base->th_notify);
+ EVUTIL_CLOSESOCKET(base->th_notify_fd[0]);
+ if (base->th_notify_fd[1] != -1)
+ EVUTIL_CLOSESOCKET(base->th_notify_fd[1]);
+ base->th_notify_fd[0] = -1;
+ base->th_notify_fd[1] = -1;
+ event_debug_unassign(&base->th_notify);
+ }
+
+ /* Delete all non-internal events. */
+ evmap_delete_all_(base);
+
+ while ((ev = min_heap_top_(&base->timeheap)) != NULL) {
+ event_del(ev);
+ ++n_deleted;
+ }
+ for (i = 0; i < base->n_common_timeouts; ++i) {
+ struct common_timeout_list *ctl =
+ base->common_timeout_queues[i];
+ event_del(&ctl->timeout_event); /* Internal; doesn't count */
+ event_debug_unassign(&ctl->timeout_event);
+ for (ev = TAILQ_FIRST(&ctl->events); ev; ) {
+ struct event *next = TAILQ_NEXT(ev,
+ ev_timeout_pos.ev_next_with_common_timeout);
+ if (!(ev->ev_flags & EVLIST_INTERNAL)) {
+ event_del(ev);
+ ++n_deleted;
+ }
+ ev = next;
+ }
+ mm_free(ctl);
+ }
+ if (base->common_timeout_queues)
+ mm_free(base->common_timeout_queues);
+
+ for (;;) {
+		/* A finalizer can register yet another finalizer from within
+		 * itself, and if that finalizer ends up in active_later_queue we
+		 * would move it to activequeues and still have events in
+		 * activequeues after this function returns, which is not what we
+		 * want (we even have an assertion for this).
+		 *
+		 * A simple case is a bufferevent with an underlying bufferevent
+		 * (i.e. filters).
+		 */
+ int i = event_base_free_queues_(base, run_finalizers);
+ if (!i) {
+ break;
+ }
+ n_deleted += i;
+ }
+
+ if (n_deleted)
+ event_debug(("%s: %d events were still set in base",
+ __func__, n_deleted));
+
+ while (LIST_FIRST(&base->once_events)) {
+ struct event_once *eonce = LIST_FIRST(&base->once_events);
+ LIST_REMOVE(eonce, next_once);
+ mm_free(eonce);
+ }
+
+ if (base->evsel != NULL && base->evsel->dealloc != NULL)
+ base->evsel->dealloc(base);
+
+ for (i = 0; i < base->nactivequeues; ++i)
+ EVUTIL_ASSERT(TAILQ_EMPTY(&base->activequeues[i]));
+
+ EVUTIL_ASSERT(min_heap_empty_(&base->timeheap));
+ min_heap_dtor_(&base->timeheap);
+
+ mm_free(base->activequeues);
+
+ evmap_io_clear_(&base->io);
+ evmap_signal_clear_(&base->sigmap);
+ event_changelist_freemem_(&base->changelist);
+
+ EVTHREAD_FREE_LOCK(base->th_base_lock, 0);
+ EVTHREAD_FREE_COND(base->current_event_cond);
+
+ /* If we're freeing current_base, there won't be a current_base. */
+ if (base == current_base)
+ current_base = NULL;
+ mm_free(base);
+}
+
+void
+event_base_free_nofinalize(struct event_base *base)
+{
+ event_base_free_(base, 0);
+}
+
+void
+event_base_free(struct event_base *base)
+{
+ event_base_free_(base, 1);
+}
+
+/* Fake eventop; used to disable the backend temporarily inside event_reinit
+ * so that we can call event_del() on an event without telling the backend.
+ */
+static int
+nil_backend_del(struct event_base *b, evutil_socket_t fd, short old,
+ short events, void *fdinfo)
+{
+ return 0;
+}
+const struct eventop nil_eventop = {
+ "nil",
+ NULL, /* init: unused. */
+ NULL, /* add: unused. */
+ nil_backend_del, /* del: used, so needs to be killed. */
+ NULL, /* dispatch: unused. */
+ NULL, /* dealloc: unused. */
+ 0, 0, 0
+};
+
+/* reinitialize the event base after a fork */
+int
+event_reinit(struct event_base *base)
+{
+ const struct eventop *evsel;
+ int res = 0;
+ int was_notifiable = 0;
+ int had_signal_added = 0;
+
+ EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+
+ evsel = base->evsel;
+
+ /* check if this event mechanism requires reinit on the backend */
+ if (evsel->need_reinit) {
+ /* We're going to call event_del() on our notify events (the
+ * ones that tell about signals and wakeup events). But we
+ * don't actually want to tell the backend to change its
+ * state, since it might still share some resource (a kqueue,
+ * an epoll fd) with the parent process, and we don't want to
+		 * delete the fds from _that_ backend, so we temporarily stub
+		 * out the evsel with a replacement.
+ */
+ base->evsel = &nil_eventop;
+ }
+
+ /* We need to re-create a new signal-notification fd and a new
+ * thread-notification fd. Otherwise, we'll still share those with
+ * the parent process, which would make any notification sent to them
+ * get received by one or both of the event loops, more or less at
+ * random.
+ */
+ if (base->sig.ev_signal_added) {
+ event_del_nolock_(&base->sig.ev_signal, EVENT_DEL_AUTOBLOCK);
+ event_debug_unassign(&base->sig.ev_signal);
+ memset(&base->sig.ev_signal, 0, sizeof(base->sig.ev_signal));
+ had_signal_added = 1;
+ base->sig.ev_signal_added = 0;
+ }
+ if (base->sig.ev_signal_pair[0] != -1)
+ EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[0]);
+ if (base->sig.ev_signal_pair[1] != -1)
+ EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[1]);
+ if (base->th_notify_fn != NULL) {
+ was_notifiable = 1;
+ base->th_notify_fn = NULL;
+ }
+ if (base->th_notify_fd[0] != -1) {
+ event_del_nolock_(&base->th_notify, EVENT_DEL_AUTOBLOCK);
+ EVUTIL_CLOSESOCKET(base->th_notify_fd[0]);
+ if (base->th_notify_fd[1] != -1)
+ EVUTIL_CLOSESOCKET(base->th_notify_fd[1]);
+ base->th_notify_fd[0] = -1;
+ base->th_notify_fd[1] = -1;
+ event_debug_unassign(&base->th_notify);
+ }
+
+ /* Replace the original evsel. */
+ base->evsel = evsel;
+
+ if (evsel->need_reinit) {
+ /* Reconstruct the backend through brute-force, so that we do
+ * not share any structures with the parent process. For some
+ * backends, this is necessary: epoll and kqueue, for
+ * instance, have events associated with a kernel
+		 * structure. If we didn't reinitialize, we'd share that
+ * structure with the parent process, and any changes made by
+ * the parent would affect our backend's behavior (and vice
+ * versa).
+ */
+ if (base->evsel->dealloc != NULL)
+ base->evsel->dealloc(base);
+ base->evbase = evsel->init(base);
+ if (base->evbase == NULL) {
+ event_errx(1,
+ "%s: could not reinitialize event mechanism",
+ __func__);
+ res = -1;
+ goto done;
+ }
+
+ /* Empty out the changelist (if any): we are starting from a
+ * blank slate. */
+ event_changelist_freemem_(&base->changelist);
+
+ /* Tell the event maps to re-inform the backend about all
+ * pending events. This will make the signal notification
+ * event get re-created if necessary. */
+ if (evmap_reinit_(base) < 0)
+ res = -1;
+ } else {
+ res = evsig_init_(base);
+ if (res == 0 && had_signal_added) {
+ res = event_add_nolock_(&base->sig.ev_signal, NULL, 0);
+ if (res == 0)
+ base->sig.ev_signal_added = 1;
+ }
+ }
+
+ /* If we were notifiable before, and nothing just exploded, become
+ * notifiable again. */
+ if (was_notifiable && res == 0)
+ res = evthread_make_base_notifiable_nolock_(base);
+
+done:
+ EVBASE_RELEASE_LOCK(base, th_base_lock);
+ return (res);
+}
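+
+/* Illustrative usage sketch (not part of this file): a forking server that
+ * keeps using the same event_base in the child must reinitialize it so the
+ * child stops sharing backend state (an epoll fd, a kqueue, notification
+ * pipes) with the parent.  The name 'base' is hypothetical.
+ *
+ *    if (fork() == 0) {
+ *        event_reinit(base);
+ *        event_base_dispatch(base);
+ *    }
+ */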
+
+/* Get the monotonic time for this event_base' timer */
+int
+event_gettime_monotonic(struct event_base *base, struct timeval *tv)
+{
+ int rv = -1;
+
+ if (base && tv) {
+ EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+ rv = evutil_gettime_monotonic_(&(base->monotonic_timer), tv);
+ EVBASE_RELEASE_LOCK(base, th_base_lock);
+ }
+
+ return rv;
+}
+
+const char **
+event_get_supported_methods(void)
+{
+ static const char **methods = NULL;
+ const struct eventop **method;
+ const char **tmp;
+ int i = 0, k;
+
+ /* count all methods */
+ for (method = &eventops[0]; *method != NULL; ++method) {
+ ++i;
+ }
+
+ /* allocate one more than we need for the NULL pointer */
+ tmp = mm_calloc((i + 1), sizeof(char *));
+ if (tmp == NULL)
+ return (NULL);
+
+ /* populate the array with the supported methods */
+ for (k = 0, i = 0; eventops[k] != NULL; ++k) {
+ tmp[i++] = eventops[k]->name;
+ }
+ tmp[i] = NULL;
+
+ if (methods != NULL)
+ mm_free((char**)methods);
+
+ methods = tmp;
+
+ return (methods);
+}
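+
+/* Illustrative usage sketch (not part of this file): the returned array is
+ * NULL-terminated, so callers can simply walk it.
+ *
+ *    const char **methods = event_get_supported_methods();
+ *    for (int i = 0; methods && methods[i] != NULL; ++i)
+ *        printf("supported method: %s\n", methods[i]);
+ */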
+
+struct event_config *
+event_config_new(void)
+{
+ struct event_config *cfg = mm_calloc(1, sizeof(*cfg));
+
+ if (cfg == NULL)
+ return (NULL);
+
+ TAILQ_INIT(&cfg->entries);
+ cfg->max_dispatch_interval.tv_sec = -1;
+ cfg->max_dispatch_callbacks = INT_MAX;
+ cfg->limit_callbacks_after_prio = 1;
+
+ return (cfg);
+}
+
+static void
+event_config_entry_free(struct event_config_entry *entry)
+{
+ if (entry->avoid_method != NULL)
+ mm_free((char *)entry->avoid_method);
+ mm_free(entry);
+}
+
+void
+event_config_free(struct event_config *cfg)
+{
+ struct event_config_entry *entry;
+
+ while ((entry = TAILQ_FIRST(&cfg->entries)) != NULL) {
+ TAILQ_REMOVE(&cfg->entries, entry, next);
+ event_config_entry_free(entry);
+ }
+ mm_free(cfg);
+}
+
+int
+event_config_set_flag(struct event_config *cfg, int flag)
+{
+ if (!cfg)
+ return -1;
+ cfg->flags |= flag;
+ return 0;
+}
+
+int
+event_config_avoid_method(struct event_config *cfg, const char *method)
+{
+ struct event_config_entry *entry = mm_malloc(sizeof(*entry));
+ if (entry == NULL)
+ return (-1);
+
+ if ((entry->avoid_method = mm_strdup(method)) == NULL) {
+ mm_free(entry);
+ return (-1);
+ }
+
+ TAILQ_INSERT_TAIL(&cfg->entries, entry, next);
+
+ return (0);
+}
+
+int
+event_config_require_features(struct event_config *cfg,
+ int features)
+{
+ if (!cfg)
+ return (-1);
+ cfg->require_features = features;
+ return (0);
+}
+
+int
+event_config_set_num_cpus_hint(struct event_config *cfg, int cpus)
+{
+ if (!cfg)
+ return (-1);
+ cfg->n_cpus_hint = cpus;
+ return (0);
+}
+
+int
+event_config_set_max_dispatch_interval(struct event_config *cfg,
+ const struct timeval *max_interval, int max_callbacks, int min_priority)
+{
+ if (max_interval)
+ memcpy(&cfg->max_dispatch_interval, max_interval,
+ sizeof(struct timeval));
+ else
+ cfg->max_dispatch_interval.tv_sec = -1;
+ cfg->max_dispatch_callbacks =
+ max_callbacks >= 0 ? max_callbacks : INT_MAX;
+ if (min_priority < 0)
+ min_priority = 0;
+ cfg->limit_callbacks_after_prio = min_priority;
+ return (0);
+}
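+
+/* Illustrative usage sketch (not part of this file): a config is built up
+ * with the setters above and then handed to event_base_new_with_config();
+ * it can be freed as soon as the base has been created.
+ *
+ *    struct event_config *cfg = event_config_new();
+ *    struct event_base *base = NULL;
+ *    if (cfg) {
+ *        event_config_avoid_method(cfg, "select");
+ *        event_config_require_features(cfg, EV_FEATURE_ET);
+ *        base = event_base_new_with_config(cfg);
+ *        event_config_free(cfg);
+ *    }
+ */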
+
+int
+event_priority_init(int npriorities)
+{
+ return event_base_priority_init(current_base, npriorities);
+}
+
+int
+event_base_priority_init(struct event_base *base, int npriorities)
+{
+ int i, r;
+ r = -1;
+
+ EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+
+ if (N_ACTIVE_CALLBACKS(base) || npriorities < 1
+ || npriorities >= EVENT_MAX_PRIORITIES)
+ goto err;
+
+ if (npriorities == base->nactivequeues)
+ goto ok;
+
+ if (base->nactivequeues) {
+ mm_free(base->activequeues);
+ base->nactivequeues = 0;
+ }
+
+ /* Allocate our priority queues */
+ base->activequeues = (struct evcallback_list *)
+ mm_calloc(npriorities, sizeof(struct evcallback_list));
+ if (base->activequeues == NULL) {
+ event_warn("%s: calloc", __func__);
+ goto err;
+ }
+ base->nactivequeues = npriorities;
+
+ for (i = 0; i < base->nactivequeues; ++i) {
+ TAILQ_INIT(&base->activequeues[i]);
+ }
+
+ok:
+ r = 0;
+err:
+ EVBASE_RELEASE_LOCK(base, th_base_lock);
+ return (r);
+}
+
+int
+event_base_get_npriorities(struct event_base *base)
+{
+
+ int n;
+ if (base == NULL)
+ base = current_base;
+
+ EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+ n = base->nactivequeues;
+ EVBASE_RELEASE_LOCK(base, th_base_lock);
+ return (n);
+}
+
+int
+event_base_get_num_events(struct event_base *base, unsigned int type)
+{
+ int r = 0;
+
+ EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+
+ if (type & EVENT_BASE_COUNT_ACTIVE)
+ r += base->event_count_active;
+
+ if (type & EVENT_BASE_COUNT_VIRTUAL)
+ r += base->virtual_event_count;
+
+ if (type & EVENT_BASE_COUNT_ADDED)
+ r += base->event_count;
+
+ EVBASE_RELEASE_LOCK(base, th_base_lock);
+
+ return r;
+}
+
+int
+event_base_get_max_events(struct event_base *base, unsigned int type, int clear)
+{
+ int r = 0;
+
+ EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+
+ if (type & EVENT_BASE_COUNT_ACTIVE) {
+ r += base->event_count_active_max;
+ if (clear)
+ base->event_count_active_max = 0;
+ }
+
+ if (type & EVENT_BASE_COUNT_VIRTUAL) {
+ r += base->virtual_event_count_max;
+ if (clear)
+ base->virtual_event_count_max = 0;
+ }
+
+ if (type & EVENT_BASE_COUNT_ADDED) {
+ r += base->event_count_max;
+ if (clear)
+ base->event_count_max = 0;
+ }
+
+ EVBASE_RELEASE_LOCK(base, th_base_lock);
+
+ return r;
+}
+
+/* Returns true iff we're currently watching any events. */
+static int
+event_haveevents(struct event_base *base)
+{
+ /* Caller must hold th_base_lock */
+ return (base->virtual_event_count > 0 || base->event_count > 0);
+}
+
+/* "closure" function called when processing active signal events */
+static inline void
+event_signal_closure(struct event_base *base, struct event *ev)
+{
+ short ncalls;
+ int should_break;
+
+ /* Allows deletes to work */
+ ncalls = ev->ev_ncalls;
+ if (ncalls != 0)
+ ev->ev_pncalls = &ncalls;
+ EVBASE_RELEASE_LOCK(base, th_base_lock);
+ while (ncalls) {
+ ncalls--;
+ ev->ev_ncalls = ncalls;
+ if (ncalls == 0)
+ ev->ev_pncalls = NULL;
+ (*ev->ev_callback)(ev->ev_fd, ev->ev_res, ev->ev_arg);
+
+ EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+ should_break = base->event_break;
+ EVBASE_RELEASE_LOCK(base, th_base_lock);
+
+ if (should_break) {
+ if (ncalls != 0)
+ ev->ev_pncalls = NULL;
+ return;
+ }
+ }
+}
+
+/* Common timeouts are special timeouts that are handled as queues rather than
+ * in the minheap. This is more efficient than the minheap if we happen to
+ * know that we're going to get several thousands of timeout events all with
+ * the same timeout value.
+ *
+ * Since all our timeout handling code assumes timevals can be copied,
+ * assigned, etc, we can't use "magic pointer" to encode these common
+ * timeouts. Searching through a list to see if every timeout is common could
+ * also get inefficient. Instead, we take advantage of the fact that tv_usec
+ * is 32 bits long, but only uses 20 of those bits (since it can never be over
+ * 999999.) We use the top bits to encode 4 bits of magic number, and 8 bits
+ * of index into the event_base's array of common timeouts.
+ */
+
+#define MICROSECONDS_MASK COMMON_TIMEOUT_MICROSECONDS_MASK
+#define COMMON_TIMEOUT_IDX_MASK 0x0ff00000
+#define COMMON_TIMEOUT_IDX_SHIFT 20
+#define COMMON_TIMEOUT_MASK 0xf0000000
+#define COMMON_TIMEOUT_MAGIC 0x50000000
+
+#define COMMON_TIMEOUT_IDX(tv) \
+ (((tv)->tv_usec & COMMON_TIMEOUT_IDX_MASK)>>COMMON_TIMEOUT_IDX_SHIFT)
+
+/** Return true iff 'tv' is a common timeout in 'base' */
+static inline int
+is_common_timeout(const struct timeval *tv,
+ const struct event_base *base)
+{
+ int idx;
+ if ((tv->tv_usec & COMMON_TIMEOUT_MASK) != COMMON_TIMEOUT_MAGIC)
+ return 0;
+ idx = COMMON_TIMEOUT_IDX(tv);
+ return idx < base->n_common_timeouts;
+}
+
+/* True iff tv1 and tv2 have the same common-timeout index, or if neither
+ * one is a common timeout. */
+static inline int
+is_same_common_timeout(const struct timeval *tv1, const struct timeval *tv2)
+{
+ return (tv1->tv_usec & ~MICROSECONDS_MASK) ==
+ (tv2->tv_usec & ~MICROSECONDS_MASK);
+}
+
+/** Requires that 'tv' is a common timeout. Return the corresponding
+ * common_timeout_list. */
+static inline struct common_timeout_list *
+get_common_timeout_list(struct event_base *base, const struct timeval *tv)
+{
+ return base->common_timeout_queues[COMMON_TIMEOUT_IDX(tv)];
+}
+
+#if 0
+static inline int
+common_timeout_ok(const struct timeval *tv,
+ struct event_base *base)
+{
+ const struct timeval *expect =
+ &get_common_timeout_list(base, tv)->duration;
+ return tv->tv_sec == expect->tv_sec &&
+ tv->tv_usec == expect->tv_usec;
+}
+#endif
+
+/* Add the timeout for the first event in the given common timeout list to the
+ * event_base's minheap. */
+static void
+common_timeout_schedule(struct common_timeout_list *ctl,
+ const struct timeval *now, struct event *head)
+{
+ struct timeval timeout = head->ev_timeout;
+ timeout.tv_usec &= MICROSECONDS_MASK;
+ event_add_nolock_(&ctl->timeout_event, &timeout, 1);
+}
+
+/* Callback: invoked when the timeout for a common timeout queue triggers.
+ * This means that (at least) the first event in that queue should be run,
+ * and the timeout should be rescheduled if there are more events. */
+static void
+common_timeout_callback(evutil_socket_t fd, short what, void *arg)
+{
+ struct timeval now;
+ struct common_timeout_list *ctl = arg;
+ struct event_base *base = ctl->base;
+ struct event *ev = NULL;
+ EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+ gettime(base, &now);
+ while (1) {
+ ev = TAILQ_FIRST(&ctl->events);
+ if (!ev || ev->ev_timeout.tv_sec > now.tv_sec ||
+ (ev->ev_timeout.tv_sec == now.tv_sec &&
+ (ev->ev_timeout.tv_usec&MICROSECONDS_MASK) > now.tv_usec))
+ break;
+ event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
+ event_active_nolock_(ev, EV_TIMEOUT, 1);
+ }
+ if (ev)
+ common_timeout_schedule(ctl, &now, ev);
+ EVBASE_RELEASE_LOCK(base, th_base_lock);
+}
+
+#define MAX_COMMON_TIMEOUTS 256
+
+const struct timeval *
+event_base_init_common_timeout(struct event_base *base,
+ const struct timeval *duration)
+{
+ int i;
+ struct timeval tv;
+ const struct timeval *result=NULL;
+ struct common_timeout_list *new_ctl;
+
+ EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+ if (duration->tv_usec > 1000000) {
+ memcpy(&tv, duration, sizeof(struct timeval));
+ if (is_common_timeout(duration, base))
+ tv.tv_usec &= MICROSECONDS_MASK;
+ tv.tv_sec += tv.tv_usec / 1000000;
+ tv.tv_usec %= 1000000;
+ duration = &tv;
+ }
+ for (i = 0; i < base->n_common_timeouts; ++i) {
+ const struct common_timeout_list *ctl =
+ base->common_timeout_queues[i];
+ if (duration->tv_sec == ctl->duration.tv_sec &&
+ duration->tv_usec ==
+ (ctl->duration.tv_usec & MICROSECONDS_MASK)) {
+ EVUTIL_ASSERT(is_common_timeout(&ctl->duration, base));
+ result = &ctl->duration;
+ goto done;
+ }
+ }
+ if (base->n_common_timeouts == MAX_COMMON_TIMEOUTS) {
+ event_warnx("%s: Too many common timeouts already in use; "
+ "we only support %d per event_base", __func__,
+ MAX_COMMON_TIMEOUTS);
+ goto done;
+ }
+ if (base->n_common_timeouts_allocated == base->n_common_timeouts) {
+ int n = base->n_common_timeouts < 16 ? 16 :
+ base->n_common_timeouts*2;
+ struct common_timeout_list **newqueues =
+ mm_realloc(base->common_timeout_queues,
+ n*sizeof(struct common_timeout_queue *));
+ if (!newqueues) {
+ event_warn("%s: realloc",__func__);
+ goto done;
+ }
+ base->n_common_timeouts_allocated = n;
+ base->common_timeout_queues = newqueues;
+ }
+ new_ctl = mm_calloc(1, sizeof(struct common_timeout_list));
+ if (!new_ctl) {
+ event_warn("%s: calloc",__func__);
+ goto done;
+ }
+ TAILQ_INIT(&new_ctl->events);
+ new_ctl->duration.tv_sec = duration->tv_sec;
+ new_ctl->duration.tv_usec =
+ duration->tv_usec | COMMON_TIMEOUT_MAGIC |
+ (base->n_common_timeouts << COMMON_TIMEOUT_IDX_SHIFT);
+ evtimer_assign(&new_ctl->timeout_event, base,
+ common_timeout_callback, new_ctl);
+ new_ctl->timeout_event.ev_flags |= EVLIST_INTERNAL;
+ event_priority_set(&new_ctl->timeout_event, 0);
+ new_ctl->base = base;
+ base->common_timeout_queues[base->n_common_timeouts++] = new_ctl;
+ result = &new_ctl->duration;
+
+done:
+ if (result)
+ EVUTIL_ASSERT(is_common_timeout(result, base));
+
+ EVBASE_RELEASE_LOCK(base, th_base_lock);
+ return result;
+}
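+
+/* Illustrative usage sketch (not part of this file): callers that expect many
+ * events to share one duration can ask for a common timeout once and reuse
+ * the returned timeval for every event_add(); if the call fails, the plain
+ * timeval still works.  The names 'base' and 'ev' are hypothetical.
+ *
+ *    struct timeval ten_sec = { 10, 0 };
+ *    const struct timeval *common =
+ *        event_base_init_common_timeout(base, &ten_sec);
+ *    event_add(ev, common ? common : &ten_sec);
+ */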
+
+/* Closure function invoked when we're activating a persistent event. */
+static inline void
+event_persist_closure(struct event_base *base, struct event *ev)
+{
+ void (*evcb_callback)(evutil_socket_t, short, void *);
+
+ // Other fields of *ev that must be stored before executing
+ evutil_socket_t evcb_fd;
+ short evcb_res;
+ void *evcb_arg;
+
+ /* reschedule the persistent event if we have a timeout. */
+ if (ev->ev_io_timeout.tv_sec || ev->ev_io_timeout.tv_usec) {
+ /* If there was a timeout, we want it to run at an interval of
+ * ev_io_timeout after the last time it was _scheduled_ for,
+ * not ev_io_timeout after _now_. If it fired for another
+ * reason, though, the timeout ought to start ticking _now_. */
+ struct timeval run_at, relative_to, delay, now;
+ ev_uint32_t usec_mask = 0;
+ EVUTIL_ASSERT(is_same_common_timeout(&ev->ev_timeout,
+ &ev->ev_io_timeout));
+ gettime(base, &now);
+ if (is_common_timeout(&ev->ev_timeout, base)) {
+ delay = ev->ev_io_timeout;
+ usec_mask = delay.tv_usec & ~MICROSECONDS_MASK;
+ delay.tv_usec &= MICROSECONDS_MASK;
+ if (ev->ev_res & EV_TIMEOUT) {
+ relative_to = ev->ev_timeout;
+ relative_to.tv_usec &= MICROSECONDS_MASK;
+ } else {
+ relative_to = now;
+ }
+ } else {
+ delay = ev->ev_io_timeout;
+ if (ev->ev_res & EV_TIMEOUT) {
+ relative_to = ev->ev_timeout;
+ } else {
+ relative_to = now;
+ }
+ }
+ evutil_timeradd(&relative_to, &delay, &run_at);
+ if (evutil_timercmp(&run_at, &now, <)) {
+ /* Looks like we missed at least one invocation due to
+ * a clock jump, not running the event loop for a
+ * while, really slow callbacks, or
+ * something. Reschedule relative to now.
+ */
+ evutil_timeradd(&now, &delay, &run_at);
+ }
+ run_at.tv_usec |= usec_mask;
+ event_add_nolock_(ev, &run_at, 1);
+ }
+
+ // Save our callback before we release the lock
+ evcb_callback = ev->ev_callback;
+ evcb_fd = ev->ev_fd;
+ evcb_res = ev->ev_res;
+ evcb_arg = ev->ev_arg;
+
+ // Release the lock
+ EVBASE_RELEASE_LOCK(base, th_base_lock);
+
+ // Execute the callback
+ (evcb_callback)(evcb_fd, evcb_res, evcb_arg);
+}
+
+/*
+ Helper for event_process_active to process all the events in a single queue,
+ releasing the lock as we go. This function requires that the lock be held
+ when it's invoked. Returns -1 if we get a signal or an event_break that
+ means we should stop processing any active events now. Otherwise returns
+ the number of non-internal event_callbacks that we processed.
+*/
+static int
+event_process_active_single_queue(struct event_base *base,
+ struct evcallback_list *activeq,
+ int max_to_process, const struct timeval *endtime)
+{
+ struct event_callback *evcb;
+ int count = 0;
+
+ EVUTIL_ASSERT(activeq != NULL);
+
+ for (evcb = TAILQ_FIRST(activeq); evcb; evcb = TAILQ_FIRST(activeq)) {
+ struct event *ev=NULL;
+ if (evcb->evcb_flags & EVLIST_INIT) {
+ ev = event_callback_to_event(evcb);
+
+ if (ev->ev_events & EV_PERSIST || ev->ev_flags & EVLIST_FINALIZING)
+ event_queue_remove_active(base, evcb);
+ else
+ event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
+ event_debug((
+ "event_process_active: event: %p, %s%s%scall %p",
+ ev,
+ ev->ev_res & EV_READ ? "EV_READ " : " ",
+ ev->ev_res & EV_WRITE ? "EV_WRITE " : " ",
+ ev->ev_res & EV_CLOSED ? "EV_CLOSED " : " ",
+ ev->ev_callback));
+ } else {
+ event_queue_remove_active(base, evcb);
+ event_debug(("event_process_active: event_callback %p, "
+ "closure %d, call %p",
+ evcb, evcb->evcb_closure, evcb->evcb_cb_union.evcb_callback));
+ }
+
+ if (!(evcb->evcb_flags & EVLIST_INTERNAL))
+ ++count;
+
+ base->current_event = evcb;
+#ifndef EVENT__DISABLE_THREAD_SUPPORT
+ base->current_event_waiters = 0;
+#endif
+
+ switch (evcb->evcb_closure) {
+ case EV_CLOSURE_EVENT_SIGNAL:
+ EVUTIL_ASSERT(ev != NULL);
+ event_signal_closure(base, ev);
+ break;
+ case EV_CLOSURE_EVENT_PERSIST:
+ EVUTIL_ASSERT(ev != NULL);
+ event_persist_closure(base, ev);
+ break;
+ case EV_CLOSURE_EVENT: {
+ void (*evcb_callback)(evutil_socket_t, short, void *);
+ EVUTIL_ASSERT(ev != NULL);
+ evcb_callback = *ev->ev_callback;
+ EVBASE_RELEASE_LOCK(base, th_base_lock);
+ evcb_callback(ev->ev_fd, ev->ev_res, ev->ev_arg);
+ }
+ break;
+ case EV_CLOSURE_CB_SELF: {
+ void (*evcb_selfcb)(struct event_callback *, void *) = evcb->evcb_cb_union.evcb_selfcb;
+ EVBASE_RELEASE_LOCK(base, th_base_lock);
+ evcb_selfcb(evcb, evcb->evcb_arg);
+ }
+ break;
+ case EV_CLOSURE_EVENT_FINALIZE:
+ case EV_CLOSURE_EVENT_FINALIZE_FREE: {
+ void (*evcb_evfinalize)(struct event *, void *);
+ int evcb_closure = evcb->evcb_closure;
+ EVUTIL_ASSERT(ev != NULL);
+ base->current_event = NULL;
+ evcb_evfinalize = ev->ev_evcallback.evcb_cb_union.evcb_evfinalize;
+ EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_FINALIZING));
+ EVBASE_RELEASE_LOCK(base, th_base_lock);
+ evcb_evfinalize(ev, ev->ev_arg);
+ event_debug_note_teardown_(ev);
+ if (evcb_closure == EV_CLOSURE_EVENT_FINALIZE_FREE)
+ mm_free(ev);
+ }
+ break;
+ case EV_CLOSURE_CB_FINALIZE: {
+ void (*evcb_cbfinalize)(struct event_callback *, void *) = evcb->evcb_cb_union.evcb_cbfinalize;
+ base->current_event = NULL;
+ EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_FINALIZING));
+ EVBASE_RELEASE_LOCK(base, th_base_lock);
+ evcb_cbfinalize(evcb, evcb->evcb_arg);
+ }
+ break;
+ default:
+ EVUTIL_ASSERT(0);
+ }
+
+ EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+ base->current_event = NULL;
+#ifndef EVENT__DISABLE_THREAD_SUPPORT
+ if (base->current_event_waiters) {
+ base->current_event_waiters = 0;
+ EVTHREAD_COND_BROADCAST(base->current_event_cond);
+ }
+#endif
+
+ if (base->event_break)
+ return -1;
+ if (count >= max_to_process)
+ return count;
+ if (count && endtime) {
+ struct timeval now;
+ update_time_cache(base);
+ gettime(base, &now);
+ if (evutil_timercmp(&now, endtime, >=))
+ return count;
+ }
+ if (base->event_continue)
+ break;
+ }
+ return count;
+}
+
+/*
+ * Active events are stored in priority queues. Lower priorities are always
+ * processed before higher priorities. Low priority events can starve high
+ * priority ones.
+ */
+
+static int
+event_process_active(struct event_base *base)
+{
+ /* Caller must hold th_base_lock */
+ struct evcallback_list *activeq = NULL;
+ int i, c = 0;
+ const struct timeval *endtime;
+ struct timeval tv;
+ const int maxcb = base->max_dispatch_callbacks;
+ const int limit_after_prio = base->limit_callbacks_after_prio;
+ if (base->max_dispatch_time.tv_sec >= 0) {
+ update_time_cache(base);
+ gettime(base, &tv);
+ evutil_timeradd(&base->max_dispatch_time, &tv, &tv);
+ endtime = &tv;
+ } else {
+ endtime = NULL;
+ }
+
+ for (i = 0; i < base->nactivequeues; ++i) {
+ if (TAILQ_FIRST(&base->activequeues[i]) != NULL) {
+ base->event_running_priority = i;
+ activeq = &base->activequeues[i];
+ if (i < limit_after_prio)
+ c = event_process_active_single_queue(base, activeq,
+ INT_MAX, NULL);
+ else
+ c = event_process_active_single_queue(base, activeq,
+ maxcb, endtime);
+ if (c < 0) {
+ goto done;
+ } else if (c > 0)
+ break; /* Processed a real event; do not
+ * consider lower-priority events */
+ /* If we get here, all of the events we processed
+ * were internal. Continue. */
+ }
+ }
+
+done:
+ base->event_running_priority = -1;
+
+ return c;
+}
+
+/*
+ * Wait continuously for events. We exit only if no events are left.
+ */
+
+int
+event_dispatch(void)
+{
+ return (event_loop(0));
+}
+
+int
+event_base_dispatch(struct event_base *event_base)
+{
+ return (event_base_loop(event_base, 0));
+}
+
+const char *
+event_base_get_method(const struct event_base *base)
+{
+ EVUTIL_ASSERT(base);
+ return (base->evsel->name);
+}
+
+/** Callback: used to implement event_base_loopexit by telling the event_base
+ * that it's time to exit its loop. */
+static void
+event_loopexit_cb(evutil_socket_t fd, short what, void *arg)
+{
+ struct event_base *base = arg;
+ base->event_gotterm = 1;
+}
+
+int
+event_loopexit(const struct timeval *tv)
+{
+ return (event_once(-1, EV_TIMEOUT, event_loopexit_cb,
+ current_base, tv));
+}
+
+int
+event_base_loopexit(struct event_base *event_base, const struct timeval *tv)
+{
+ return (event_base_once(event_base, -1, EV_TIMEOUT, event_loopexit_cb,
+ event_base, tv));
+}
+
+int
+event_loopbreak(void)
+{
+ return (event_base_loopbreak(current_base));
+}
+
+int
+event_base_loopbreak(struct event_base *event_base)
+{
+ int r = 0;
+ if (event_base == NULL)
+ return (-1);
+
+ EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
+ event_base->event_break = 1;
+
+ if (EVBASE_NEED_NOTIFY(event_base)) {
+ r = evthread_notify_base(event_base);
+ } else {
+ r = (0);
+ }
+ EVBASE_RELEASE_LOCK(event_base, th_base_lock);
+ return r;
+}
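+
+/* Illustrative usage sketch (not part of this file): loopbreak stops the loop
+ * as soon as the currently running callback returns, whereas loopexit (above)
+ * lets the active callbacks finish and can also be deferred by a timeout.
+ * The name 'base' is hypothetical.
+ *
+ *    struct timeval ten_sec = { 10, 0 };
+ *    event_base_loopexit(base, &ten_sec);  // exit roughly 10s from now
+ *    event_base_loopbreak(base);           // exit immediately
+ */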
+
+int
+event_base_loopcontinue(struct event_base *event_base)
+{
+ int r = 0;
+ if (event_base == NULL)
+ return (-1);
+
+ EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
+ event_base->event_continue = 1;
+
+ if (EVBASE_NEED_NOTIFY(event_base)) {
+ r = evthread_notify_base(event_base);
+ } else {
+ r = (0);
+ }
+ EVBASE_RELEASE_LOCK(event_base, th_base_lock);
+ return r;
+}
+
+int
+event_base_got_break(struct event_base *event_base)
+{
+ int res;
+ EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
+ res = event_base->event_break;
+ EVBASE_RELEASE_LOCK(event_base, th_base_lock);
+ return res;
+}
+
+int
+event_base_got_exit(struct event_base *event_base)
+{
+ int res;
+ EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
+ res = event_base->event_gotterm;
+ EVBASE_RELEASE_LOCK(event_base, th_base_lock);
+ return res;
+}
+
+/* not thread safe */
+
+int
+event_loop(int flags)
+{
+ return event_base_loop(current_base, flags);
+}
+
+int
+event_base_loop(struct event_base *base, int flags)
+{
+ const struct eventop *evsel = base->evsel;
+ struct timeval tv;
+ struct timeval *tv_p;
+ int res, done, retval = 0;
+
+ /* Grab the lock. We will release it inside evsel.dispatch, and again
+ * as we invoke user callbacks. */
+ EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+
+ if (base->running_loop) {
+ event_warnx("%s: reentrant invocation. Only one event_base_loop"
+ " can run on each event_base at once.", __func__);
+ EVBASE_RELEASE_LOCK(base, th_base_lock);
+ return -1;
+ }
+
+ base->running_loop = 1;
+
+ clear_time_cache(base);
+
+ if (base->sig.ev_signal_added && base->sig.ev_n_signals_added)
+ evsig_set_base_(base);
+
+ done = 0;
+
+#ifndef EVENT__DISABLE_THREAD_SUPPORT
+ base->th_owner_id = EVTHREAD_GET_ID();
+#endif
+
+ base->event_gotterm = base->event_break = 0;
+
+ while (!done) {
+ base->event_continue = 0;
+ base->n_deferreds_queued = 0;
+
+ /* Terminate the loop if we have been asked to */
+ if (base->event_gotterm) {
+ break;
+ }
+
+ if (base->event_break) {
+ break;
+ }
+
+ tv_p = &tv;
+ if (!N_ACTIVE_CALLBACKS(base) && !(flags & EVLOOP_NONBLOCK)) {
+ timeout_next(base, &tv_p);
+ } else {
+ /*
+ * if we have active events, we just poll new events
+ * without waiting.
+ */
+ evutil_timerclear(&tv);
+ }
+
+ /* If we have no events, we just exit */
+ if (0==(flags&EVLOOP_NO_EXIT_ON_EMPTY) &&
+ !event_haveevents(base) && !N_ACTIVE_CALLBACKS(base)) {
+ event_debug(("%s: no events registered.", __func__));
+ retval = 1;
+ goto done;
+ }
+
+ event_queue_make_later_events_active(base);
+
+ clear_time_cache(base);
+
+ res = evsel->dispatch(base, tv_p);
+
+ if (res == -1) {
+ event_debug(("%s: dispatch returned unsuccessfully.",
+ __func__));
+ retval = -1;
+ goto done;
+ }
+
+ update_time_cache(base);
+
+ timeout_process(base);
+
+ if (N_ACTIVE_CALLBACKS(base)) {
+ int n = event_process_active(base);
+ if ((flags & EVLOOP_ONCE)
+ && N_ACTIVE_CALLBACKS(base) == 0
+ && n != 0)
+ done = 1;
+ } else if (flags & EVLOOP_NONBLOCK)
+ done = 1;
+ }
+ event_debug(("%s: asked to terminate loop.", __func__));
+
+done:
+ clear_time_cache(base);
+ base->running_loop = 0;
+
+ EVBASE_RELEASE_LOCK(base, th_base_lock);
+
+ return (retval);
+}
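+
+/* Illustrative usage sketch (not part of this file): the usual life cycle is
+ * to create a base, add events, run the loop until it is empty or told to
+ * stop, then free everything.  The names 'fd' and 'read_cb' are hypothetical.
+ *
+ *    struct event_base *base = event_base_new();
+ *    struct event *ev = event_new(base, fd, EV_READ|EV_PERSIST, read_cb, NULL);
+ *    event_add(ev, NULL);
+ *    event_base_dispatch(base);
+ *    event_free(ev);
+ *    event_base_free(base);
+ */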
+
+/* One-time callback to implement event_base_once: invokes the user callback,
+ * then deletes the allocated storage */
+static void
+event_once_cb(evutil_socket_t fd, short events, void *arg)
+{
+ struct event_once *eonce = arg;
+
+ (*eonce->cb)(fd, events, eonce->arg);
+ EVBASE_ACQUIRE_LOCK(eonce->ev.ev_base, th_base_lock);
+ LIST_REMOVE(eonce, next_once);
+ EVBASE_RELEASE_LOCK(eonce->ev.ev_base, th_base_lock);
+ event_debug_unassign(&eonce->ev);
+ mm_free(eonce);
+}
+
+/* not threadsafe, event scheduled once. */
+int
+event_once(evutil_socket_t fd, short events,
+ void (*callback)(evutil_socket_t, short, void *),
+ void *arg, const struct timeval *tv)
+{
+ return event_base_once(current_base, fd, events, callback, arg, tv);
+}
+
+/* Schedules an event once */
+int
+event_base_once(struct event_base *base, evutil_socket_t fd, short events,
+ void (*callback)(evutil_socket_t, short, void *),
+ void *arg, const struct timeval *tv)
+{
+ struct event_once *eonce;
+ int res = 0;
+ int activate = 0;
+
+ /* We cannot support signals that just fire once, or persistent
+ * events. */
+ if (events & (EV_SIGNAL|EV_PERSIST))
+ return (-1);
+
+ if ((eonce = mm_calloc(1, sizeof(struct event_once))) == NULL)
+ return (-1);
+
+ eonce->cb = callback;
+ eonce->arg = arg;
+
+ if ((events & (EV_TIMEOUT|EV_SIGNAL|EV_READ|EV_WRITE|EV_CLOSED)) == EV_TIMEOUT) {
+ evtimer_assign(&eonce->ev, base, event_once_cb, eonce);
+
+ if (tv == NULL || ! evutil_timerisset(tv)) {
+ /* If the event is going to become active immediately,
+ * don't put it on the timeout queue. This is one
+ * idiom for scheduling a callback, so let's make
+ * it fast (and order-preserving). */
+ activate = 1;
+ }
+ } else if (events & (EV_READ|EV_WRITE|EV_CLOSED)) {
+ events &= EV_READ|EV_WRITE|EV_CLOSED;
+
+ event_assign(&eonce->ev, base, fd, events, event_once_cb, eonce);
+ } else {
+ /* Bad event combination */
+ mm_free(eonce);
+ return (-1);
+ }
+
+ if (res == 0) {
+ EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+ if (activate)
+ event_active_nolock_(&eonce->ev, EV_TIMEOUT, 1);
+ else
+ res = event_add_nolock_(&eonce->ev, tv, 0);
+
+ if (res != 0) {
+ mm_free(eonce);
+ return (res);
+ } else {
+ LIST_INSERT_HEAD(&base->once_events, eonce, next_once);
+ }
+ EVBASE_RELEASE_LOCK(base, th_base_lock);
+ }
+
+ return (0);
+}
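+
+/* Illustrative usage sketch (not part of this file): a one-shot timer needs
+ * no caller-managed struct event; the internal storage is freed after the
+ * callback runs.  The names 'base', 'once_cb' and 'arg' are hypothetical.
+ *
+ *    struct timeval two_sec = { 2, 0 };
+ *    event_base_once(base, -1, EV_TIMEOUT, once_cb, arg, &two_sec);
+ */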
+
+int
+event_assign(struct event *ev, struct event_base *base, evutil_socket_t fd, short events, void (*callback)(evutil_socket_t, short, void *), void *arg)
+{
+ if (!base)
+ base = current_base;
+ if (arg == &event_self_cbarg_ptr_)
+ arg = ev;
+
+ event_debug_assert_not_added_(ev);
+
+ ev->ev_base = base;
+
+ ev->ev_callback = callback;
+ ev->ev_arg = arg;
+ ev->ev_fd = fd;
+ ev->ev_events = events;
+ ev->ev_res = 0;
+ ev->ev_flags = EVLIST_INIT;
+ ev->ev_ncalls = 0;
+ ev->ev_pncalls = NULL;
+
+ if (events & EV_SIGNAL) {
+ if ((events & (EV_READ|EV_WRITE|EV_CLOSED)) != 0) {
+ event_warnx("%s: EV_SIGNAL is not compatible with "
+ "EV_READ, EV_WRITE or EV_CLOSED", __func__);
+ return -1;
+ }
+ ev->ev_closure = EV_CLOSURE_EVENT_SIGNAL;
+ } else {
+ if (events & EV_PERSIST) {
+ evutil_timerclear(&ev->ev_io_timeout);
+ ev->ev_closure = EV_CLOSURE_EVENT_PERSIST;
+ } else {
+ ev->ev_closure = EV_CLOSURE_EVENT;
+ }
+ }
+
+ min_heap_elem_init_(ev);
+
+ if (base != NULL) {
+ /* by default, we put new events into the middle priority */
+ ev->ev_pri = base->nactivequeues / 2;
+ }
+
+ event_debug_note_setup_(ev);
+
+ return 0;
+}
+
+int
+event_base_set(struct event_base *base, struct event *ev)
+{
+ /* Only innocent events may be assigned to a different base */
+ if (ev->ev_flags != EVLIST_INIT)
+ return (-1);
+
+ event_debug_assert_is_setup_(ev);
+
+ ev->ev_base = base;
+ ev->ev_pri = base->nactivequeues/2;
+
+ return (0);
+}
+
+void
+event_set(struct event *ev, evutil_socket_t fd, short events,
+ void (*callback)(evutil_socket_t, short, void *), void *arg)
+{
+ int r;
+ r = event_assign(ev, current_base, fd, events, callback, arg);
+ EVUTIL_ASSERT(r == 0);
+}
+
+void *
+event_self_cbarg(void)
+{
+ return &event_self_cbarg_ptr_;
+}
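+
+/* Illustrative usage sketch (not part of this file): event_self_cbarg() lets
+ * the callback receive the event itself as its argument, which is otherwise
+ * awkward because the event does not exist yet when event_new() is called.
+ * The names 'base', 'fd' and 'read_cb' are hypothetical.
+ *
+ *    struct event *ev = event_new(base, fd, EV_READ|EV_PERSIST,
+ *        read_cb, event_self_cbarg());
+ */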
+
+struct event *
+event_base_get_running_event(struct event_base *base)
+{
+ struct event *ev = NULL;
+ EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+ if (EVBASE_IN_THREAD(base)) {
+ struct event_callback *evcb = base->current_event;
+ if (evcb->evcb_flags & EVLIST_INIT)
+ ev = event_callback_to_event(evcb);
+ }
+ EVBASE_RELEASE_LOCK(base, th_base_lock);
+ return ev;
+}
+
+struct event *
+event_new(struct event_base *base, evutil_socket_t fd, short events, void (*cb)(evutil_socket_t, short, void *), void *arg)
+{
+ struct event *ev;
+ ev = mm_malloc(sizeof(struct event));
+ if (ev == NULL)
+ return (NULL);
+ if (event_assign(ev, base, fd, events, cb, arg) < 0) {
+ mm_free(ev);
+ return (NULL);
+ }
+
+ return (ev);
+}
+
+void
+event_free(struct event *ev)
+{
+	/* This is disabled, so that events which have been finalized remain
+	 * a valid target for event_free(). */
+ // event_debug_assert_is_setup_(ev);
+
+ /* make sure that this event won't be coming back to haunt us. */
+ event_del(ev);
+ event_debug_note_teardown_(ev);
+ mm_free(ev);
+}
+
+void
+event_debug_unassign(struct event *ev)
+{
+ event_debug_assert_not_added_(ev);
+ event_debug_note_teardown_(ev);
+
+ ev->ev_flags &= ~EVLIST_INIT;
+}
+
+#define EVENT_FINALIZE_FREE_ 0x10000
+static int
+event_finalize_nolock_(struct event_base *base, unsigned flags, struct event *ev, event_finalize_callback_fn cb)
+{
+ ev_uint8_t closure = (flags & EVENT_FINALIZE_FREE_) ?
+ EV_CLOSURE_EVENT_FINALIZE_FREE : EV_CLOSURE_EVENT_FINALIZE;
+
+ event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
+ ev->ev_closure = closure;
+ ev->ev_evcallback.evcb_cb_union.evcb_evfinalize = cb;
+ event_active_nolock_(ev, EV_FINALIZE, 1);
+ ev->ev_flags |= EVLIST_FINALIZING;
+ return 0;
+}
+
+static int
+event_finalize_impl_(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
+{
+ int r;
+ struct event_base *base = ev->ev_base;
+ if (EVUTIL_FAILURE_CHECK(!base)) {
+ event_warnx("%s: event has no event_base set.", __func__);
+ return -1;
+ }
+
+ EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+ r = event_finalize_nolock_(base, flags, ev, cb);
+ EVBASE_RELEASE_LOCK(base, th_base_lock);
+ return r;
+}
+
+int
+event_finalize(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
+{
+ return event_finalize_impl_(flags, ev, cb);
+}
+
+int
+event_free_finalize(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
+{
+ return event_finalize_impl_(flags|EVENT_FINALIZE_FREE_, ev, cb);
+}
+
+void
+event_callback_finalize_nolock_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *))
+{
+ struct event *ev = NULL;
+ if (evcb->evcb_flags & EVLIST_INIT) {
+ ev = event_callback_to_event(evcb);
+ event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
+ } else {
+ event_callback_cancel_nolock_(base, evcb, 0); /*XXX can this fail?*/
+ }
+
+ evcb->evcb_closure = EV_CLOSURE_CB_FINALIZE;
+ evcb->evcb_cb_union.evcb_cbfinalize = cb;
+ event_callback_activate_nolock_(base, evcb); /* XXX can this really fail?*/
+ evcb->evcb_flags |= EVLIST_FINALIZING;
+}
+
+void
+event_callback_finalize_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *))
+{
+ EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+ event_callback_finalize_nolock_(base, flags, evcb, cb);
+ EVBASE_RELEASE_LOCK(base, th_base_lock);
+}
+
+/** Internal: Finalize all of the n_cbs callbacks in evcbs. The provided
+ * callback will be invoked on *one of them*, after they have *all* been
+ * finalized. */
+int
+event_callback_finalize_many_(struct event_base *base, int n_cbs, struct event_callback **evcbs, void (*cb)(struct event_callback *, void *))
+{
+ int n_pending = 0, i;
+
+ if (base == NULL)
+ base = current_base;
+
+ EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+
+ event_debug(("%s: %d events finalizing", __func__, n_cbs));
+
+ /* At most one can be currently executing; the rest we just
+ * cancel... But we always make sure that the finalize callback
+ * runs. */
+ for (i = 0; i < n_cbs; ++i) {
+ struct event_callback *evcb = evcbs[i];
+ if (evcb == base->current_event) {
+ event_callback_finalize_nolock_(base, 0, evcb, cb);
+ ++n_pending;
+ } else {
+ event_callback_cancel_nolock_(base, evcb, 0);
+ }
+ }
+
+ if (n_pending == 0) {
+ /* Just do the first one. */
+ event_callback_finalize_nolock_(base, 0, evcbs[0], cb);
+ }
+
+ EVBASE_RELEASE_LOCK(base, th_base_lock);
+ return 0;
+}
+
+/*
+ * Sets the priority of an event - if the event is already active,
+ * changing the priority will fail.
+ */
+
+int
+event_priority_set(struct event *ev, int pri)
+{
+ event_debug_assert_is_setup_(ev);
+
+ if (ev->ev_flags & EVLIST_ACTIVE)
+ return (-1);
+ if (pri < 0 || pri >= ev->ev_base->nactivequeues)
+ return (-1);
+
+ ev->ev_pri = pri;
+
+ return (0);
+}
+
+/*
+ * Checks if a specific event is pending or scheduled.
+ */
+
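+/* Illustrative use (a hypothetical sketch, not from the original source):
+ *
+ *	struct timeval tv;
+ *	if (event_pending(ev, EV_READ|EV_TIMEOUT, &tv))
+ *		;  // still scheduled; if a timeout is pending, tv holds its
+ *		   // wall-clock deadline
+ */
+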
+int
+event_pending(const struct event *ev, short event, struct timeval *tv)
+{
+ int flags = 0;
+
+ if (EVUTIL_FAILURE_CHECK(ev->ev_base == NULL)) {
+ event_warnx("%s: event has no event_base set.", __func__);
+ return 0;
+ }
+
+ EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
+ event_debug_assert_is_setup_(ev);
+
+ if (ev->ev_flags & EVLIST_INSERTED)
+ flags |= (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL));
+ if (ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))
+ flags |= ev->ev_res;
+ if (ev->ev_flags & EVLIST_TIMEOUT)
+ flags |= EV_TIMEOUT;
+
+ event &= (EV_TIMEOUT|EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL);
+
+ /* See if there is a timeout that we should report */
+ if (tv != NULL && (flags & event & EV_TIMEOUT)) {
+ struct timeval tmp = ev->ev_timeout;
+ tmp.tv_usec &= MICROSECONDS_MASK;
+ /* correctly remap to real time */
+ evutil_timeradd(&ev->ev_base->tv_clock_diff, &tmp, tv);
+ }
+
+ EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
+
+ return (flags & event);
+}
+
+int
+event_initialized(const struct event *ev)
+{
+ if (!(ev->ev_flags & EVLIST_INIT))
+ return 0;
+
+ return 1;
+}
+
+void
+event_get_assignment(const struct event *event, struct event_base **base_out, evutil_socket_t *fd_out, short *events_out, event_callback_fn *callback_out, void **arg_out)
+{
+ event_debug_assert_is_setup_(event);
+
+ if (base_out)
+ *base_out = event->ev_base;
+ if (fd_out)
+ *fd_out = event->ev_fd;
+ if (events_out)
+ *events_out = event->ev_events;
+ if (callback_out)
+ *callback_out = event->ev_callback;
+ if (arg_out)
+ *arg_out = event->ev_arg;
+}
+
+size_t
+event_get_struct_event_size(void)
+{
+ return sizeof(struct event);
+}
+
+evutil_socket_t
+event_get_fd(const struct event *ev)
+{
+ event_debug_assert_is_setup_(ev);
+ return ev->ev_fd;
+}
+
+struct event_base *
+event_get_base(const struct event *ev)
+{
+ event_debug_assert_is_setup_(ev);
+ return ev->ev_base;
+}
+
+short
+event_get_events(const struct event *ev)
+{
+ event_debug_assert_is_setup_(ev);
+ return ev->ev_events;
+}
+
+event_callback_fn
+event_get_callback(const struct event *ev)
+{
+ event_debug_assert_is_setup_(ev);
+ return ev->ev_callback;
+}
+
+void *
+event_get_callback_arg(const struct event *ev)
+{
+ event_debug_assert_is_setup_(ev);
+ return ev->ev_arg;
+}
+
+int
+event_get_priority(const struct event *ev)
+{
+ event_debug_assert_is_setup_(ev);
+ return ev->ev_pri;
+}
+
+int
+event_add(struct event *ev, const struct timeval *tv)
+{
+ int res;
+
+ if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
+ event_warnx("%s: event has no event_base set.", __func__);
+ return -1;
+ }
+
+ EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
+
+ res = event_add_nolock_(ev, tv, 0);
+
+ EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
+
+ return (res);
+}
+
+/* Helper callback: wake an event_base from another thread. This version
+ * works by writing a byte to one end of a socketpair, so that the event_base
+ * listening on the other end will wake up as the corresponding event
+ * triggers */
+static int
+evthread_notify_base_default(struct event_base *base)
+{
+ char buf[1];
+ int r;
+ buf[0] = (char) 0;
+#ifdef _WIN32
+ r = send(base->th_notify_fd[1], buf, 1, 0);
+#else
+ r = write(base->th_notify_fd[1], buf, 1);
+#endif
+ return (r < 0 && ! EVUTIL_ERR_IS_EAGAIN(errno)) ? -1 : 0;
+}
+
+#ifdef EVENT__HAVE_EVENTFD
+/* Helper callback: wake an event_base from another thread. This version
+ * assumes that you have a working eventfd() implementation. */
+static int
+evthread_notify_base_eventfd(struct event_base *base)
+{
+ ev_uint64_t msg = 1;
+ int r;
+ do {
+ r = write(base->th_notify_fd[0], (void*) &msg, sizeof(msg));
+ } while (r < 0 && errno == EAGAIN);
+
+ return (r < 0) ? -1 : 0;
+}
+#endif
+
+
+/** Tell the thread currently running the event_loop for base (if any) that it
+ * needs to stop waiting in its dispatch function (if it is) and process all
+ * active callbacks. */
+static int
+evthread_notify_base(struct event_base *base)
+{
+ EVENT_BASE_ASSERT_LOCKED(base);
+ if (!base->th_notify_fn)
+ return -1;
+ if (base->is_notify_pending)
+ return 0;
+ base->is_notify_pending = 1;
+ return base->th_notify_fn(base);
+}
+
+/* Implementation function to remove a timeout on a currently pending event.
+ */
+int
+event_remove_timer_nolock_(struct event *ev)
+{
+ struct event_base *base = ev->ev_base;
+
+ EVENT_BASE_ASSERT_LOCKED(base);
+ event_debug_assert_is_setup_(ev);
+
+ event_debug(("event_remove_timer_nolock: event: %p", ev));
+
+ /* If it's not pending on a timeout, we don't need to do anything. */
+ if (ev->ev_flags & EVLIST_TIMEOUT) {
+ event_queue_remove_timeout(base, ev);
+ evutil_timerclear(&ev->ev_.ev_io.ev_timeout);
+ }
+
+ return (0);
+}
+
+int
+event_remove_timer(struct event *ev)
+{
+ int res;
+
+ if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
+ event_warnx("%s: event has no event_base set.", __func__);
+ return -1;
+ }
+
+ EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
+
+ res = event_remove_timer_nolock_(ev);
+
+ EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
+
+ return (res);
+}
+
+/* Implementation function to add an event. Works just like event_add,
+ * except: 1) it requires that we have the lock. 2) if tv_is_absolute is set,
+ * we treat tv as an absolute time, not as an interval to add to the current
+ * time */
+int
+event_add_nolock_(struct event *ev, const struct timeval *tv,
+ int tv_is_absolute)
+{
+ struct event_base *base = ev->ev_base;
+ int res = 0;
+ int notify = 0;
+
+ EVENT_BASE_ASSERT_LOCKED(base);
+ event_debug_assert_is_setup_(ev);
+
+ event_debug((
+ "event_add: event: %p (fd "EV_SOCK_FMT"), %s%s%s%scall %p",
+ ev,
+ EV_SOCK_ARG(ev->ev_fd),
+ ev->ev_events & EV_READ ? "EV_READ " : " ",
+ ev->ev_events & EV_WRITE ? "EV_WRITE " : " ",
+ ev->ev_events & EV_CLOSED ? "EV_CLOSED " : " ",
+ tv ? "EV_TIMEOUT " : " ",
+ ev->ev_callback));
+
+ EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL));
+
+ if (ev->ev_flags & EVLIST_FINALIZING) {
+ /* XXXX debug */
+ return (-1);
+ }
+
+ /*
+ * prepare for timeout insertion further below; if we get a
+ * failure at any step, we should not change any state.
+ */
+ if (tv != NULL && !(ev->ev_flags & EVLIST_TIMEOUT)) {
+ if (min_heap_reserve_(&base->timeheap,
+ 1 + min_heap_size_(&base->timeheap)) == -1)
+ return (-1); /* ENOMEM == errno */
+ }
+
+ /* If the main thread is currently executing a signal event's
+ * callback, and we are not the main thread, then we want to wait
+ * until the callback is done before we mess with the event, or else
+ * we can race on ev_ncalls and ev_pncalls below. */
+#ifndef EVENT__DISABLE_THREAD_SUPPORT
+ if (base->current_event == event_to_event_callback(ev) &&
+ (ev->ev_events & EV_SIGNAL)
+ && !EVBASE_IN_THREAD(base)) {
+ ++base->current_event_waiters;
+ EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
+ }
+#endif
+
+ if ((ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL)) &&
+ !(ev->ev_flags & (EVLIST_INSERTED|EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
+ if (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED))
+ res = evmap_io_add_(base, ev->ev_fd, ev);
+ else if (ev->ev_events & EV_SIGNAL)
+ res = evmap_signal_add_(base, (int)ev->ev_fd, ev);
+ if (res != -1)
+ event_queue_insert_inserted(base, ev);
+ if (res == 1) {
+ /* evmap says we need to notify the main thread. */
+ notify = 1;
+ res = 0;
+ }
+ }
+
+ /*
+ * we should change the timeout state only if the previous event
+ * addition succeeded.
+ */
+ if (res != -1 && tv != NULL) {
+ struct timeval now;
+ int common_timeout;
+#ifdef USE_REINSERT_TIMEOUT
+ int was_common;
+ int old_timeout_idx;
+#endif
+
+ /*
+ * for persistent timeout events, we remember the
+ * timeout value and re-add the event.
+ *
+ * If tv_is_absolute, this was already set.
+ */
+ if (ev->ev_closure == EV_CLOSURE_EVENT_PERSIST && !tv_is_absolute)
+ ev->ev_io_timeout = *tv;
+
+#ifndef USE_REINSERT_TIMEOUT
+ if (ev->ev_flags & EVLIST_TIMEOUT) {
+ event_queue_remove_timeout(base, ev);
+ }
+#endif
+
+ /* Check if it is active due to a timeout. Rescheduling
+ * this timeout before the callback can be executed
+ * removes it from the active list. */
+ if ((ev->ev_flags & EVLIST_ACTIVE) &&
+ (ev->ev_res & EV_TIMEOUT)) {
+ if (ev->ev_events & EV_SIGNAL) {
+ /* See if we are just actively executing
+ * this event in a loop
+ */
+ if (ev->ev_ncalls && ev->ev_pncalls) {
+ /* Abort loop */
+ *ev->ev_pncalls = 0;
+ }
+ }
+
+ event_queue_remove_active(base, event_to_event_callback(ev));
+ }
+
+ gettime(base, &now);
+
+ common_timeout = is_common_timeout(tv, base);
+#ifdef USE_REINSERT_TIMEOUT
+ was_common = is_common_timeout(&ev->ev_timeout, base);
+ old_timeout_idx = COMMON_TIMEOUT_IDX(&ev->ev_timeout);
+#endif
+
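+ /* For a common timeout, the bits of tv_usec above MICROSECONDS_MASK
+ * carry the common-timeout magic and queue index; they are masked out
+ * before the time arithmetic below and OR'd back into the computed
+ * deadline so the event stays tagged with its queue. */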
+ if (tv_is_absolute) {
+ ev->ev_timeout = *tv;
+ } else if (common_timeout) {
+ struct timeval tmp = *tv;
+ tmp.tv_usec &= MICROSECONDS_MASK;
+ evutil_timeradd(&now, &tmp, &ev->ev_timeout);
+ ev->ev_timeout.tv_usec |=
+ (tv->tv_usec & ~MICROSECONDS_MASK);
+ } else {
+ evutil_timeradd(&now, tv, &ev->ev_timeout);
+ }
+
+ event_debug((
+ "event_add: event %p, timeout in %d seconds %d useconds, call %p",
+ ev, (int)tv->tv_sec, (int)tv->tv_usec, ev->ev_callback));
+
+#ifdef USE_REINSERT_TIMEOUT
+ event_queue_reinsert_timeout(base, ev, was_common, common_timeout, old_timeout_idx);
+#else
+ event_queue_insert_timeout(base, ev);
+#endif
+
+ if (common_timeout) {
+ struct common_timeout_list *ctl =
+ get_common_timeout_list(base, &ev->ev_timeout);
+ if (ev == TAILQ_FIRST(&ctl->events)) {
+ common_timeout_schedule(ctl, &now, ev);
+ }
+ } else {
+ struct event* top = NULL;
+ /* See if the earliest timeout is now earlier than it
+ * was before: if so, we will need to tell the main
+ * thread to wake up earlier than it would otherwise.
+ * We double check the timeout of the top element to
+ * handle time distortions due to system suspension.
+ */
+ if (min_heap_elt_is_top_(ev))
+ notify = 1;
+ else if ((top = min_heap_top_(&base->timeheap)) != NULL &&
+ evutil_timercmp(&top->ev_timeout, &now, <))
+ notify = 1;
+ }
+ }
+
+ /* if we are not in the right thread, we need to wake up the loop */
+ if (res != -1 && notify && EVBASE_NEED_NOTIFY(base))
+ evthread_notify_base(base);
+
+ event_debug_note_add_(ev);
+
+ return (res);
+}
+
+static int
+event_del_(struct event *ev, int blocking)
+{
+ int res;
+
+ if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
+ event_warnx("%s: event has no event_base set.", __func__);
+ return -1;
+ }
+
+ EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
+
+ res = event_del_nolock_(ev, blocking);
+
+ EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
+
+ return (res);
+}
+
+int
+event_del(struct event *ev)
+{
+ return event_del_(ev, EVENT_DEL_AUTOBLOCK);
+}
+
+int
+event_del_block(struct event *ev)
+{
+ return event_del_(ev, EVENT_DEL_BLOCK);
+}
+
+int
+event_del_noblock(struct event *ev)
+{
+ return event_del_(ev, EVENT_DEL_NOBLOCK);
+}
+
+/** Helper for event_del: always called with th_base_lock held.
+ *
+ * "blocking" must be one of the EVENT_DEL_{BLOCK, NOBLOCK, AUTOBLOCK,
+ * EVEN_IF_FINALIZING} values. See those for more information.
+ */
+int
+event_del_nolock_(struct event *ev, int blocking)
+{
+ struct event_base *base;
+ int res = 0, notify = 0;
+
+ event_debug(("event_del: %p (fd "EV_SOCK_FMT"), callback %p",
+ ev, EV_SOCK_ARG(ev->ev_fd), ev->ev_callback));
+
+ /* An event without a base has not been added */
+ if (ev->ev_base == NULL)
+ return (-1);
+
+ EVENT_BASE_ASSERT_LOCKED(ev->ev_base);
+
+ if (blocking != EVENT_DEL_EVEN_IF_FINALIZING) {
+ if (ev->ev_flags & EVLIST_FINALIZING) {
+ /* XXXX Debug */
+ return 0;
+ }
+ }
+
+ /* If the main thread is currently executing this event's callback,
+ * and we are not the main thread, then we want to wait until the
+ * callback is done before we start removing the event. That way,
+ * when this function returns, it will be safe to free the
+ * user-supplied argument. */
+ base = ev->ev_base;
+#ifndef EVENT__DISABLE_THREAD_SUPPORT
+ if (blocking != EVENT_DEL_NOBLOCK &&
+ base->current_event == event_to_event_callback(ev) &&
+ !EVBASE_IN_THREAD(base) &&
+ (blocking == EVENT_DEL_BLOCK || !(ev->ev_events & EV_FINALIZE))) {
+ ++base->current_event_waiters;
+ EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
+ }
+#endif
+
+ EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL));
+
+ /* See if we are just actively executing this event in a loop */
+ if (ev->ev_events & EV_SIGNAL) {
+ if (ev->ev_ncalls && ev->ev_pncalls) {
+ /* Abort loop */
+ *ev->ev_pncalls = 0;
+ }
+ }
+
+ if (ev->ev_flags & EVLIST_TIMEOUT) {
+ /* NOTE: We never need to notify the main thread because of a
+ * deleted timeout event: all that could happen if we don't is
+ * that the dispatch loop might wake up too early. But the
+ * point of notifying the main thread _is_ to wake up the
+ * dispatch loop early anyway, so we wouldn't gain anything by
+ * doing it.
+ */
+ event_queue_remove_timeout(base, ev);
+ }
+
+ if (ev->ev_flags & EVLIST_ACTIVE)
+ event_queue_remove_active(base, event_to_event_callback(ev));
+ else if (ev->ev_flags & EVLIST_ACTIVE_LATER)
+ event_queue_remove_active_later(base, event_to_event_callback(ev));
+
+ if (ev->ev_flags & EVLIST_INSERTED) {
+ event_queue_remove_inserted(base, ev);
+ if (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED))
+ res = evmap_io_del_(base, ev->ev_fd, ev);
+ else
+ res = evmap_signal_del_(base, (int)ev->ev_fd, ev);
+ if (res == 1) {
+ /* evmap says we need to notify the main thread. */
+ notify = 1;
+ res = 0;
+ }
+ }
+
+ /* if we are not in the right thread, we need to wake up the loop */
+ if (res != -1 && notify && EVBASE_NEED_NOTIFY(base))
+ evthread_notify_base(base);
+
+ event_debug_note_del_(ev);
+
+ return (res);
+}
+
+void
+event_active(struct event *ev, int res, short ncalls)
+{
+ if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
+ event_warnx("%s: event has no event_base set.", __func__);
+ return;
+ }
+
+ EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
+
+ event_debug_assert_is_setup_(ev);
+
+ event_active_nolock_(ev, res, ncalls);
+
+ EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
+}
+
+
+void
+event_active_nolock_(struct event *ev, int res, short ncalls)
+{
+ struct event_base *base;
+
+ event_debug(("event_active: %p (fd "EV_SOCK_FMT"), res %d, callback %p",
+ ev, EV_SOCK_ARG(ev->ev_fd), (int)res, ev->ev_callback));
+
+ base = ev->ev_base;
+ EVENT_BASE_ASSERT_LOCKED(base);
+
+ if (ev->ev_flags & EVLIST_FINALIZING) {
+ /* XXXX debug */
+ return;
+ }
+
+ switch ((ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
+ default:
+ case EVLIST_ACTIVE|EVLIST_ACTIVE_LATER:
+ EVUTIL_ASSERT(0);
+ break;
+ case EVLIST_ACTIVE:
+ /* We get different kinds of events, add them together */
+ ev->ev_res |= res;
+ return;
+ case EVLIST_ACTIVE_LATER:
+ ev->ev_res |= res;
+ break;
+ case 0:
+ ev->ev_res = res;
+ break;
+ }
+
+ if (ev->ev_pri < base->event_running_priority)
+ base->event_continue = 1;
+
+ if (ev->ev_events & EV_SIGNAL) {
+#ifndef EVENT__DISABLE_THREAD_SUPPORT
+ if (base->current_event == event_to_event_callback(ev) &&
+ !EVBASE_IN_THREAD(base)) {
+ ++base->current_event_waiters;
+ EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
+ }
+#endif
+ ev->ev_ncalls = ncalls;
+ ev->ev_pncalls = NULL;
+ }
+
+ event_callback_activate_nolock_(base, event_to_event_callback(ev));
+}
+
+void
+event_active_later_(struct event *ev, int res)
+{
+ EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
+ event_active_later_nolock_(ev, res);
+ EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
+}
+
+void
+event_active_later_nolock_(struct event *ev, int res)
+{
+ struct event_base *base = ev->ev_base;
+ EVENT_BASE_ASSERT_LOCKED(base);
+
+ if (ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) {
+ /* We get different kinds of events, add them together */
+ ev->ev_res |= res;
+ return;
+ }
+
+ ev->ev_res = res;
+
+ event_callback_activate_later_nolock_(base, event_to_event_callback(ev));
+}
+
+int
+event_callback_activate_(struct event_base *base,
+ struct event_callback *evcb)
+{
+ int r;
+ EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+ r = event_callback_activate_nolock_(base, evcb);
+ EVBASE_RELEASE_LOCK(base, th_base_lock);
+ return r;
+}
+
+int
+event_callback_activate_nolock_(struct event_base *base,
+ struct event_callback *evcb)
+{
+ int r = 1;
+
+ if (evcb->evcb_flags & EVLIST_FINALIZING)
+ return 0;
+
+ switch (evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) {
+ default:
+ EVUTIL_ASSERT(0);
+ case EVLIST_ACTIVE_LATER:
+ event_queue_remove_active_later(base, evcb);
+ r = 0;
+ break;
+ case EVLIST_ACTIVE:
+ return 0;
+ case 0:
+ break;
+ }
+
+ event_queue_insert_active(base, evcb);
+
+ if (EVBASE_NEED_NOTIFY(base))
+ evthread_notify_base(base);
+
+ return r;
+}
+
+int
+event_callback_activate_later_nolock_(struct event_base *base,
+ struct event_callback *evcb)
+{
+ if (evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))
+ return 0;
+
+ event_queue_insert_active_later(base, evcb);
+ if (EVBASE_NEED_NOTIFY(base))
+ evthread_notify_base(base);
+ return 1;
+}
+
+void
+event_callback_init_(struct event_base *base,
+ struct event_callback *cb)
+{
+ memset(cb, 0, sizeof(*cb));
+ cb->evcb_pri = base->nactivequeues - 1;
+}
+
+int
+event_callback_cancel_(struct event_base *base,
+ struct event_callback *evcb)
+{
+ int r;
+ EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+ r = event_callback_cancel_nolock_(base, evcb, 0);
+ EVBASE_RELEASE_LOCK(base, th_base_lock);
+ return r;
+}
+
+int
+event_callback_cancel_nolock_(struct event_base *base,
+ struct event_callback *evcb, int even_if_finalizing)
+{
+ if ((evcb->evcb_flags & EVLIST_FINALIZING) && !even_if_finalizing)
+ return 0;
+
+ if (evcb->evcb_flags & EVLIST_INIT)
+ return event_del_nolock_(event_callback_to_event(evcb),
+ even_if_finalizing ? EVENT_DEL_EVEN_IF_FINALIZING : EVENT_DEL_AUTOBLOCK);
+
+ switch ((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
+ default:
+ case EVLIST_ACTIVE|EVLIST_ACTIVE_LATER:
+ EVUTIL_ASSERT(0);
+ break;
+ case EVLIST_ACTIVE:
+ /* The callback is on the active queue; just remove it. */
+ event_queue_remove_active(base, evcb);
+ return 0;
+ case EVLIST_ACTIVE_LATER:
+ event_queue_remove_active_later(base, evcb);
+ break;
+ case 0:
+ break;
+ }
+
+ return 0;
+}
+
+void
+event_deferred_cb_init_(struct event_callback *cb, ev_uint8_t priority, deferred_cb_fn fn, void *arg)
+{
+ memset(cb, 0, sizeof(*cb));
+ cb->evcb_cb_union.evcb_selfcb = fn;
+ cb->evcb_arg = arg;
+ cb->evcb_pri = priority;
+ cb->evcb_closure = EV_CLOSURE_CB_SELF;
+}
+
+void
+event_deferred_cb_set_priority_(struct event_callback *cb, ev_uint8_t priority)
+{
+ cb->evcb_pri = priority;
+}
+
+void
+event_deferred_cb_cancel_(struct event_base *base, struct event_callback *cb)
+{
+ if (!base)
+ base = current_base;
+ event_callback_cancel_(base, cb);
+}
+
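+/* Cap on how many deferred callbacks get activated directly per pass; once
+ * the base has more than this many queued, event_deferred_cb_schedule_()
+ * falls back to the "active later" queue so a flood of deferred callbacks
+ * cannot starve the loop. */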
+#define MAX_DEFERREDS_QUEUED 32
+int
+event_deferred_cb_schedule_(struct event_base *base, struct event_callback *cb)
+{
+ int r = 1;
+ if (!base)
+ base = current_base;
+ EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+ if (base->n_deferreds_queued > MAX_DEFERREDS_QUEUED) {
+ r = event_callback_activate_later_nolock_(base, cb);
+ } else {
+ r = event_callback_activate_nolock_(base, cb);
+ if (r) {
+ ++base->n_deferreds_queued;
+ }
+ }
+ EVBASE_RELEASE_LOCK(base, th_base_lock);
+ return r;
+}
+
+static int
+timeout_next(struct event_base *base, struct timeval **tv_p)
+{
+ /* Caller must hold th_base_lock */
+ struct timeval now;
+ struct event *ev;
+ struct timeval *tv = *tv_p;
+ int res = 0;
+
+ ev = min_heap_top_(&base->timeheap);
+
+ if (ev == NULL) {
+ /* if no time-based events are active wait for I/O */
+ *tv_p = NULL;
+ goto out;
+ }
+
+ if (gettime(base, &now) == -1) {
+ res = -1;
+ goto out;
+ }
+
+ if (evutil_timercmp(&ev->ev_timeout, &now, <=)) {
+ evutil_timerclear(tv);
+ goto out;
+ }
+
+ evutil_timersub(&ev->ev_timeout, &now, tv);
+
+ EVUTIL_ASSERT(tv->tv_sec >= 0);
+ EVUTIL_ASSERT(tv->tv_usec >= 0);
+ event_debug(("timeout_next: event: %p, in %d seconds, %d useconds", ev, (int)tv->tv_sec, (int)tv->tv_usec));
+
+out:
+ return (res);
+}
+
+/* Activate every event whose timeout has elapsed. */
+static void
+timeout_process(struct event_base *base)
+{
+ /* Caller must hold lock. */
+ struct timeval now;
+ struct event *ev;
+
+ if (min_heap_empty_(&base->timeheap)) {
+ return;
+ }
+
+ gettime(base, &now);
+
+ while ((ev = min_heap_top_(&base->timeheap))) {
+ if (evutil_timercmp(&ev->ev_timeout, &now, >))
+ break;
+
+ /* delete this event from the I/O queues */
+ event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
+
+ event_debug(("timeout_process: event: %p, call %p",
+ ev, ev->ev_callback));
+ event_active_nolock_(ev, EV_TIMEOUT, 1);
+ }
+}
+
+#if (EVLIST_INTERNAL >> 4) != 1
+#error "Mismatch for value of EVLIST_INTERNAL"
+#endif
+
+#ifndef MAX
+#define MAX(a,b) (((a)>(b))?(a):(b))
+#endif
+
+#define MAX_EVENT_COUNT(var, v) var = MAX(var, v)
+
+/* These are a fancy way to spell
+ if (!(flags & EVLIST_INTERNAL))
+ base->event_count--/++;
+*/
+#define DECR_EVENT_COUNT(base,flags) \
+ ((base)->event_count -= (~((flags) >> 4) & 1))
+#define INCR_EVENT_COUNT(base,flags) do { \
+ ((base)->event_count += (~((flags) >> 4) & 1)); \
+ MAX_EVENT_COUNT((base)->event_count_max, (base)->event_count); \
+} while (0)
+
+static void
+event_queue_remove_inserted(struct event_base *base, struct event *ev)
+{
+ EVENT_BASE_ASSERT_LOCKED(base);
+ if (EVUTIL_FAILURE_CHECK(!(ev->ev_flags & EVLIST_INSERTED))) {
+ event_errx(1, "%s: %p(fd "EV_SOCK_FMT") not on queue %x", __func__,
+ ev, EV_SOCK_ARG(ev->ev_fd), EVLIST_INSERTED);
+ return;
+ }
+ DECR_EVENT_COUNT(base, ev->ev_flags);
+ ev->ev_flags &= ~EVLIST_INSERTED;
+}
+static void
+event_queue_remove_active(struct event_base *base, struct event_callback *evcb)
+{
+ EVENT_BASE_ASSERT_LOCKED(base);
+ if (EVUTIL_FAILURE_CHECK(!(evcb->evcb_flags & EVLIST_ACTIVE))) {
+ event_errx(1, "%s: %p not on queue %x", __func__,
+ evcb, EVLIST_ACTIVE);
+ return;
+ }
+ DECR_EVENT_COUNT(base, evcb->evcb_flags);
+ evcb->evcb_flags &= ~EVLIST_ACTIVE;
+ base->event_count_active--;
+
+ TAILQ_REMOVE(&base->activequeues[evcb->evcb_pri],
+ evcb, evcb_active_next);
+}
+static void
+event_queue_remove_active_later(struct event_base *base, struct event_callback *evcb)
+{
+ EVENT_BASE_ASSERT_LOCKED(base);
+ if (EVUTIL_FAILURE_CHECK(!(evcb->evcb_flags & EVLIST_ACTIVE_LATER))) {
+ event_errx(1, "%s: %p not on queue %x", __func__,
+ evcb, EVLIST_ACTIVE_LATER);
+ return;
+ }
+ DECR_EVENT_COUNT(base, evcb->evcb_flags);
+ evcb->evcb_flags &= ~EVLIST_ACTIVE_LATER;
+ base->event_count_active--;
+
+ TAILQ_REMOVE(&base->active_later_queue, evcb, evcb_active_next);
+}
+static void
+event_queue_remove_timeout(struct event_base *base, struct event *ev)
+{
+ EVENT_BASE_ASSERT_LOCKED(base);
+ if (EVUTIL_FAILURE_CHECK(!(ev->ev_flags & EVLIST_TIMEOUT))) {
+ event_errx(1, "%s: %p(fd "EV_SOCK_FMT") not on queue %x", __func__,
+ ev, EV_SOCK_ARG(ev->ev_fd), EVLIST_TIMEOUT);
+ return;
+ }
+ DECR_EVENT_COUNT(base, ev->ev_flags);
+ ev->ev_flags &= ~EVLIST_TIMEOUT;
+
+ if (is_common_timeout(&ev->ev_timeout, base)) {
+ struct common_timeout_list *ctl =
+ get_common_timeout_list(base, &ev->ev_timeout);
+ TAILQ_REMOVE(&ctl->events, ev,
+ ev_timeout_pos.ev_next_with_common_timeout);
+ } else {
+ min_heap_erase_(&base->timeheap, ev);
+ }
+}
+
+#ifdef USE_REINSERT_TIMEOUT
+/* Remove and reinsert 'ev' into the timeout queue. */
+static void
+event_queue_reinsert_timeout(struct event_base *base, struct event *ev,
+ int was_common, int is_common, int old_timeout_idx)
+{
+ struct common_timeout_list *ctl;
+ if (!(ev->ev_flags & EVLIST_TIMEOUT)) {
+ event_queue_insert_timeout(base, ev);
+ return;
+ }
+
+ switch ((was_common<<1) | is_common) {
+ case 3: /* Changing from one common timeout to another */
+ ctl = base->common_timeout_queues[old_timeout_idx];
+ TAILQ_REMOVE(&ctl->events, ev,
+ ev_timeout_pos.ev_next_with_common_timeout);
+ ctl = get_common_timeout_list(base, &ev->ev_timeout);
+ insert_common_timeout_inorder(ctl, ev);
+ break;
+ case 2: /* Was common; is no longer common */
+ ctl = base->common_timeout_queues[old_timeout_idx];
+ TAILQ_REMOVE(&ctl->events, ev,
+ ev_timeout_pos.ev_next_with_common_timeout);
+ min_heap_push_(&base->timeheap, ev);
+ break;
+ case 1: /* Wasn't common; has become common. */
+ min_heap_erase_(&base->timeheap, ev);
+ ctl = get_common_timeout_list(base, &ev->ev_timeout);
+ insert_common_timeout_inorder(ctl, ev);
+ break;
+ case 0: /* was in heap; is still on heap. */
+ min_heap_adjust_(&base->timeheap, ev);
+ break;
+ default:
+ EVUTIL_ASSERT(0); /* unreachable */
+ break;
+ }
+}
+#endif
+
+/* Add 'ev' to the common timeout list 'ctl'. */
+static void
+insert_common_timeout_inorder(struct common_timeout_list *ctl,
+ struct event *ev)
+{
+ struct event *e;
+ /* By all logic, we should just be able to append 'ev' to the end of
+ * ctl->events, since the timeout on each 'ev' is set to {the common
+ * timeout} + {the time when we add the event}, and so the events
+ * should arrive in order of their timeouts. But just in case
+ * there's some wacky threading issue going on, we do a search from
+ * the end of 'ctl->events' to find the right insertion point.
+ */
+ TAILQ_FOREACH_REVERSE(e, &ctl->events,
+ event_list, ev_timeout_pos.ev_next_with_common_timeout) {
+ /* This timercmp is a little sneaky, since both ev and e have
+ * magic values in tv_usec. Fortunately, they ought to have
+ * the _same_ magic values in tv_usec. Let's assert for that.
+ */
+ EVUTIL_ASSERT(
+ is_same_common_timeout(&e->ev_timeout, &ev->ev_timeout));
+ if (evutil_timercmp(&ev->ev_timeout, &e->ev_timeout, >=)) {
+ TAILQ_INSERT_AFTER(&ctl->events, e, ev,
+ ev_timeout_pos.ev_next_with_common_timeout);
+ return;
+ }
+ }
+ TAILQ_INSERT_HEAD(&ctl->events, ev,
+ ev_timeout_pos.ev_next_with_common_timeout);
+}
+
+static void
+event_queue_insert_inserted(struct event_base *base, struct event *ev)
+{
+ EVENT_BASE_ASSERT_LOCKED(base);
+
+ if (EVUTIL_FAILURE_CHECK(ev->ev_flags & EVLIST_INSERTED)) {
+ event_errx(1, "%s: %p(fd "EV_SOCK_FMT") already inserted", __func__,
+ ev, EV_SOCK_ARG(ev->ev_fd));
+ return;
+ }
+
+ INCR_EVENT_COUNT(base, ev->ev_flags);
+
+ ev->ev_flags |= EVLIST_INSERTED;
+}
+
+static void
+event_queue_insert_active(struct event_base *base, struct event_callback *evcb)
+{
+ EVENT_BASE_ASSERT_LOCKED(base);
+
+ if (evcb->evcb_flags & EVLIST_ACTIVE) {
+ /* Double insertion is possible for active events */
+ return;
+ }
+
+ INCR_EVENT_COUNT(base, evcb->evcb_flags);
+
+ evcb->evcb_flags |= EVLIST_ACTIVE;
+
+ base->event_count_active++;
+ MAX_EVENT_COUNT(base->event_count_active_max, base->event_count_active);
+ EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
+ TAILQ_INSERT_TAIL(&base->activequeues[evcb->evcb_pri],
+ evcb, evcb_active_next);
+}
+
+static void
+event_queue_insert_active_later(struct event_base *base, struct event_callback *evcb)
+{
+ EVENT_BASE_ASSERT_LOCKED(base);
+ if (evcb->evcb_flags & (EVLIST_ACTIVE_LATER|EVLIST_ACTIVE)) {
+ /* Double insertion is possible */
+ return;
+ }
+
+ INCR_EVENT_COUNT(base, evcb->evcb_flags);
+ evcb->evcb_flags |= EVLIST_ACTIVE_LATER;
+ base->event_count_active++;
+ MAX_EVENT_COUNT(base->event_count_active_max, base->event_count_active);
+ EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
+ TAILQ_INSERT_TAIL(&base->active_later_queue, evcb, evcb_active_next);
+}
+
+static void
+event_queue_insert_timeout(struct event_base *base, struct event *ev)
+{
+ EVENT_BASE_ASSERT_LOCKED(base);
+
+ if (EVUTIL_FAILURE_CHECK(ev->ev_flags & EVLIST_TIMEOUT)) {
+ event_errx(1, "%s: %p(fd "EV_SOCK_FMT") already on timeout", __func__,
+ ev, EV_SOCK_ARG(ev->ev_fd));
+ return;
+ }
+
+ INCR_EVENT_COUNT(base, ev->ev_flags);
+
+ ev->ev_flags |= EVLIST_TIMEOUT;
+
+ if (is_common_timeout(&ev->ev_timeout, base)) {
+ struct common_timeout_list *ctl =
+ get_common_timeout_list(base, &ev->ev_timeout);
+ insert_common_timeout_inorder(ctl, ev);
+ } else {
+ min_heap_push_(&base->timeheap, ev);
+ }
+}
+
+static void
+event_queue_make_later_events_active(struct event_base *base)
+{
+ struct event_callback *evcb;
+ EVENT_BASE_ASSERT_LOCKED(base);
+
+ while ((evcb = TAILQ_FIRST(&base->active_later_queue))) {
+ TAILQ_REMOVE(&base->active_later_queue, evcb, evcb_active_next);
+ evcb->evcb_flags = (evcb->evcb_flags & ~EVLIST_ACTIVE_LATER) | EVLIST_ACTIVE;
+ EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
+ TAILQ_INSERT_TAIL(&base->activequeues[evcb->evcb_pri], evcb, evcb_active_next);
+ base->n_deferreds_queued += (evcb->evcb_closure == EV_CLOSURE_CB_SELF);
+ }
+}
+
+/* Functions for debugging */
+
+const char *
+event_get_version(void)
+{
+ return (EVENT__VERSION);
+}
+
+ev_uint32_t
+event_get_version_number(void)
+{
+ return (EVENT__NUMERIC_VERSION);
+}
+
+/*
+ * No thread-safe interface needed - the information should be the same
+ * for all threads.
+ */
+
+const char *
+event_get_method(void)
+{
+ return (current_base->evsel->name);
+}
+
+#ifndef EVENT__DISABLE_MM_REPLACEMENT
+static void *(*mm_malloc_fn_)(size_t sz) = NULL;
+static void *(*mm_realloc_fn_)(void *p, size_t sz) = NULL;
+static void (*mm_free_fn_)(void *p) = NULL;
+
+void *
+event_mm_malloc_(size_t sz)
+{
+ if (sz == 0)
+ return NULL;
+
+ if (mm_malloc_fn_)
+ return mm_malloc_fn_(sz);
+ else
+ return malloc(sz);
+}
+
+void *
+event_mm_calloc_(size_t count, size_t size)
+{
+ if (count == 0 || size == 0)
+ return NULL;
+
+ if (mm_malloc_fn_) {
+ size_t sz = count * size;
+ void *p = NULL;
+ if (count > EV_SIZE_MAX / size)
+ goto error;
+ p = mm_malloc_fn_(sz);
+ if (p)
+ return memset(p, 0, sz);
+ } else {
+ void *p = calloc(count, size);
+#ifdef _WIN32
+ /* Windows calloc doesn't reliably set ENOMEM */
+ if (p == NULL)
+ goto error;
+#endif
+ return p;
+ }
+
+error:
+ errno = ENOMEM;
+ return NULL;
+}
+
+char *
+event_mm_strdup_(const char *str)
+{
+ if (!str) {
+ errno = EINVAL;
+ return NULL;
+ }
+
+ if (mm_malloc_fn_) {
+ size_t ln = strlen(str);
+ void *p = NULL;
+ if (ln == EV_SIZE_MAX)
+ goto error;
+ p = mm_malloc_fn_(ln+1);
+ if (p)
+ return memcpy(p, str, ln+1);
+ } else
+#ifdef _WIN32
+ return _strdup(str);
+#else
+ return strdup(str);
+#endif
+
+error:
+ errno = ENOMEM;
+ return NULL;
+}
+
+void *
+event_mm_realloc_(void *ptr, size_t sz)
+{
+ if (mm_realloc_fn_)
+ return mm_realloc_fn_(ptr, sz);
+ else
+ return realloc(ptr, sz);
+}
+
+void
+event_mm_free_(void *ptr)
+{
+ if (mm_free_fn_)
+ mm_free_fn_(ptr);
+ else
+ free(ptr);
+}
+
+void
+event_set_mem_functions(void *(*malloc_fn)(size_t sz),
+ void *(*realloc_fn)(void *ptr, size_t sz),
+ void (*free_fn)(void *ptr))
+{
+ mm_malloc_fn_ = malloc_fn;
+ mm_realloc_fn_ = realloc_fn;
+ mm_free_fn_ = free_fn;
+}
+#endif
+
+#ifdef EVENT__HAVE_EVENTFD
+static void
+evthread_notify_drain_eventfd(evutil_socket_t fd, short what, void *arg)
+{
+ ev_uint64_t msg;
+ ev_ssize_t r;
+ struct event_base *base = arg;
+
+ r = read(fd, (void*) &msg, sizeof(msg));
+ if (r<0 && errno != EAGAIN) {
+ event_sock_warn(fd, "Error reading from eventfd");
+ }
+ EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+ base->is_notify_pending = 0;
+ EVBASE_RELEASE_LOCK(base, th_base_lock);
+}
+#endif
+
+static void
+evthread_notify_drain_default(evutil_socket_t fd, short what, void *arg)
+{
+ unsigned char buf[1024];
+ struct event_base *base = arg;
+#ifdef _WIN32
+ while (recv(fd, (char*)buf, sizeof(buf), 0) > 0)
+ ;
+#else
+ while (read(fd, (char*)buf, sizeof(buf)) > 0)
+ ;
+#endif
+
+ EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+ base->is_notify_pending = 0;
+ EVBASE_RELEASE_LOCK(base, th_base_lock);
+}
+
+int
+evthread_make_base_notifiable(struct event_base *base)
+{
+ int r;
+ if (!base)
+ return -1;
+
+ EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+ r = evthread_make_base_notifiable_nolock_(base);
+ EVBASE_RELEASE_LOCK(base, th_base_lock);
+ return r;
+}
+
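+/* Pick the cheapest wakeup mechanism available: a kqueue backend can wake
+ * itself, eventfd() is used where present, and an internal pipe/socketpair
+ * is the portable fallback. */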
+static int
+evthread_make_base_notifiable_nolock_(struct event_base *base)
+{
+ void (*cb)(evutil_socket_t, short, void *);
+ int (*notify)(struct event_base *);
+
+ if (base->th_notify_fn != NULL) {
+ /* The base is already notifiable: we're doing fine. */
+ return 0;
+ }
+
+#if defined(EVENT__HAVE_WORKING_KQUEUE)
+ if (base->evsel == &kqops && event_kq_add_notify_event_(base) == 0) {
+ base->th_notify_fn = event_kq_notify_base_;
+ /* No need to add an event here; the backend can wake
+ * itself up just fine. */
+ return 0;
+ }
+#endif
+
+#ifdef EVENT__HAVE_EVENTFD
+ base->th_notify_fd[0] = evutil_eventfd_(0,
+ EVUTIL_EFD_CLOEXEC|EVUTIL_EFD_NONBLOCK);
+ if (base->th_notify_fd[0] >= 0) {
+ base->th_notify_fd[1] = -1;
+ notify = evthread_notify_base_eventfd;
+ cb = evthread_notify_drain_eventfd;
+ } else
+#endif
+ if (evutil_make_internal_pipe_(base->th_notify_fd) == 0) {
+ notify = evthread_notify_base_default;
+ cb = evthread_notify_drain_default;
+ } else {
+ return -1;
+ }
+
+ base->th_notify_fn = notify;
+
+ /* prepare an event that we can use for wakeup */
+ event_assign(&base->th_notify, base, base->th_notify_fd[0],
+ EV_READ|EV_PERSIST, cb, base);
+
+ /* we need to mark this as an internal event */
+ base->th_notify.ev_flags |= EVLIST_INTERNAL;
+ event_priority_set(&base->th_notify, 0);
+
+ return event_add_nolock_(&base->th_notify, NULL, 0);
+}
+
+int
+event_base_foreach_event_nolock_(struct event_base *base,
+ event_base_foreach_event_cb fn, void *arg)
+{
+ int r, i;
+ unsigned u;
+ struct event *ev;
+
+ /* Start out with all the EVLIST_INSERTED events. */
+ if ((r = evmap_foreach_event_(base, fn, arg)))
+ return r;
+
+ /* Okay, now we deal with those events that have timeouts and are in
+ * the min-heap. */
+ for (u = 0; u < base->timeheap.n; ++u) {
+ ev = base->timeheap.p[u];
+ if (ev->ev_flags & EVLIST_INSERTED) {
+ /* we already processed this one */
+ continue;
+ }
+ if ((r = fn(base, ev, arg)))
+ return r;
+ }
+
+ /* Now for the events in one of the common-timeout queues. */
+ for (i = 0; i < base->n_common_timeouts; ++i) {
+ struct common_timeout_list *ctl =
+ base->common_timeout_queues[i];
+ TAILQ_FOREACH(ev, &ctl->events,
+ ev_timeout_pos.ev_next_with_common_timeout) {
+ if (ev->ev_flags & EVLIST_INSERTED) {
+ /* we already processed this one */
+ continue;
+ }
+ if ((r = fn(base, ev, arg)))
+ return r;
+ }
+ }
+
+ /* Finally, we deal with all the active events that we haven't touched
+ * yet. */
+ for (i = 0; i < base->nactivequeues; ++i) {
+ struct event_callback *evcb;
+ TAILQ_FOREACH(evcb, &base->activequeues[i], evcb_active_next) {
+ if ((evcb->evcb_flags & (EVLIST_INIT|EVLIST_INSERTED|EVLIST_TIMEOUT)) != EVLIST_INIT) {
+ /* This isn't an event (EVLIST_INIT clear), or
+ * we already processed it (inserted or
+ * timeout set). */
+ continue;
+ }
+ ev = event_callback_to_event(evcb);
+ if ((r = fn(base, ev, arg)))
+ return r;
+ }
+ }
+
+ return 0;
+}
+
+/* Helper for event_base_dump_events: called on each event in the event base;
+ * dumps only the inserted events. */
+static int
+dump_inserted_event_fn(const struct event_base *base, const struct event *e, void *arg)
+{
+ FILE *output = arg;
+ const char *gloss = (e->ev_events & EV_SIGNAL) ?
+ "sig" : "fd ";
+
+ if (! (e->ev_flags & (EVLIST_INSERTED|EVLIST_TIMEOUT)))
+ return 0;
+
+ fprintf(output, " %p [%s "EV_SOCK_FMT"]%s%s%s%s%s%s",
+ (void*)e, gloss, EV_SOCK_ARG(e->ev_fd),
+ (e->ev_events&EV_READ)?" Read":"",
+ (e->ev_events&EV_WRITE)?" Write":"",
+ (e->ev_events&EV_CLOSED)?" EOF":"",
+ (e->ev_events&EV_SIGNAL)?" Signal":"",
+ (e->ev_events&EV_PERSIST)?" Persist":"",
+ (e->ev_flags&EVLIST_INTERNAL)?" Internal":"");
+ if (e->ev_flags & EVLIST_TIMEOUT) {
+ struct timeval tv;
+ tv.tv_sec = e->ev_timeout.tv_sec;
+ tv.tv_usec = e->ev_timeout.tv_usec & MICROSECONDS_MASK;
+ evutil_timeradd(&tv, &base->tv_clock_diff, &tv);
+ fprintf(output, " Timeout=%ld.%06d",
+ (long)tv.tv_sec, (int)(tv.tv_usec & MICROSECONDS_MASK));
+ }
+ fputc('\n', output);
+
+ return 0;
+}
+
+/* Helper for event_base_dump_events: called on each event in the event base;
+ * dumps only the active events. */
+static int
+dump_active_event_fn(const struct event_base *base, const struct event *e, void *arg)
+{
+ FILE *output = arg;
+ const char *gloss = (e->ev_events & EV_SIGNAL) ?
+ "sig" : "fd ";
+
+ if (! (e->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)))
+ return 0;
+
+ fprintf(output, " %p [%s "EV_SOCK_FMT", priority=%d]%s%s%s%s%s active%s%s\n",
+ (void*)e, gloss, EV_SOCK_ARG(e->ev_fd), e->ev_pri,
+ (e->ev_res&EV_READ)?" Read":"",
+ (e->ev_res&EV_WRITE)?" Write":"",
+ (e->ev_res&EV_CLOSED)?" EOF":"",
+ (e->ev_res&EV_SIGNAL)?" Signal":"",
+ (e->ev_res&EV_TIMEOUT)?" Timeout":"",
+ (e->ev_flags&EVLIST_INTERNAL)?" [Internal]":"",
+ (e->ev_flags&EVLIST_ACTIVE_LATER)?" [NextTime]":"");
+
+ return 0;
+}
+
+int
+event_base_foreach_event(struct event_base *base,
+ event_base_foreach_event_cb fn, void *arg)
+{
+ int r;
+ if ((!fn) || (!base)) {
+ return -1;
+ }
+ EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+ r = event_base_foreach_event_nolock_(base, fn, arg);
+ EVBASE_RELEASE_LOCK(base, th_base_lock);
+ return r;
+}
+
+
+void
+event_base_dump_events(struct event_base *base, FILE *output)
+{
+ EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+ fprintf(output, "Inserted events:\n");
+ event_base_foreach_event_nolock_(base, dump_inserted_event_fn, output);
+
+ fprintf(output, "Active events:\n");
+ event_base_foreach_event_nolock_(base, dump_active_event_fn, output);
+ EVBASE_RELEASE_LOCK(base, th_base_lock);
+}
+
+void
+event_base_active_by_fd(struct event_base *base, evutil_socket_t fd, short events)
+{
+ EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+ evmap_io_active_(base, fd, events & (EV_READ|EV_WRITE|EV_CLOSED));
+ EVBASE_RELEASE_LOCK(base, th_base_lock);
+}
+
+void
+event_base_active_by_signal(struct event_base *base, int sig)
+{
+ EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+ evmap_signal_active_(base, sig, 1);
+ EVBASE_RELEASE_LOCK(base, th_base_lock);
+}
+
+
+void
+event_base_add_virtual_(struct event_base *base)
+{
+ EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+ base->virtual_event_count++;
+ MAX_EVENT_COUNT(base->virtual_event_count_max, base->virtual_event_count);
+ EVBASE_RELEASE_LOCK(base, th_base_lock);
+}
+
+void
+event_base_del_virtual_(struct event_base *base)
+{
+ EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+ EVUTIL_ASSERT(base->virtual_event_count > 0);
+ base->virtual_event_count--;
+ if (base->virtual_event_count == 0 && EVBASE_NEED_NOTIFY(base))
+ evthread_notify_base(base);
+ EVBASE_RELEASE_LOCK(base, th_base_lock);
+}
+
+static void
+event_free_debug_globals_locks(void)
+{
+#ifndef EVENT__DISABLE_THREAD_SUPPORT
+#ifndef EVENT__DISABLE_DEBUG_MODE
+ if (event_debug_map_lock_ != NULL) {
+ EVTHREAD_FREE_LOCK(event_debug_map_lock_, 0);
+ event_debug_map_lock_ = NULL;
+ evthreadimpl_disable_lock_debugging_();
+ }
+#endif /* EVENT__DISABLE_DEBUG_MODE */
+#endif /* EVENT__DISABLE_THREAD_SUPPORT */
+ return;
+}
+
+static void
+event_free_debug_globals(void)
+{
+ event_free_debug_globals_locks();
+}
+
+static void
+event_free_evsig_globals(void)
+{
+ evsig_free_globals_();
+}
+
+static void
+event_free_evutil_globals(void)
+{
+ evutil_free_globals_();
+}
+
+static void
+event_free_globals(void)
+{
+ event_free_debug_globals();
+ event_free_evsig_globals();
+ event_free_evutil_globals();
+}
+
+void
+libevent_global_shutdown(void)
+{
+ event_disable_debug_mode();
+ event_free_globals();
+}
+
+#ifndef EVENT__DISABLE_THREAD_SUPPORT
+int
+event_global_setup_locks_(const int enable_locks)
+{
+#ifndef EVENT__DISABLE_DEBUG_MODE
+ EVTHREAD_SETUP_GLOBAL_LOCK(event_debug_map_lock_, 0);
+#endif
+ if (evsig_global_setup_locks_(enable_locks) < 0)
+ return -1;
+ if (evutil_global_setup_locks_(enable_locks) < 0)
+ return -1;
+ if (evutil_secure_rng_global_setup_locks_(enable_locks) < 0)
+ return -1;
+ return 0;
+}
+#endif
+
+void
+event_base_assert_ok_(struct event_base *base)
+{
+ EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+ event_base_assert_ok_nolock_(base);
+ EVBASE_RELEASE_LOCK(base, th_base_lock);
+}
+
+void
+event_base_assert_ok_nolock_(struct event_base *base)
+{
+ int i;
+ int count;
+
+ /* First do checks on the per-fd and per-signal lists */
+ evmap_check_integrity_(base);
+
+ /* Check the heap property */
+ for (i = 1; i < (int)base->timeheap.n; ++i) {
+ int parent = (i - 1) / 2;
+ struct event *ev, *p_ev;
+ ev = base->timeheap.p[i];
+ p_ev = base->timeheap.p[parent];
+ EVUTIL_ASSERT(ev->ev_flags & EVLIST_TIMEOUT);
+ EVUTIL_ASSERT(evutil_timercmp(&p_ev->ev_timeout, &ev->ev_timeout, <=));
+ EVUTIL_ASSERT(ev->ev_timeout_pos.min_heap_idx == i);
+ }
+
+ /* Check that the common timeouts are fine */
+ for (i = 0; i < base->n_common_timeouts; ++i) {
+ struct common_timeout_list *ctl = base->common_timeout_queues[i];
+ struct event *last=NULL, *ev;
+
+ EVUTIL_ASSERT_TAILQ_OK(&ctl->events, event, ev_timeout_pos.ev_next_with_common_timeout);
+
+ TAILQ_FOREACH(ev, &ctl->events, ev_timeout_pos.ev_next_with_common_timeout) {
+ if (last)
+ EVUTIL_ASSERT(evutil_timercmp(&last->ev_timeout, &ev->ev_timeout, <=));
+ EVUTIL_ASSERT(ev->ev_flags & EVLIST_TIMEOUT);
+ EVUTIL_ASSERT(is_common_timeout(&ev->ev_timeout,base));
+ EVUTIL_ASSERT(COMMON_TIMEOUT_IDX(&ev->ev_timeout) == i);
+ last = ev;
+ }
+ }
+
+ /* Check the active queues. */
+ count = 0;
+ for (i = 0; i < base->nactivequeues; ++i) {
+ struct event_callback *evcb;
+ EVUTIL_ASSERT_TAILQ_OK(&base->activequeues[i], event_callback, evcb_active_next);
+ TAILQ_FOREACH(evcb, &base->activequeues[i], evcb_active_next) {
+ EVUTIL_ASSERT((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) == EVLIST_ACTIVE);
+ EVUTIL_ASSERT(evcb->evcb_pri == i);
+ ++count;
+ }
+ }
+
+ {
+ struct event_callback *evcb;
+ TAILQ_FOREACH(evcb, &base->active_later_queue, evcb_active_next) {
+ EVUTIL_ASSERT((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) == EVLIST_ACTIVE_LATER);
+ ++count;
+ }
+ }
+ EVUTIL_ASSERT(count == base->event_count_active);
+}
diff --git a/libs/libevent/src/event_iocp.c b/libs/libevent/src/event_iocp.c
new file mode 100644
index 0000000000..a9902fbc42
--- /dev/null
+++ b/libs/libevent/src/event_iocp.c
@@ -0,0 +1,294 @@
+/*
+ * Copyright (c) 2009-2012 Niels Provos, Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "evconfig-private.h"
+
+#ifndef _WIN32_WINNT
+/* Minimum required for InitializeCriticalSectionAndSpinCount */
+#define _WIN32_WINNT 0x0403
+#endif
+#include <winsock2.h>
+#include <windows.h>
+#include <process.h>
+#include <stdio.h>
+#include <mswsock.h>
+
+#include "event2/util.h"
+#include "util-internal.h"
+#include "iocp-internal.h"
+#include "log-internal.h"
+#include "mm-internal.h"
+#include "event-internal.h"
+#include "evthread-internal.h"
+
+#define NOTIFICATION_KEY ((ULONG_PTR)-1)
+
+void
+event_overlapped_init_(struct event_overlapped *o, iocp_callback cb)
+{
+ memset(o, 0, sizeof(struct event_overlapped));
+ o->cb = cb;
+}
+
+static void
+handle_entry(OVERLAPPED *o, ULONG_PTR completion_key, DWORD nBytes, int ok)
+{
+ struct event_overlapped *eo =
+ EVUTIL_UPCAST(o, struct event_overlapped, overlapped);
+ eo->cb(eo, completion_key, nBytes, ok);
+}
+
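+/* Worker-thread body: block on the completion port, dispatching each
+ * completion packet to its event_overlapped callback, until the port is
+ * shut down or GetQueuedCompletionStatus stops returning events. */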
+static void
+loop(void *port_)
+{
+ struct event_iocp_port *port = port_;
+ long ms = port->ms;
+ HANDLE p = port->port;
+
+ if (ms <= 0)
+ ms = INFINITE;
+
+ while (1) {
+ OVERLAPPED *overlapped=NULL;
+ ULONG_PTR key=0;
+ DWORD bytes=0;
+ int ok = GetQueuedCompletionStatus(p, &bytes, &key,
+ &overlapped, ms);
+ EnterCriticalSection(&port->lock);
+ if (port->shutdown) {
+ if (--port->n_live_threads == 0)
+ ReleaseSemaphore(port->shutdownSemaphore, 1,
+ NULL);
+ LeaveCriticalSection(&port->lock);
+ return;
+ }
+ LeaveCriticalSection(&port->lock);
+
+ if (key != NOTIFICATION_KEY && overlapped)
+ handle_entry(overlapped, key, bytes, ok);
+ else if (!overlapped)
+ break;
+ }
+ event_warnx("GetQueuedCompletionStatus exited with no event.");
+ EnterCriticalSection(&port->lock);
+ if (--port->n_live_threads == 0)
+ ReleaseSemaphore(port->shutdownSemaphore, 1, NULL);
+ LeaveCriticalSection(&port->lock);
+}
+
+int
+event_iocp_port_associate_(struct event_iocp_port *port, evutil_socket_t fd,
+ ev_uintptr_t key)
+{
+ HANDLE h;
+ h = CreateIoCompletionPort((HANDLE)fd, port->port, key, port->n_threads);
+ if (!h)
+ return -1;
+ return 0;
+}
+
+static void *
+get_extension_function(SOCKET s, const GUID *which_fn)
+{
+ void *ptr = NULL;
+ DWORD bytes=0;
+ WSAIoctl(s, SIO_GET_EXTENSION_FUNCTION_POINTER,
+ (GUID*)which_fn, sizeof(*which_fn),
+ &ptr, sizeof(ptr),
+ &bytes, NULL, NULL);
+
+ /* No need to detect errors here: if ptr is set, then we have a good
+ function pointer. Otherwise, we should behave as if we had no
+ function pointer.
+ */
+ return ptr;
+}
+
+/* Mingw doesn't have these in its mswsock.h. The values are copied from
+ wine.h. Perhaps if we copy them exactly, the cargo will come again.
+*/
+#ifndef WSAID_ACCEPTEX
+#define WSAID_ACCEPTEX \
+ {0xb5367df1,0xcbac,0x11cf,{0x95,0xca,0x00,0x80,0x5f,0x48,0xa1,0x92}}
+#endif
+#ifndef WSAID_CONNECTEX
+#define WSAID_CONNECTEX \
+ {0x25a207b9,0xddf3,0x4660,{0x8e,0xe9,0x76,0xe5,0x8c,0x74,0x06,0x3e}}
+#endif
+#ifndef WSAID_GETACCEPTEXSOCKADDRS
+#define WSAID_GETACCEPTEXSOCKADDRS \
+ {0xb5367df2,0xcbac,0x11cf,{0x95,0xca,0x00,0x80,0x5f,0x48,0xa1,0x92}}
+#endif
+
+static int extension_fns_initialized = 0;
+
+static void
+init_extension_functions(struct win32_extension_fns *ext)
+{
+ const GUID acceptex = WSAID_ACCEPTEX;
+ const GUID connectex = WSAID_CONNECTEX;
+ const GUID getacceptexsockaddrs = WSAID_GETACCEPTEXSOCKADDRS;
+ SOCKET s = socket(AF_INET, SOCK_STREAM, 0);
+ if (s == INVALID_SOCKET)
+ return;
+ ext->AcceptEx = get_extension_function(s, &acceptex);
+ ext->ConnectEx = get_extension_function(s, &connectex);
+ ext->GetAcceptExSockaddrs = get_extension_function(s,
+ &getacceptexsockaddrs);
+ closesocket(s);
+
+ extension_fns_initialized = 1;
+}
+
+static struct win32_extension_fns the_extension_fns;
+
+const struct win32_extension_fns *
+event_get_win32_extension_fns_(void)
+{
+ return &the_extension_fns;
+}
+
+#define N_CPUS_DEFAULT 2
+
+struct event_iocp_port *
+event_iocp_port_launch_(int n_cpus)
+{
+ struct event_iocp_port *port;
+ int i;
+
+ if (!extension_fns_initialized)
+ init_extension_functions(&the_extension_fns);
+
+ if (!(port = mm_calloc(1, sizeof(struct event_iocp_port))))
+ return NULL;
+
+ if (n_cpus <= 0)
+ n_cpus = N_CPUS_DEFAULT;
+ port->n_threads = n_cpus * 2;
+ port->threads = mm_calloc(port->n_threads, sizeof(HANDLE));
+ if (!port->threads)
+ goto err;
+
+ port->port = CreateIoCompletionPort(INVALID_HANDLE_VALUE, NULL, 0,
+ n_cpus);
+ port->ms = -1;
+ if (!port->port)
+ goto err;
+
+ port->shutdownSemaphore = CreateSemaphore(NULL, 0, 1, NULL);
+ if (!port->shutdownSemaphore)
+ goto err;
+
+ for (i=0; i<port->n_threads; ++i) {
+ ev_uintptr_t th = _beginthread(loop, 0, port);
+ if (th == (ev_uintptr_t)-1)
+ goto err;
+ port->threads[i] = (HANDLE)th;
+ ++port->n_live_threads;
+ }
+
+ InitializeCriticalSectionAndSpinCount(&port->lock, 1000);
+
+ return port;
+err:
+ if (port->port)
+ CloseHandle(port->port);
+ if (port->threads)
+ mm_free(port->threads);
+ if (port->shutdownSemaphore)
+ CloseHandle(port->shutdownSemaphore);
+ mm_free(port);
+ return NULL;
+}
+
+static void
+event_iocp_port_unlock_and_free_(struct event_iocp_port *port)
+{
+ DeleteCriticalSection(&port->lock);
+ CloseHandle(port->port);
+ CloseHandle(port->shutdownSemaphore);
+ mm_free(port->threads);
+ mm_free(port);
+}
+
+static int
+event_iocp_notify_all(struct event_iocp_port *port)
+{
+ int i, r, ok=1;
+ for (i=0; i<port->n_threads; ++i) {
+ r = PostQueuedCompletionStatus(port->port, 0, NOTIFICATION_KEY,
+ NULL);
+ if (!r)
+ ok = 0;
+ }
+ return ok ? 0 : -1;
+}
+
+int
+event_iocp_shutdown_(struct event_iocp_port *port, long waitMsec)
+{
+ DWORD ms = INFINITE;
+ int n;
+
+ EnterCriticalSection(&port->lock);
+ port->shutdown = 1;
+ LeaveCriticalSection(&port->lock);
+ event_iocp_notify_all(port);
+
+ if (waitMsec >= 0)
+ ms = waitMsec;
+
+ WaitForSingleObject(port->shutdownSemaphore, ms);
+ EnterCriticalSection(&port->lock);
+ n = port->n_live_threads;
+ LeaveCriticalSection(&port->lock);
+ if (n == 0) {
+ event_iocp_port_unlock_and_free_(port);
+ return 0;
+ } else {
+ return -1;
+ }
+}
+
+int
+event_iocp_activate_overlapped_(
+ struct event_iocp_port *port, struct event_overlapped *o,
+ ev_uintptr_t key, ev_uint32_t n)
+{
+ BOOL r;
+
+ r = PostQueuedCompletionStatus(port->port, n, key, &o->overlapped);
+ return (r==0) ? -1 : 0;
+}
+
+struct event_iocp_port *
+event_base_get_iocp_(struct event_base *base)
+{
+#ifdef _WIN32
+ return base->iocp;
+#else
+ return NULL;
+#endif
+}
diff --git a/libs/libevent/src/event_tagging.c b/libs/libevent/src/event_tagging.c
new file mode 100644
index 0000000000..6459dfa72e
--- /dev/null
+++ b/libs/libevent/src/event_tagging.c
@@ -0,0 +1,605 @@
+/*
+ * Copyright (c) 2003-2009 Niels Provos <provos@citi.umich.edu>
+ * Copyright (c) 2009-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "event2/event-config.h"
+#include "evconfig-private.h"
+
+#ifdef EVENT__HAVE_SYS_TYPES_H
+#include <sys/types.h>
+#endif
+#ifdef EVENT__HAVE_SYS_PARAM_H
+#include <sys/param.h>
+#endif
+
+#ifdef _WIN32
+#define WIN32_LEAN_AND_MEAN
+#include <winsock2.h>
+#include <windows.h>
+#undef WIN32_LEAN_AND_MEAN
+#endif
+
+#ifdef EVENT__HAVE_SYS_IOCTL_H
+#include <sys/ioctl.h>
+#endif
+#include <sys/queue.h>
+#ifdef EVENT__HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#ifndef _WIN32
+#include <syslog.h>
+#endif
+#ifdef EVENT__HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+#include <limits.h>
+
+#include "event2/event.h"
+#include "event2/tag.h"
+#include "event2/buffer.h"
+#include "log-internal.h"
+#include "mm-internal.h"
+#include "util-internal.h"
+
+/*
+ Here's our wire format:
+
+ Stream = TaggedData*
+
+ TaggedData = Tag Length Data
+ where the integer value of 'Length' is the length of 'data'.
+
+ Tag = HByte* LByte
+ where HByte is a byte with the high bit set, and LByte is a byte
+ with the high bit clear. The integer value of the tag is built from
+ the lower 7 bits of each byte, least-significant group first (see
+ evtag_encode_tag below). So for example, the tag 0x66 is encoded as
+ [66], whereas the tag 0x166 is encoded as [e6 02].
+
+ Length = Integer
+
+ Integer = NNibbles Nibble* Padding?
+ where NNibbles is a 4-bit value encoding the number of nibbles-1,
+ and each Nibble is 4 bits worth of encoded integer, least-significant
+ nibble first. If the total encoded integer size is an odd number of nibbles,
+ a final padding nibble with value 0 is appended.
+*/
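+
+/* Worked example (hand-traced from the encoders below; illustrative only):
+ * marshaling tag 1 with the 2-byte payload "hi" via evtag_marshal() yields
+ * the bytes [01 02 68 69] - tag 1 fits in one low-bit-clear byte, the
+ * length 2 encodes as the single byte 0x02 (header nibble 0 = one value
+ * nibble), and the payload follows verbatim. */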
+
+int evtag_decode_int(ev_uint32_t *pnumber, struct evbuffer *evbuf);
+int evtag_decode_int64(ev_uint64_t *pnumber, struct evbuffer *evbuf);
+int evtag_encode_tag(struct evbuffer *evbuf, ev_uint32_t tag);
+int evtag_decode_tag(ev_uint32_t *ptag, struct evbuffer *evbuf);
+
+void
+evtag_init(void)
+{
+}
+
+/*
+ * We encode integers by nibbles; the first nibble contains the number
+ * of significant nibbles - 1; this allows us to encode up to 64-bit
+ * integers. This function is byte-order independent.
+ *
+ * @param number a 32-bit unsigned integer to encode
+ * @param data a pointer to where the data should be written. Must
+ * have at least 5 bytes free.
+ * @return the number of bytes written into data.
+ */
+
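+/* Worked example (hand-traced from the macro below; illustrative only):
+ * encoding 300 (0x12c) needs three significant nibbles (1, 2, c), so the
+ * header nibble is 3-1 = 2 and the output is the two bytes [2c 21];
+ * encode_int_internal() returns 2. */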
+#define ENCODE_INT_INTERNAL(data, number) do { \
+ int off = 1, nibbles = 0; \
+ \
+ memset(data, 0, sizeof(number)+1); \
+ while (number) { \
+ if (off & 0x1) \
+ data[off/2] = (data[off/2] & 0xf0) | (number & 0x0f); \
+ else \
+ data[off/2] = (data[off/2] & 0x0f) | \
+ ((number & 0x0f) << 4); \
+ number >>= 4; \
+ off++; \
+ } \
+ \
+ if (off > 2) \
+ nibbles = off - 2; \
+ \
+ /* Off - 1 is the number of encoded nibbles */ \
+ data[0] = (data[0] & 0x0f) | ((nibbles & 0x0f) << 4); \
+ \
+ return ((off + 1) / 2); \
+} while (0)
+
+static inline int
+encode_int_internal(ev_uint8_t *data, ev_uint32_t number)
+{
+ ENCODE_INT_INTERNAL(data, number);
+}
+
+static inline int
+encode_int64_internal(ev_uint8_t *data, ev_uint64_t number)
+{
+ ENCODE_INT_INTERNAL(data, number);
+}
+
+void
+evtag_encode_int(struct evbuffer *evbuf, ev_uint32_t number)
+{
+ ev_uint8_t data[5];
+ int len = encode_int_internal(data, number);
+ evbuffer_add(evbuf, data, len);
+}
+
+void
+evtag_encode_int64(struct evbuffer *evbuf, ev_uint64_t number)
+{
+ ev_uint8_t data[9];
+ int len = encode_int64_internal(data, number);
+ evbuffer_add(evbuf, data, len);
+}
+
+/*
+ * Support variable length encoding of tags; we use the high bit in each
+ * octet as a continuation signal.
+ */
+
+int
+evtag_encode_tag(struct evbuffer *evbuf, ev_uint32_t tag)
+{
+ int bytes = 0;
+ ev_uint8_t data[5];
+
+ memset(data, 0, sizeof(data));
+ do {
+ ev_uint8_t lower = tag & 0x7f;
+ tag >>= 7;
+
+ if (tag)
+ lower |= 0x80;
+
+ data[bytes++] = lower;
+ } while (tag);
+
+ if (evbuf != NULL)
+ evbuffer_add(evbuf, data, bytes);
+
+ return (bytes);
+}
+
+static int
+decode_tag_internal(ev_uint32_t *ptag, struct evbuffer *evbuf, int dodrain)
+{
+ ev_uint32_t number = 0;
+ size_t len = evbuffer_get_length(evbuf);
+ ev_uint8_t *data;
+ size_t count = 0;
+ int shift = 0, done = 0;
+
+ /*
+ * the encoding of a number is at most one byte more than its
+ * storage size. however, it may also be much smaller.
+ */
+ data = evbuffer_pullup(
+ evbuf, len < sizeof(number) + 1 ? len : sizeof(number) + 1);
+ if (!data)
+ return (-1);
+
+ while (count++ < len) {
+ ev_uint8_t lower = *data++;
+ if (shift >= 28) {
+ /* Make sure it fits into 32 bits */
+ if (shift > 28)
+ return (-1);
+ if ((lower & 0x7f) > 15)
+ return (-1);
+ }
+ number |= (lower & (unsigned)0x7f) << shift;
+ shift += 7;
+
+ if (!(lower & 0x80)) {
+ done = 1;
+ break;
+ }
+ }
+
+ if (!done)
+ return (-1);
+
+ if (dodrain)
+ evbuffer_drain(evbuf, count);
+
+ if (ptag != NULL)
+ *ptag = number;
+
+ return count > INT_MAX ? INT_MAX : (int)(count);
+}
+
+int
+evtag_decode_tag(ev_uint32_t *ptag, struct evbuffer *evbuf)
+{
+ return (decode_tag_internal(ptag, evbuf, 1 /* dodrain */));
+}
+
+/*
+ * Marshal a data type, the general format is as follows:
+ *
+ * tag number: var bytes; length: var bytes; payload: var bytes
+ */
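+/* Illustrative example: marshaling a 2-byte payload "hi" under tag 1 yields
+ * the bytes 01 02 68 69 -- the tag (a single byte here), the nibble-encoded
+ * length 2, and then the payload itself. */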
+
+void
+evtag_marshal(struct evbuffer *evbuf, ev_uint32_t tag,
+ const void *data, ev_uint32_t len)
+{
+ evtag_encode_tag(evbuf, tag);
+ evtag_encode_int(evbuf, len);
+ evbuffer_add(evbuf, (void *)data, len);
+}
+
+void
+evtag_marshal_buffer(struct evbuffer *evbuf, ev_uint32_t tag,
+ struct evbuffer *data)
+{
+ evtag_encode_tag(evbuf, tag);
+ /* XXX support more than UINT32_MAX data */
+ evtag_encode_int(evbuf, (ev_uint32_t)evbuffer_get_length(data));
+ evbuffer_add_buffer(evbuf, data);
+}
+
+/* Marshaling for integers */
+void
+evtag_marshal_int(struct evbuffer *evbuf, ev_uint32_t tag, ev_uint32_t integer)
+{
+ ev_uint8_t data[5];
+ int len = encode_int_internal(data, integer);
+
+ evtag_encode_tag(evbuf, tag);
+ evtag_encode_int(evbuf, len);
+ evbuffer_add(evbuf, data, len);
+}
+
+void
+evtag_marshal_int64(struct evbuffer *evbuf, ev_uint32_t tag,
+ ev_uint64_t integer)
+{
+ ev_uint8_t data[9];
+ int len = encode_int64_internal(data, integer);
+
+ evtag_encode_tag(evbuf, tag);
+ evtag_encode_int(evbuf, len);
+ evbuffer_add(evbuf, data, len);
+}
+
+void
+evtag_marshal_string(struct evbuffer *buf, ev_uint32_t tag, const char *string)
+{
+ /* TODO support strings longer than UINT32_MAX ? */
+ evtag_marshal(buf, tag, string, (ev_uint32_t)strlen(string));
+}
+
+void
+evtag_marshal_timeval(struct evbuffer *evbuf, ev_uint32_t tag, struct timeval *tv)
+{
+ ev_uint8_t data[10];
+ int len = encode_int_internal(data, tv->tv_sec);
+ len += encode_int_internal(data + len, tv->tv_usec);
+ evtag_marshal(evbuf, tag, data, len);
+}
+
+#define DECODE_INT_INTERNAL(number, maxnibbles, pnumber, evbuf, offset) \
+do { \
+ ev_uint8_t *data; \
+ ev_ssize_t len = evbuffer_get_length(evbuf) - offset; \
+ int nibbles = 0; \
+ \
+ if (len <= 0) \
+ return (-1); \
+ \
+ /* XXX(niels): faster? */ \
+ data = evbuffer_pullup(evbuf, offset + 1) + offset; \
+ if (!data) \
+ return (-1); \
+ \
+ nibbles = ((data[0] & 0xf0) >> 4) + 1; \
+ if (nibbles > maxnibbles || (nibbles >> 1) + 1 > len) \
+ return (-1); \
+ len = (nibbles >> 1) + 1; \
+ \
+ data = evbuffer_pullup(evbuf, offset + len) + offset; \
+ if (!data) \
+ return (-1); \
+ \
+ while (nibbles > 0) { \
+ number <<= 4; \
+ if (nibbles & 0x1) \
+ number |= data[nibbles >> 1] & 0x0f; \
+ else \
+ number |= (data[nibbles >> 1] & 0xf0) >> 4; \
+ nibbles--; \
+ } \
+ \
+ *pnumber = number; \
+ \
+ return (int)(len); \
+} while (0)
+
+/* Internal: decode an integer from an evbuffer, without draining it.
+ * Only integers up to 32-bits are supported.
+ *
+ * @param evbuf the buffer to read from
+ * @param offset an index into the buffer at which we should start reading.
+ * @param pnumber a pointer to receive the integer.
+ * @return The length of the number as encoded, or -1 on error.
+ */
+
+static int
+decode_int_internal(ev_uint32_t *pnumber, struct evbuffer *evbuf, int offset)
+{
+ ev_uint32_t number = 0;
+ DECODE_INT_INTERNAL(number, 8, pnumber, evbuf, offset);
+}
+
+static int
+decode_int64_internal(ev_uint64_t *pnumber, struct evbuffer *evbuf, int offset)
+{
+ ev_uint64_t number = 0;
+ DECODE_INT_INTERNAL(number, 16, pnumber, evbuf, offset);
+}
+
+int
+evtag_decode_int(ev_uint32_t *pnumber, struct evbuffer *evbuf)
+{
+ int res = decode_int_internal(pnumber, evbuf, 0);
+ if (res != -1)
+ evbuffer_drain(evbuf, res);
+
+ return (res == -1 ? -1 : 0);
+}
+
+int
+evtag_decode_int64(ev_uint64_t *pnumber, struct evbuffer *evbuf)
+{
+ int res = decode_int64_internal(pnumber, evbuf, 0);
+ if (res != -1)
+ evbuffer_drain(evbuf, res);
+
+ return (res == -1 ? -1 : 0);
+}
+
+int
+evtag_peek(struct evbuffer *evbuf, ev_uint32_t *ptag)
+{
+ return (decode_tag_internal(ptag, evbuf, 0 /* dodrain */));
+}
+
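+/* Without draining the buffer, store in *plength the total encoded size of
+ * the next tagged record (tag + length field + payload).  Returns 0 on
+ * success, -1 on failure. */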
+int
+evtag_peek_length(struct evbuffer *evbuf, ev_uint32_t *plength)
+{
+ int res, len;
+
+ len = decode_tag_internal(NULL, evbuf, 0 /* dodrain */);
+ if (len == -1)
+ return (-1);
+
+ res = decode_int_internal(plength, evbuf, len);
+ if (res == -1)
+ return (-1);
+
+ *plength += res + len;
+
+ return (0);
+}
+
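+/* Without draining the buffer, store in *plength just the payload length of
+ * the next tagged record.  Returns 0 on success, -1 on failure. */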
+int
+evtag_payload_length(struct evbuffer *evbuf, ev_uint32_t *plength)
+{
+ int res, len;
+
+ len = decode_tag_internal(NULL, evbuf, 0 /* dodrain */);
+ if (len == -1)
+ return (-1);
+
+ res = decode_int_internal(plength, evbuf, len);
+ if (res == -1)
+ return (-1);
+
+ return (0);
+}
+
+/* just unmarshals the header and returns the length of the remaining data */
+
+int
+evtag_unmarshal_header(struct evbuffer *evbuf, ev_uint32_t *ptag)
+{
+ ev_uint32_t len;
+
+ if (decode_tag_internal(ptag, evbuf, 1 /* dodrain */) == -1)
+ return (-1);
+ if (evtag_decode_int(&len, evbuf) == -1)
+ return (-1);
+
+ if (evbuffer_get_length(evbuf) < len)
+ return (-1);
+
+ return (len);
+}
+
+int
+evtag_consume(struct evbuffer *evbuf)
+{
+ int len;
+ if ((len = evtag_unmarshal_header(evbuf, NULL)) == -1)
+ return (-1);
+ evbuffer_drain(evbuf, len);
+
+ return (0);
+}
+
+/* Reads the data type from an event buffer */
+
+int
+evtag_unmarshal(struct evbuffer *src, ev_uint32_t *ptag, struct evbuffer *dst)
+{
+ int len;
+
+ if ((len = evtag_unmarshal_header(src, ptag)) == -1)
+ return (-1);
+
+ if (evbuffer_add(dst, evbuffer_pullup(src, len), len) == -1)
+ return (-1);
+
+ evbuffer_drain(src, len);
+
+ return (len);
+}
+
+/* Unmarshaling for integers */
+
+int
+evtag_unmarshal_int(struct evbuffer *evbuf, ev_uint32_t need_tag,
+ ev_uint32_t *pinteger)
+{
+ ev_uint32_t tag;
+ ev_uint32_t len;
+ int result;
+
+ if (decode_tag_internal(&tag, evbuf, 1 /* dodrain */) == -1)
+ return (-1);
+ if (need_tag != tag)
+ return (-1);
+ if (evtag_decode_int(&len, evbuf) == -1)
+ return (-1);
+
+ if (evbuffer_get_length(evbuf) < len)
+ return (-1);
+
+ result = decode_int_internal(pinteger, evbuf, 0);
+ evbuffer_drain(evbuf, len);
+ if (result < 0 || (size_t)result > len) /* XXX Should this be != rather than > ?*/
+ return (-1);
+ else
+ return result;
+}
+
+int
+evtag_unmarshal_int64(struct evbuffer *evbuf, ev_uint32_t need_tag,
+ ev_uint64_t *pinteger)
+{
+ ev_uint32_t tag;
+ ev_uint32_t len;
+ int result;
+
+ if (decode_tag_internal(&tag, evbuf, 1 /* dodrain */) == -1)
+ return (-1);
+ if (need_tag != tag)
+ return (-1);
+ if (evtag_decode_int(&len, evbuf) == -1)
+ return (-1);
+
+ if (evbuffer_get_length(evbuf) < len)
+ return (-1);
+
+ result = decode_int64_internal(pinteger, evbuf, 0);
+ evbuffer_drain(evbuf, len);
+ if (result < 0 || (size_t)result > len) /* XXX Should this be != rather than > ?*/
+ return (-1);
+ else
+ return result;
+}
+
+/* Unmarshal a fixed length tag */
+
+int
+evtag_unmarshal_fixed(struct evbuffer *src, ev_uint32_t need_tag, void *data,
+ size_t len)
+{
+ ev_uint32_t tag;
+ int tag_len;
+
+ /* Now unmarshal a tag and check that it matches the tag we want */
+ if ((tag_len = evtag_unmarshal_header(src, &tag)) < 0 ||
+ tag != need_tag)
+ return (-1);
+
+ if ((size_t)tag_len != len)
+ return (-1);
+
+ evbuffer_remove(src, data, len);
+ return (0);
+}
+
+int
+evtag_unmarshal_string(struct evbuffer *evbuf, ev_uint32_t need_tag,
+ char **pstring)
+{
+ ev_uint32_t tag;
+ int tag_len;
+
+ if ((tag_len = evtag_unmarshal_header(evbuf, &tag)) == -1 ||
+ tag != need_tag)
+ return (-1);
+
+ *pstring = mm_malloc(tag_len + 1);
+ if (*pstring == NULL) {
+ event_warn("%s: malloc", __func__);
+ return -1;
+ }
+ evbuffer_remove(evbuf, *pstring, tag_len);
+ (*pstring)[tag_len] = '\0';
+
+ return (0);
+}
+
+int
+evtag_unmarshal_timeval(struct evbuffer *evbuf, ev_uint32_t need_tag,
+ struct timeval *ptv)
+{
+ ev_uint32_t tag;
+ ev_uint32_t integer;
+ int len, offset, offset2;
+ int result = -1;
+
+ if ((len = evtag_unmarshal_header(evbuf, &tag)) == -1)
+ return (-1);
+ if (tag != need_tag)
+ goto done;
+ if ((offset = decode_int_internal(&integer, evbuf, 0)) == -1)
+ goto done;
+ ptv->tv_sec = integer;
+ if ((offset2 = decode_int_internal(&integer, evbuf, offset)) == -1)
+ goto done;
+ ptv->tv_usec = integer;
+ if (offset + offset2 > len) /* XXX Should this be != instead of > ? */
+ goto done;
+
+ result = 0;
+ done:
+ evbuffer_drain(evbuf, len);
+ return result;
+}
diff --git a/libs/libevent/src/evmap-internal.h b/libs/libevent/src/evmap-internal.h
new file mode 100644
index 0000000000..dfc81d5087
--- /dev/null
+++ b/libs/libevent/src/evmap-internal.h
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef EVMAP_INTERNAL_H_INCLUDED_
+#define EVMAP_INTERNAL_H_INCLUDED_
+
+/** @file evmap-internal.h
+ *
+ * An event_map is a utility structure to map each fd or signal to zero or
+ * more events. Functions to manipulate event_maps should only be used from
+ * inside libevent. They generally need to hold the lock on the corresponding
+ * event_base.
+ **/
+
+struct event_base;
+struct event;
+
+/** Initialize an event_map for use.
+ */
+void evmap_io_initmap_(struct event_io_map* ctx);
+void evmap_signal_initmap_(struct event_signal_map* ctx);
+
+/** Remove all entries from an event_map.
+
+ @param ctx the map to clear.
+ */
+void evmap_io_clear_(struct event_io_map* ctx);
+void evmap_signal_clear_(struct event_signal_map* ctx);
+
+/** Add an IO event (some combination of EV_READ or EV_WRITE) to an
+ event_base's list of events on a given file descriptor, and tell the
+ underlying eventops about the fd if its state has changed.
+
+ Requires that ev is not already added.
+
+ @param base the event_base to operate on.
+ @param fd the file descriptor corresponding to ev.
+ @param ev the event to add.
+*/
+int evmap_io_add_(struct event_base *base, evutil_socket_t fd, struct event *ev);
+/** Remove an IO event (some combination of EV_READ or EV_WRITE) from an
+ event_base's list of events on a given file descriptor, and tell the
+ underlying eventops about the fd if its state has changed.
+
+ @param base the event_base to operate on.
+ @param fd the file descriptor corresponding to ev.
+ @param ev the event to remove.
+ */
+int evmap_io_del_(struct event_base *base, evutil_socket_t fd, struct event *ev);
+/** Activate the set of events waiting on an event_base for a given fd.
+
+ @param base the event_base to operate on.
+ @param fd the file descriptor that has become active.
+ @param events a bitmask of EV_READ|EV_WRITE|EV_ET.
+*/
+void evmap_io_active_(struct event_base *base, evutil_socket_t fd, short events);
+
+
+/* These functions behave in the same way as evmap_io_*, except they work on
+ * signals rather than fds. signals use a linear map everywhere; fds use
+ * either a linear map or a hashtable. */
+int evmap_signal_add_(struct event_base *base, int signum, struct event *ev);
+int evmap_signal_del_(struct event_base *base, int signum, struct event *ev);
+void evmap_signal_active_(struct event_base *base, evutil_socket_t signum, int ncalls);
+
+/* Return the fdinfo object associated with a given fd. If the fd has no
+ * events associated with it, the result may be NULL.
+ */
+void *evmap_io_get_fdinfo_(struct event_io_map *ctx, evutil_socket_t fd);
+
+/* Helper for event_reinit(): Tell the backend to re-add every fd and signal
+ * for which we have a pending event.
+ */
+int evmap_reinit_(struct event_base *base);
+
+/* Helper for event_base_free(): Call event_del() on every pending fd and
+ * signal event.
+ */
+void evmap_delete_all_(struct event_base *base);
+
+/* Helper for event_base_assert_ok_(): Check referential integrity of the
+ * evmaps.
+ */
+void evmap_check_integrity_(struct event_base *base);
+
+/* Helper: Call fn on every fd or signal event, passing as its arguments the
+ * provided event_base, the event, and arg. If fn returns 0, process the next
+ * event. If it returns any other value, return that value and process no
+ * more events.
+ */
+int evmap_foreach_event_(struct event_base *base,
+ event_base_foreach_event_cb fn,
+ void *arg);
+
+#endif /* EVMAP_INTERNAL_H_INCLUDED_ */
diff --git a/libs/libevent/src/evmap.c b/libs/libevent/src/evmap.c
new file mode 100644
index 0000000000..3f76dd0ae1
--- /dev/null
+++ b/libs/libevent/src/evmap.c
@@ -0,0 +1,1055 @@
+/*
+ * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "event2/event-config.h"
+#include "evconfig-private.h"
+
+#ifdef _WIN32
+#include <winsock2.h>
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+#undef WIN32_LEAN_AND_MEAN
+#endif
+#include <sys/types.h>
+#if !defined(_WIN32) && defined(EVENT__HAVE_SYS_TIME_H)
+#include <sys/time.h>
+#endif
+#include <sys/queue.h>
+#include <stdio.h>
+#include <stdlib.h>
+#ifndef _WIN32
+#include <unistd.h>
+#endif
+#include <errno.h>
+#include <signal.h>
+#include <string.h>
+#include <time.h>
+
+#include "event-internal.h"
+#include "evmap-internal.h"
+#include "mm-internal.h"
+#include "changelist-internal.h"
+
+/** An entry for an evmap_io list: notes all the events that want to read or
+ write on a given fd, and the number of each.
+ */
+struct evmap_io {
+ struct event_dlist events;
+ ev_uint16_t nread;
+ ev_uint16_t nwrite;
+ ev_uint16_t nclose;
+};
+
+/* An entry for an evmap_signal list: notes all the events that want to know
+ when a signal triggers. */
+struct evmap_signal {
+ struct event_dlist events;
+};
+
+/* On some platforms, fds start at 0 and increment by 1 as they are
+ allocated, and old numbers get used. For these platforms, we
+ implement io maps just like signal maps: as an array of pointers to
+ struct evmap_io. But on other platforms (windows), sockets are not
+ 0-indexed, not necessarily consecutive, and not necessarily reused.
+ There, we use a hashtable to implement evmap_io.
+*/
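+/* (EVMAP_USE_HT is expected to be defined only for Windows builds; see
+ * event-internal.h.) */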
+#ifdef EVMAP_USE_HT
+struct event_map_entry {
+ HT_ENTRY(event_map_entry) map_node;
+ evutil_socket_t fd;
+ union { /* This is a union in case we need to make more things that can
+ be in the hashtable. */
+ struct evmap_io evmap_io;
+ } ent;
+};
+
+/* Helper used by the event_io_map hashtable code; tries to return a good hash
+ * of the fd in e->fd. */
+static inline unsigned
+hashsocket(struct event_map_entry *e)
+{
+ /* On win32, in practice, the low 2-3 bits of a SOCKET seem not to
+ * matter. Our hashtable implementation really likes low-order bits,
+ * though, so let's do the rotate-and-add trick. */
+ unsigned h = (unsigned) e->fd;
+ h += (h >> 2) | (h << 30);
+ return h;
+}
+
+/* Helper used by the event_io_map hashtable code; returns true iff e1 and e2
+ * have the same e->fd. */
+static inline int
+eqsocket(struct event_map_entry *e1, struct event_map_entry *e2)
+{
+ return e1->fd == e2->fd;
+}
+
+HT_PROTOTYPE(event_io_map, event_map_entry, map_node, hashsocket, eqsocket)
+HT_GENERATE(event_io_map, event_map_entry, map_node, hashsocket, eqsocket,
+ 0.5, mm_malloc, mm_realloc, mm_free)
+
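+/* Set 'x' to the union member 'type' (e.g. evmap_io) stored for the socket
+ * 'slot', or to NULL if the hashtable has no entry for it. */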
+#define GET_IO_SLOT(x, map, slot, type) \
+ do { \
+ struct event_map_entry key_, *ent_; \
+ key_.fd = slot; \
+ ent_ = HT_FIND(event_io_map, map, &key_); \
+ (x) = ent_ ? &ent_->ent.type : NULL; \
+ } while (0);
+
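+/* As GET_IO_SLOT, but if no entry exists for 'slot', allocate one (with
+ * fdinfo_len extra bytes for the backend's per-fd data), initialize it with
+ * 'ctor', and insert it into the hashtable.  Makes the enclosing function
+ * return -1 if the allocation fails. */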
+#define GET_IO_SLOT_AND_CTOR(x, map, slot, type, ctor, fdinfo_len) \
+ do { \
+ struct event_map_entry key_, *ent_; \
+ key_.fd = slot; \
+ HT_FIND_OR_INSERT_(event_io_map, map_node, hashsocket, map, \
+ event_map_entry, &key_, ptr, \
+ { \
+ ent_ = *ptr; \
+ }, \
+ { \
+ ent_ = mm_calloc(1,sizeof(struct event_map_entry)+fdinfo_len); \
+ if (EVUTIL_UNLIKELY(ent_ == NULL)) \
+ return (-1); \
+ ent_->fd = slot; \
+ (ctor)(&ent_->ent.type); \
+ HT_FOI_INSERT_(map_node, map, &key_, ent_, ptr) \
+ }); \
+ (x) = &ent_->ent.type; \
+ } while (0)
+
+void evmap_io_initmap_(struct event_io_map *ctx)
+{
+ HT_INIT(event_io_map, ctx);
+}
+
+void evmap_io_clear_(struct event_io_map *ctx)
+{
+ struct event_map_entry **ent, **next, *this;
+ for (ent = HT_START(event_io_map, ctx); ent; ent = next) {
+ this = *ent;
+ next = HT_NEXT_RMV(event_io_map, ctx, ent);
+ mm_free(this);
+ }
+ HT_CLEAR(event_io_map, ctx); /* remove all storage held by the ctx. */
+}
+#endif
+
+/* Set the variable 'x' to the field in event_map 'map' with fields of type
+ 'struct type *' corresponding to the fd or signal 'slot'. Set 'x' to NULL
+ if there are no entries for 'slot'. Does no bounds-checking. */
+#define GET_SIGNAL_SLOT(x, map, slot, type) \
+ (x) = (struct type *)((map)->entries[slot])
+/* As GET_SLOT, but construct the entry for 'slot' if it is not present,
+ by allocating enough memory for a 'struct type', and initializing the new
+ value by calling the function 'ctor' on it. Makes the function
+ return -1 on allocation failure.
+ */
+#define GET_SIGNAL_SLOT_AND_CTOR(x, map, slot, type, ctor, fdinfo_len) \
+ do { \
+ if ((map)->entries[slot] == NULL) { \
+ (map)->entries[slot] = \
+ mm_calloc(1,sizeof(struct type)+fdinfo_len); \
+ if (EVUTIL_UNLIKELY((map)->entries[slot] == NULL)) \
+ return (-1); \
+ (ctor)((struct type *)(map)->entries[slot]); \
+ } \
+ (x) = (struct type *)((map)->entries[slot]); \
+ } while (0)
+
+/* If we aren't using hashtables, then define the IO_SLOT macros and functions
+ as thin aliases over the SIGNAL_SLOT versions. */
+#ifndef EVMAP_USE_HT
+#define GET_IO_SLOT(x,map,slot,type) GET_SIGNAL_SLOT(x,map,slot,type)
+#define GET_IO_SLOT_AND_CTOR(x,map,slot,type,ctor,fdinfo_len) \
+ GET_SIGNAL_SLOT_AND_CTOR(x,map,slot,type,ctor,fdinfo_len)
+#define FDINFO_OFFSET sizeof(struct evmap_io)
+void
+evmap_io_initmap_(struct event_io_map* ctx)
+{
+ evmap_signal_initmap_(ctx);
+}
+void
+evmap_io_clear_(struct event_io_map* ctx)
+{
+ evmap_signal_clear_(ctx);
+}
+#endif
+
+
+/** Expand 'map' with new entries of width 'msize' until it is big enough
+ to store a value in 'slot'.
+ */
+static int
+evmap_make_space(struct event_signal_map *map, int slot, int msize)
+{
+ if (map->nentries <= slot) {
+ int nentries = map->nentries ? map->nentries : 32;
+ void **tmp;
+
+ while (nentries <= slot)
+ nentries <<= 1;
+
+ tmp = (void **)mm_realloc(map->entries, nentries * msize);
+ if (tmp == NULL)
+ return (-1);
+
+ memset(&tmp[map->nentries], 0,
+ (nentries - map->nentries) * msize);
+
+ map->nentries = nentries;
+ map->entries = tmp;
+ }
+
+ return (0);
+}
+
+void
+evmap_signal_initmap_(struct event_signal_map *ctx)
+{
+ ctx->nentries = 0;
+ ctx->entries = NULL;
+}
+
+void
+evmap_signal_clear_(struct event_signal_map *ctx)
+{
+ if (ctx->entries != NULL) {
+ int i;
+ for (i = 0; i < ctx->nentries; ++i) {
+ if (ctx->entries[i] != NULL)
+ mm_free(ctx->entries[i]);
+ }
+ mm_free(ctx->entries);
+ ctx->entries = NULL;
+ }
+ ctx->nentries = 0;
+}
+
+
+/* code specific to file descriptors */
+
+/** Constructor for struct evmap_io */
+static void
+evmap_io_init(struct evmap_io *entry)
+{
+ LIST_INIT(&entry->events);
+ entry->nread = 0;
+ entry->nwrite = 0;
+ entry->nclose = 0;
+}
+
+
+/* return -1 on error, 0 on success if nothing changed in the event backend,
+ * and 1 on success if something did. */
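+/* Bookkeeping note: ctx->nread/nwrite/nclose count how many added events want
+ * each kind of notification on this fd; the backend's add() is invoked only
+ * when one of those counts goes from 0 to 1, i.e. when the fd's interest set
+ * actually changes. */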
+int
+evmap_io_add_(struct event_base *base, evutil_socket_t fd, struct event *ev)
+{
+ const struct eventop *evsel = base->evsel;
+ struct event_io_map *io = &base->io;
+ struct evmap_io *ctx = NULL;
+ int nread, nwrite, nclose, retval = 0;
+ short res = 0, old = 0;
+ struct event *old_ev;
+
+ EVUTIL_ASSERT(fd == ev->ev_fd);
+
+ if (fd < 0)
+ return 0;
+
+#ifndef EVMAP_USE_HT
+ if (fd >= io->nentries) {
+ if (evmap_make_space(io, fd, sizeof(struct evmap_io *)) == -1)
+ return (-1);
+ }
+#endif
+ GET_IO_SLOT_AND_CTOR(ctx, io, fd, evmap_io, evmap_io_init,
+ evsel->fdinfo_len);
+
+ nread = ctx->nread;
+ nwrite = ctx->nwrite;
+ nclose = ctx->nclose;
+
+ if (nread)
+ old |= EV_READ;
+ if (nwrite)
+ old |= EV_WRITE;
+ if (nclose)
+ old |= EV_CLOSED;
+
+ if (ev->ev_events & EV_READ) {
+ if (++nread == 1)
+ res |= EV_READ;
+ }
+ if (ev->ev_events & EV_WRITE) {
+ if (++nwrite == 1)
+ res |= EV_WRITE;
+ }
+ if (ev->ev_events & EV_CLOSED) {
+ if (++nclose == 1)
+ res |= EV_CLOSED;
+ }
+ if (EVUTIL_UNLIKELY(nread > 0xffff || nwrite > 0xffff || nclose > 0xffff)) {
+ event_warnx("Too many events reading or writing on fd %d",
+ (int)fd);
+ return -1;
+ }
+ if (EVENT_DEBUG_MODE_IS_ON() &&
+ (old_ev = LIST_FIRST(&ctx->events)) &&
+ (old_ev->ev_events&EV_ET) != (ev->ev_events&EV_ET)) {
+ event_warnx("Tried to mix edge-triggered and non-edge-triggered"
+ " events on fd %d", (int)fd);
+ return -1;
+ }
+
+ if (res) {
+ void *extra = ((char*)ctx) + sizeof(struct evmap_io);
+ /* XXX(niels): we cannot mix edge-triggered and
+ * level-triggered, we should probably assert on
+ * this. */
+ if (evsel->add(base, ev->ev_fd,
+ old, (ev->ev_events & EV_ET) | res, extra) == -1)
+ return (-1);
+ retval = 1;
+ }
+
+ ctx->nread = (ev_uint16_t) nread;
+ ctx->nwrite = (ev_uint16_t) nwrite;
+ ctx->nclose = (ev_uint16_t) nclose;
+ LIST_INSERT_HEAD(&ctx->events, ev, ev_io_next);
+
+ return (retval);
+}
+
+/* return -1 on error, 0 on success if nothing changed in the event backend,
+ * and 1 on success if something did. */
+int
+evmap_io_del_(struct event_base *base, evutil_socket_t fd, struct event *ev)
+{
+ const struct eventop *evsel = base->evsel;
+ struct event_io_map *io = &base->io;
+ struct evmap_io *ctx;
+ int nread, nwrite, nclose, retval = 0;
+ short res = 0, old = 0;
+
+ if (fd < 0)
+ return 0;
+
+ EVUTIL_ASSERT(fd == ev->ev_fd);
+
+#ifndef EVMAP_USE_HT
+ if (fd >= io->nentries)
+ return (-1);
+#endif
+
+ GET_IO_SLOT(ctx, io, fd, evmap_io);
+
+ nread = ctx->nread;
+ nwrite = ctx->nwrite;
+ nclose = ctx->nclose;
+
+ if (nread)
+ old |= EV_READ;
+ if (nwrite)
+ old |= EV_WRITE;
+ if (nclose)
+ old |= EV_CLOSED;
+
+ if (ev->ev_events & EV_READ) {
+ if (--nread == 0)
+ res |= EV_READ;
+ EVUTIL_ASSERT(nread >= 0);
+ }
+ if (ev->ev_events & EV_WRITE) {
+ if (--nwrite == 0)
+ res |= EV_WRITE;
+ EVUTIL_ASSERT(nwrite >= 0);
+ }
+ if (ev->ev_events & EV_CLOSED) {
+ if (--nclose == 0)
+ res |= EV_CLOSED;
+ EVUTIL_ASSERT(nclose >= 0);
+ }
+
+ if (res) {
+ void *extra = ((char*)ctx) + sizeof(struct evmap_io);
+ if (evsel->del(base, ev->ev_fd, old, res, extra) == -1) {
+ retval = -1;
+ } else {
+ retval = 1;
+ }
+ }
+
+ ctx->nread = nread;
+ ctx->nwrite = nwrite;
+ ctx->nclose = nclose;
+ LIST_REMOVE(ev, ev_io_next);
+
+ return (retval);
+}
+
+void
+evmap_io_active_(struct event_base *base, evutil_socket_t fd, short events)
+{
+ struct event_io_map *io = &base->io;
+ struct evmap_io *ctx;
+ struct event *ev;
+
+#ifndef EVMAP_USE_HT
+ if (fd < 0 || fd >= io->nentries)
+ return;
+#endif
+ GET_IO_SLOT(ctx, io, fd, evmap_io);
+
+ if (NULL == ctx)
+ return;
+ LIST_FOREACH(ev, &ctx->events, ev_io_next) {
+ if (ev->ev_events & events)
+ event_active_nolock_(ev, ev->ev_events & events, 1);
+ }
+}
+
+/* code specific to signals */
+
+static void
+evmap_signal_init(struct evmap_signal *entry)
+{
+ LIST_INIT(&entry->events);
+}
+
+
+int
+evmap_signal_add_(struct event_base *base, int sig, struct event *ev)
+{
+ const struct eventop *evsel = base->evsigsel;
+ struct event_signal_map *map = &base->sigmap;
+ struct evmap_signal *ctx = NULL;
+
+ if (sig >= map->nentries) {
+ if (evmap_make_space(
+ map, sig, sizeof(struct evmap_signal *)) == -1)
+ return (-1);
+ }
+ GET_SIGNAL_SLOT_AND_CTOR(ctx, map, sig, evmap_signal, evmap_signal_init,
+ base->evsigsel->fdinfo_len);
+
+ if (LIST_EMPTY(&ctx->events)) {
+ if (evsel->add(base, ev->ev_fd, 0, EV_SIGNAL, NULL)
+ == -1)
+ return (-1);
+ }
+
+ LIST_INSERT_HEAD(&ctx->events, ev, ev_signal_next);
+
+ return (1);
+}
+
+int
+evmap_signal_del_(struct event_base *base, int sig, struct event *ev)
+{
+ const struct eventop *evsel = base->evsigsel;
+ struct event_signal_map *map = &base->sigmap;
+ struct evmap_signal *ctx;
+
+ if (sig >= map->nentries)
+ return (-1);
+
+ GET_SIGNAL_SLOT(ctx, map, sig, evmap_signal);
+
+ LIST_REMOVE(ev, ev_signal_next);
+
+ if (LIST_FIRST(&ctx->events) == NULL) {
+ if (evsel->del(base, ev->ev_fd, 0, EV_SIGNAL, NULL) == -1)
+ return (-1);
+ }
+
+ return (1);
+}
+
+void
+evmap_signal_active_(struct event_base *base, evutil_socket_t sig, int ncalls)
+{
+ struct event_signal_map *map = &base->sigmap;
+ struct evmap_signal *ctx;
+ struct event *ev;
+
+ if (sig < 0 || sig >= map->nentries)
+ return;
+ GET_SIGNAL_SLOT(ctx, map, sig, evmap_signal);
+
+ if (!ctx)
+ return;
+ LIST_FOREACH(ev, &ctx->events, ev_signal_next)
+ event_active_nolock_(ev, EV_SIGNAL, ncalls);
+}
+
+void *
+evmap_io_get_fdinfo_(struct event_io_map *map, evutil_socket_t fd)
+{
+ struct evmap_io *ctx;
+ GET_IO_SLOT(ctx, map, fd, evmap_io);
+ if (ctx)
+ return ((char*)ctx) + sizeof(struct evmap_io);
+ else
+ return NULL;
+}
+
+/* Callback type for evmap_io_foreach_fd */
+typedef int (*evmap_io_foreach_fd_cb)(
+ struct event_base *, evutil_socket_t, struct evmap_io *, void *);
+
+/* Multipurpose helper function: Iterate over every file descriptor in the
+ * event_base for which we could have EV_READ or EV_WRITE events. For each
+ * such fd, call fn(base, fd, evmap_io, arg), where fn is the user-provided
+ * function, base is the event_base, fd is the file descriptor, evmap_io is
+ * an evmap_io structure containing a list of events pending on the file
+ * descriptor, and arg is the user-supplied argument.
+ *
+ * If fn returns 0, continue on to the next fd. Otherwise, return the same
+ * value that fn returned.
+ *
+ * Note that there is no guarantee that the file descriptors will be processed
+ * in any particular order.
+ */
+static int
+evmap_io_foreach_fd(struct event_base *base,
+ evmap_io_foreach_fd_cb fn,
+ void *arg)
+{
+ evutil_socket_t fd;
+ struct event_io_map *iomap = &base->io;
+ int r = 0;
+#ifdef EVMAP_USE_HT
+ struct event_map_entry **mapent;
+ HT_FOREACH(mapent, event_io_map, iomap) {
+ struct evmap_io *ctx = &(*mapent)->ent.evmap_io;
+ fd = (*mapent)->fd;
+#else
+ for (fd = 0; fd < iomap->nentries; ++fd) {
+ struct evmap_io *ctx = iomap->entries[fd];
+ if (!ctx)
+ continue;
+#endif
+ if ((r = fn(base, fd, ctx, arg)))
+ break;
+ }
+ return r;
+}
+
+/* Callback type for evmap_signal_foreach_signal */
+typedef int (*evmap_signal_foreach_signal_cb)(
+ struct event_base *, int, struct evmap_signal *, void *);
+
+/* Multipurpose helper function: Iterate over every signal number in the
+ * event_base for which we could have signal events. For each such signal,
+ * call fn(base, signum, evmap_signal, arg), where fn is the user-provided
+ * function, base is the event_base, signum is the signal number, evmap_signal
+ * is an evmap_signal structure containing a list of events pending on the
+ * signal, and arg is the user-supplied argument.
+ *
+ * If fn returns 0, continue on to the next signal. Otherwise, return the same
+ * value that fn returned.
+ */
+static int
+evmap_signal_foreach_signal(struct event_base *base,
+ evmap_signal_foreach_signal_cb fn,
+ void *arg)
+{
+ struct event_signal_map *sigmap = &base->sigmap;
+ int r = 0;
+ int signum;
+
+ for (signum = 0; signum < sigmap->nentries; ++signum) {
+ struct evmap_signal *ctx = sigmap->entries[signum];
+ if (!ctx)
+ continue;
+ if ((r = fn(base, signum, ctx, arg)))
+ break;
+ }
+ return r;
+}
+
+/* Helper for evmap_reinit_: tell the backend to add every fd for which we have
+ * pending events, with the appropriate combination of EV_READ, EV_WRITE, and
+ * EV_ET. */
+static int
+evmap_io_reinit_iter_fn(struct event_base *base, evutil_socket_t fd,
+ struct evmap_io *ctx, void *arg)
+{
+ const struct eventop *evsel = base->evsel;
+ void *extra;
+ int *result = arg;
+ short events = 0;
+ struct event *ev;
+ EVUTIL_ASSERT(ctx);
+
+ extra = ((char*)ctx) + sizeof(struct evmap_io);
+ if (ctx->nread)
+ events |= EV_READ;
+ if (ctx->nwrite)
+ events |= EV_WRITE;
+ if (ctx->nclose)
+ events |= EV_CLOSED;
+ if (evsel->fdinfo_len)
+ memset(extra, 0, evsel->fdinfo_len);
+ if (events &&
+ (ev = LIST_FIRST(&ctx->events)) &&
+ (ev->ev_events & EV_ET))
+ events |= EV_ET;
+ if (evsel->add(base, fd, 0, events, extra) == -1)
+ *result = -1;
+
+ return 0;
+}
+
+/* Helper for evmap_reinit_: tell the backend to add every signal for which we
+ * have pending events. */
+static int
+evmap_signal_reinit_iter_fn(struct event_base *base,
+ int signum, struct evmap_signal *ctx, void *arg)
+{
+ const struct eventop *evsel = base->evsigsel;
+ int *result = arg;
+
+ if (!LIST_EMPTY(&ctx->events)) {
+ if (evsel->add(base, signum, 0, EV_SIGNAL, NULL) == -1)
+ *result = -1;
+ }
+ return 0;
+}
+
+int
+evmap_reinit_(struct event_base *base)
+{
+ int result = 0;
+
+ evmap_io_foreach_fd(base, evmap_io_reinit_iter_fn, &result);
+ if (result < 0)
+ return -1;
+ evmap_signal_foreach_signal(base, evmap_signal_reinit_iter_fn, &result);
+ if (result < 0)
+ return -1;
+ return 0;
+}
+
+/* Helper for evmap_delete_all_: delete every event in an event_dlist. */
+static int
+delete_all_in_dlist(struct event_dlist *dlist)
+{
+ struct event *ev;
+ while ((ev = LIST_FIRST(dlist)))
+ event_del(ev);
+ return 0;
+}
+
+/* Helper for evmap_delete_all_: delete every event pending on an fd. */
+static int
+evmap_io_delete_all_iter_fn(struct event_base *base, evutil_socket_t fd,
+ struct evmap_io *io_info, void *arg)
+{
+ return delete_all_in_dlist(&io_info->events);
+}
+
+/* Helper for evmap_delete_all_: delete every event pending on a signal. */
+static int
+evmap_signal_delete_all_iter_fn(struct event_base *base, int signum,
+ struct evmap_signal *sig_info, void *arg)
+{
+ return delete_all_in_dlist(&sig_info->events);
+}
+
+void
+evmap_delete_all_(struct event_base *base)
+{
+ evmap_signal_foreach_signal(base, evmap_signal_delete_all_iter_fn, NULL);
+ evmap_io_foreach_fd(base, evmap_io_delete_all_iter_fn, NULL);
+}
+
+/** Per-fd structure for use with changelists. It keeps track, for each fd or
+ * signal using the changelist, of where its entry in the changelist is.
+ */
+struct event_changelist_fdinfo {
+ int idxplus1; /* this is the index +1, so that memset(0) will make it
+ * a no-such-element */
+};
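+/* (Illustration: idxplus1 == 3 means changelist->changes[2]; idxplus1 == 0
+ * means this fd or signal has no pending changelist entry.) */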
+
+void
+event_changelist_init_(struct event_changelist *changelist)
+{
+ changelist->changes = NULL;
+ changelist->changes_size = 0;
+ changelist->n_changes = 0;
+}
+
+/** Helper: return the changelist_fdinfo corresponding to a given change. */
+static inline struct event_changelist_fdinfo *
+event_change_get_fdinfo(struct event_base *base,
+ const struct event_change *change)
+{
+ char *ptr;
+ if (change->read_change & EV_CHANGE_SIGNAL) {
+ struct evmap_signal *ctx;
+ GET_SIGNAL_SLOT(ctx, &base->sigmap, change->fd, evmap_signal);
+ ptr = ((char*)ctx) + sizeof(struct evmap_signal);
+ } else {
+ struct evmap_io *ctx;
+ GET_IO_SLOT(ctx, &base->io, change->fd, evmap_io);
+ ptr = ((char*)ctx) + sizeof(struct evmap_io);
+ }
+ return (void*)ptr;
+}
+
+/** Callback helper for event_changelist_assert_ok */
+static int
+event_changelist_assert_ok_foreach_iter_fn(
+ struct event_base *base,
+ evutil_socket_t fd, struct evmap_io *io, void *arg)
+{
+ struct event_changelist *changelist = &base->changelist;
+ struct event_changelist_fdinfo *f;
+ f = (void*)
+ ( ((char*)io) + sizeof(struct evmap_io) );
+ if (f->idxplus1) {
+ struct event_change *c = &changelist->changes[f->idxplus1 - 1];
+ EVUTIL_ASSERT(c->fd == fd);
+ }
+ return 0;
+}
+
+/** Make sure that the changelist is consistent with the evmap structures. */
+static void
+event_changelist_assert_ok(struct event_base *base)
+{
+ int i;
+ struct event_changelist *changelist = &base->changelist;
+
+ EVUTIL_ASSERT(changelist->changes_size >= changelist->n_changes);
+ for (i = 0; i < changelist->n_changes; ++i) {
+ struct event_change *c = &changelist->changes[i];
+ struct event_changelist_fdinfo *f;
+ EVUTIL_ASSERT(c->fd >= 0);
+ f = event_change_get_fdinfo(base, c);
+ EVUTIL_ASSERT(f);
+ EVUTIL_ASSERT(f->idxplus1 == i + 1);
+ }
+
+ evmap_io_foreach_fd(base,
+ event_changelist_assert_ok_foreach_iter_fn,
+ NULL);
+}
+
+#ifdef DEBUG_CHANGELIST
+#define event_changelist_check(base) event_changelist_assert_ok((base))
+#else
+#define event_changelist_check(base) ((void)0)
+#endif
+
+void
+event_changelist_remove_all_(struct event_changelist *changelist,
+ struct event_base *base)
+{
+ int i;
+
+ event_changelist_check(base);
+
+ for (i = 0; i < changelist->n_changes; ++i) {
+ struct event_change *ch = &changelist->changes[i];
+ struct event_changelist_fdinfo *fdinfo =
+ event_change_get_fdinfo(base, ch);
+ EVUTIL_ASSERT(fdinfo->idxplus1 == i + 1);
+ fdinfo->idxplus1 = 0;
+ }
+
+ changelist->n_changes = 0;
+
+ event_changelist_check(base);
+}
+
+void
+event_changelist_freemem_(struct event_changelist *changelist)
+{
+ if (changelist->changes)
+ mm_free(changelist->changes);
+ event_changelist_init_(changelist); /* zero it all out. */
+}
+
+/** Increase the size of 'changelist' to hold more changes. */
+static int
+event_changelist_grow(struct event_changelist *changelist)
+{
+ int new_size;
+ struct event_change *new_changes;
+ if (changelist->changes_size < 64)
+ new_size = 64;
+ else
+ new_size = changelist->changes_size * 2;
+
+ new_changes = mm_realloc(changelist->changes,
+ new_size * sizeof(struct event_change));
+
+ if (EVUTIL_UNLIKELY(new_changes == NULL))
+ return (-1);
+
+ changelist->changes = new_changes;
+ changelist->changes_size = new_size;
+
+ return (0);
+}
+
+/** Return a pointer to the changelist entry for the file descriptor or signal
+ * 'fd', whose fdinfo is 'fdinfo'. If none exists, construct it, setting its
+ * old_events field to old_events.
+ */
+static struct event_change *
+event_changelist_get_or_construct(struct event_changelist *changelist,
+ evutil_socket_t fd,
+ short old_events,
+ struct event_changelist_fdinfo *fdinfo)
+{
+ struct event_change *change;
+
+ if (fdinfo->idxplus1 == 0) {
+ int idx;
+ EVUTIL_ASSERT(changelist->n_changes <= changelist->changes_size);
+
+ if (changelist->n_changes == changelist->changes_size) {
+ if (event_changelist_grow(changelist) < 0)
+ return NULL;
+ }
+
+ idx = changelist->n_changes++;
+ change = &changelist->changes[idx];
+ fdinfo->idxplus1 = idx + 1;
+
+ memset(change, 0, sizeof(struct event_change));
+ change->fd = fd;
+ change->old_events = old_events;
+ } else {
+ change = &changelist->changes[fdinfo->idxplus1 - 1];
+ EVUTIL_ASSERT(change->fd == fd);
+ }
+ return change;
+}
+
+int
+event_changelist_add_(struct event_base *base, evutil_socket_t fd, short old, short events,
+ void *p)
+{
+ struct event_changelist *changelist = &base->changelist;
+ struct event_changelist_fdinfo *fdinfo = p;
+ struct event_change *change;
+
+ event_changelist_check(base);
+
+ change = event_changelist_get_or_construct(changelist, fd, old, fdinfo);
+ if (!change)
+ return -1;
+
+ /* An add replaces any previous delete, but doesn't result in a no-op,
+ * since the delete might fail (because the fd had been closed since
+	 * the last add, for instance). */
+
+ if (events & (EV_READ|EV_SIGNAL)) {
+ change->read_change = EV_CHANGE_ADD |
+ (events & (EV_ET|EV_PERSIST|EV_SIGNAL));
+ }
+ if (events & EV_WRITE) {
+ change->write_change = EV_CHANGE_ADD |
+ (events & (EV_ET|EV_PERSIST|EV_SIGNAL));
+ }
+ if (events & EV_CLOSED) {
+ change->close_change = EV_CHANGE_ADD |
+ (events & (EV_ET|EV_PERSIST|EV_SIGNAL));
+ }
+
+ event_changelist_check(base);
+ return (0);
+}
+
+int
+event_changelist_del_(struct event_base *base, evutil_socket_t fd, short old, short events,
+ void *p)
+{
+ struct event_changelist *changelist = &base->changelist;
+ struct event_changelist_fdinfo *fdinfo = p;
+ struct event_change *change;
+
+ event_changelist_check(base);
+ change = event_changelist_get_or_construct(changelist, fd, old, fdinfo);
+ event_changelist_check(base);
+ if (!change)
+ return -1;
+
+ /* A delete on an event set that doesn't contain the event to be
+	   deleted produces a no-op. This effectively removes any previous
+ uncommitted add, rather than replacing it: on those platforms where
+ "add, delete, dispatch" is not the same as "no-op, dispatch", we
+ want the no-op behavior.
+
+	   If we have a no-op item, we could remove it from the list
+ entirely, but really there's not much point: skipping the no-op
+ change when we do the dispatch later is far cheaper than rejuggling
+ the array now.
+
+ As this stands, it also lets through deletions of events that are
+ not currently set.
+ */
+
+ if (events & (EV_READ|EV_SIGNAL)) {
+ if (!(change->old_events & (EV_READ | EV_SIGNAL)))
+ change->read_change = 0;
+ else
+ change->read_change = EV_CHANGE_DEL;
+ }
+ if (events & EV_WRITE) {
+ if (!(change->old_events & EV_WRITE))
+ change->write_change = 0;
+ else
+ change->write_change = EV_CHANGE_DEL;
+ }
+ if (events & EV_CLOSED) {
+ if (!(change->old_events & EV_CLOSED))
+ change->close_change = 0;
+ else
+ change->close_change = EV_CHANGE_DEL;
+ }
+
+ event_changelist_check(base);
+ return (0);
+}
+
+/* Helper for evmap_check_integrity_: verify that all of the events pending on
+ * given fd are set up correctly, and that the nread and nwrite counts on that
+ * fd are correct. */
+static int
+evmap_io_check_integrity_fn(struct event_base *base, evutil_socket_t fd,
+ struct evmap_io *io_info, void *arg)
+{
+ struct event *ev;
+ int n_read = 0, n_write = 0, n_close = 0;
+
+ /* First, make sure the list itself isn't corrupt. Otherwise,
+ * running LIST_FOREACH could be an exciting adventure. */
+ EVUTIL_ASSERT_LIST_OK(&io_info->events, event, ev_io_next);
+
+ LIST_FOREACH(ev, &io_info->events, ev_io_next) {
+ EVUTIL_ASSERT(ev->ev_flags & EVLIST_INSERTED);
+ EVUTIL_ASSERT(ev->ev_fd == fd);
+ EVUTIL_ASSERT(!(ev->ev_events & EV_SIGNAL));
+ EVUTIL_ASSERT((ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED)));
+ if (ev->ev_events & EV_READ)
+ ++n_read;
+ if (ev->ev_events & EV_WRITE)
+ ++n_write;
+ if (ev->ev_events & EV_CLOSED)
+ ++n_close;
+ }
+
+ EVUTIL_ASSERT(n_read == io_info->nread);
+ EVUTIL_ASSERT(n_write == io_info->nwrite);
+ EVUTIL_ASSERT(n_close == io_info->nclose);
+
+ return 0;
+}
+
+/* Helper for evmap_check_integrity_: verify that all of the events pending
+ * on given signal are set up correctly. */
+static int
+evmap_signal_check_integrity_fn(struct event_base *base,
+ int signum, struct evmap_signal *sig_info, void *arg)
+{
+ struct event *ev;
+ /* First, make sure the list itself isn't corrupt. */
+ EVUTIL_ASSERT_LIST_OK(&sig_info->events, event, ev_signal_next);
+
+	LIST_FOREACH(ev, &sig_info->events, ev_signal_next) {
+ EVUTIL_ASSERT(ev->ev_flags & EVLIST_INSERTED);
+ EVUTIL_ASSERT(ev->ev_fd == signum);
+ EVUTIL_ASSERT((ev->ev_events & EV_SIGNAL));
+ EVUTIL_ASSERT(!(ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED)));
+ }
+ return 0;
+}
+
+void
+evmap_check_integrity_(struct event_base *base)
+{
+ evmap_io_foreach_fd(base, evmap_io_check_integrity_fn, NULL);
+ evmap_signal_foreach_signal(base, evmap_signal_check_integrity_fn, NULL);
+
+ if (base->evsel->add == event_changelist_add_)
+ event_changelist_assert_ok(base);
+}
+
+/* Helper type for evmap_foreach_event_: Bundles a function to call on every
+ * event, and the user-provided void* to use as its third argument. */
+struct evmap_foreach_event_helper {
+ event_base_foreach_event_cb fn;
+ void *arg;
+};
+
+/* Helper for evmap_foreach_event_: calls a provided function on every event
+ * pending on a given fd. */
+static int
+evmap_io_foreach_event_fn(struct event_base *base, evutil_socket_t fd,
+ struct evmap_io *io_info, void *arg)
+{
+ struct evmap_foreach_event_helper *h = arg;
+ struct event *ev;
+ int r;
+ LIST_FOREACH(ev, &io_info->events, ev_io_next) {
+ if ((r = h->fn(base, ev, h->arg)))
+ return r;
+ }
+ return 0;
+}
+
+/* Helper for evmap_foreach_event_: calls a provided function on every event
+ * pending on a given signal. */
+static int
+evmap_signal_foreach_event_fn(struct event_base *base, int signum,
+ struct evmap_signal *sig_info, void *arg)
+{
+ struct event *ev;
+ struct evmap_foreach_event_helper *h = arg;
+ int r;
+ LIST_FOREACH(ev, &sig_info->events, ev_signal_next) {
+ if ((r = h->fn(base, ev, h->arg)))
+ return r;
+ }
+ return 0;
+}
+
+int
+evmap_foreach_event_(struct event_base *base,
+ event_base_foreach_event_cb fn, void *arg)
+{
+ struct evmap_foreach_event_helper h;
+ int r;
+ h.fn = fn;
+ h.arg = arg;
+ if ((r = evmap_io_foreach_fd(base, evmap_io_foreach_event_fn, &h)))
+ return r;
+ return evmap_signal_foreach_signal(base, evmap_signal_foreach_event_fn, &h);
+}
+
diff --git a/libs/libevent/src/evrpc-internal.h b/libs/libevent/src/evrpc-internal.h
new file mode 100644
index 0000000000..9eb376386d
--- /dev/null
+++ b/libs/libevent/src/evrpc-internal.h
@@ -0,0 +1,205 @@
+/*
+ * Copyright (c) 2006-2007 Niels Provos <provos@citi.umich.edu>
+ * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef EVRPC_INTERNAL_H_INCLUDED_
+#define EVRPC_INTERNAL_H_INCLUDED_
+
+#include "event2/http.h"
+#include "http-internal.h"
+
+struct evrpc;
+struct evrpc_request_wrapper;
+
+#define EVRPC_URI_PREFIX "/.rpc."
+
+struct evrpc_hook {
+ TAILQ_ENTRY(evrpc_hook) next;
+
+	/* returns EVRPC_TERMINATE if the rpc should be aborted;
+	 * a hook is allowed to rewrite the evbuffer
+ */
+ int (*process)(void *, struct evhttp_request *,
+ struct evbuffer *, void *);
+ void *process_arg;
+};
+
+TAILQ_HEAD(evrpc_hook_list, evrpc_hook);
+
+/*
+ * this is shared between the base and the pool, so that we can reuse
+ * the hook adding functions; we alias both evrpc_pool and evrpc_base
+ * to this common structure.
+ */
+
+struct evrpc_hook_ctx;
+TAILQ_HEAD(evrpc_pause_list, evrpc_hook_ctx);
+
+struct evrpc_hooks_ {
+ /* hooks for processing outbound and inbound rpcs */
+ struct evrpc_hook_list in_hooks;
+ struct evrpc_hook_list out_hooks;
+
+ struct evrpc_pause_list pause_requests;
+};
+
+#define input_hooks common.in_hooks
+#define output_hooks common.out_hooks
+#define paused_requests common.pause_requests
+
+struct evrpc_base {
+ struct evrpc_hooks_ common;
+
+ /* the HTTP server under which we register our RPC calls */
+ struct evhttp* http_server;
+
+ /* a list of all RPCs registered with us */
+ TAILQ_HEAD(evrpc_list, evrpc) registered_rpcs;
+};
+
+struct evrpc_req_generic;
+void evrpc_reqstate_free_(struct evrpc_req_generic* rpc_state);
+
+/* A pool for holding evhttp_connection objects */
+struct evrpc_pool {
+ struct evrpc_hooks_ common;
+
+ struct event_base *base;
+
+ struct evconq connections;
+
+ int timeout;
+
+ TAILQ_HEAD(evrpc_requestq, evrpc_request_wrapper) (requests);
+};
+
+struct evrpc_hook_ctx {
+ TAILQ_ENTRY(evrpc_hook_ctx) next;
+
+ void *ctx;
+ void (*cb)(void *, enum EVRPC_HOOK_RESULT);
+};
+
+struct evrpc_meta {
+ TAILQ_ENTRY(evrpc_meta) next;
+ char *key;
+
+ void *data;
+ size_t data_size;
+};
+
+TAILQ_HEAD(evrpc_meta_list, evrpc_meta);
+
+struct evrpc_hook_meta {
+ struct evrpc_meta_list meta_data;
+ struct evhttp_connection *evcon;
+};
+
+/* allows association of meta data with a request */
+static void evrpc_hook_associate_meta_(struct evrpc_hook_meta **pctx,
+ struct evhttp_connection *evcon);
+
+/* creates a new meta data store */
+static struct evrpc_hook_meta *evrpc_hook_meta_new_(void);
+
+/* frees the meta data associated with a request */
+static void evrpc_hook_context_free_(struct evrpc_hook_meta *ctx);
+
+/* the server side of an rpc */
+
+/* We alias the RPC specific structs to this voided one */
+struct evrpc_req_generic {
+ /*
+ * allows association of meta data via hooks - needs to be
+ * synchronized with evrpc_request_wrapper
+ */
+ struct evrpc_hook_meta *hook_meta;
+
+ /* the unmarshaled request object */
+ void *request;
+
+ /* the empty reply object that needs to be filled in */
+ void *reply;
+
+ /*
+ * the static structure for this rpc; that can be used to
+ * automatically unmarshal and marshal the http buffers.
+ */
+ struct evrpc *rpc;
+
+ /*
+ * the http request structure on which we need to answer.
+ */
+ struct evhttp_request* http_req;
+
+ /*
+ * Temporary data store for marshaled data
+ */
+ struct evbuffer* rpc_data;
+};
+
+/* the client side of an rpc request */
+struct evrpc_request_wrapper {
+ /*
+ * allows association of meta data via hooks - needs to be
+ * synchronized with evrpc_req_generic.
+ */
+ struct evrpc_hook_meta *hook_meta;
+
+ TAILQ_ENTRY(evrpc_request_wrapper) next;
+
+ /* pool on which this rpc request is being made */
+ struct evrpc_pool *pool;
+
+ /* connection on which the request is being sent */
+ struct evhttp_connection *evcon;
+
+ /* the actual request */
+ struct evhttp_request *req;
+
+ /* event for implementing request timeouts */
+ struct event ev_timeout;
+
+ /* the name of the rpc */
+ char *name;
+
+ /* callback */
+ void (*cb)(struct evrpc_status*, void *request, void *reply, void *arg);
+ void *cb_arg;
+
+ void *request;
+ void *reply;
+
+	/* marshals the request into the evbuffer */
+ void (*request_marshal)(struct evbuffer *, void *);
+
+ /* removes all stored state in the reply */
+ void (*reply_clear)(void *);
+
+	/* unmarshals the reply from a buffer */
+ int (*reply_unmarshal)(void *, struct evbuffer*);
+};
+
+#endif /* EVRPC_INTERNAL_H_INCLUDED_ */
diff --git a/libs/libevent/src/evrpc.c b/libs/libevent/src/evrpc.c
new file mode 100644
index 0000000000..2443ab2793
--- /dev/null
+++ b/libs/libevent/src/evrpc.c
@@ -0,0 +1,1171 @@
+/*
+ * Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
+ * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "event2/event-config.h"
+#include "evconfig-private.h"
+
+#ifdef _WIN32
+#define WIN32_LEAN_AND_MEAN
+#include <winsock2.h>
+#include <windows.h>
+#undef WIN32_LEAN_AND_MEAN
+#endif
+
+#include <sys/types.h>
+#ifndef _WIN32
+#include <sys/socket.h>
+#endif
+#ifdef EVENT__HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+#include <sys/queue.h>
+#include <stdio.h>
+#include <stdlib.h>
+#ifndef _WIN32
+#include <unistd.h>
+#endif
+#include <errno.h>
+#include <signal.h>
+#include <string.h>
+
+#include <sys/queue.h>
+
+#include "event2/event.h"
+#include "event2/event_struct.h"
+#include "event2/rpc.h"
+#include "event2/rpc_struct.h"
+#include "evrpc-internal.h"
+#include "event2/http.h"
+#include "event2/buffer.h"
+#include "event2/tag.h"
+#include "event2/http_struct.h"
+#include "event2/http_compat.h"
+#include "event2/util.h"
+#include "util-internal.h"
+#include "log-internal.h"
+#include "mm-internal.h"
+
+struct evrpc_base *
+evrpc_init(struct evhttp *http_server)
+{
+ struct evrpc_base* base = mm_calloc(1, sizeof(struct evrpc_base));
+ if (base == NULL)
+ return (NULL);
+
+ /* we rely on the tagging sub system */
+ evtag_init();
+
+ TAILQ_INIT(&base->registered_rpcs);
+ TAILQ_INIT(&base->input_hooks);
+ TAILQ_INIT(&base->output_hooks);
+
+ TAILQ_INIT(&base->paused_requests);
+
+ base->http_server = http_server;
+
+ return (base);
+}
+
+void
+evrpc_free(struct evrpc_base *base)
+{
+ struct evrpc *rpc;
+ struct evrpc_hook *hook;
+ struct evrpc_hook_ctx *pause;
+ int r;
+
+ while ((rpc = TAILQ_FIRST(&base->registered_rpcs)) != NULL) {
+ r = evrpc_unregister_rpc(base, rpc->uri);
+ EVUTIL_ASSERT(r == 0);
+ }
+ while ((pause = TAILQ_FIRST(&base->paused_requests)) != NULL) {
+ TAILQ_REMOVE(&base->paused_requests, pause, next);
+ mm_free(pause);
+ }
+ while ((hook = TAILQ_FIRST(&base->input_hooks)) != NULL) {
+ r = evrpc_remove_hook(base, EVRPC_INPUT, hook);
+ EVUTIL_ASSERT(r);
+ }
+ while ((hook = TAILQ_FIRST(&base->output_hooks)) != NULL) {
+ r = evrpc_remove_hook(base, EVRPC_OUTPUT, hook);
+ EVUTIL_ASSERT(r);
+ }
+ mm_free(base);
+}
+
+void *
+evrpc_add_hook(void *vbase,
+ enum EVRPC_HOOK_TYPE hook_type,
+ int (*cb)(void *, struct evhttp_request *, struct evbuffer *, void *),
+ void *cb_arg)
+{
+ struct evrpc_hooks_ *base = vbase;
+ struct evrpc_hook_list *head = NULL;
+ struct evrpc_hook *hook = NULL;
+ switch (hook_type) {
+ case EVRPC_INPUT:
+ head = &base->in_hooks;
+ break;
+ case EVRPC_OUTPUT:
+ head = &base->out_hooks;
+ break;
+ default:
+ EVUTIL_ASSERT(hook_type == EVRPC_INPUT || hook_type == EVRPC_OUTPUT);
+ }
+
+ hook = mm_calloc(1, sizeof(struct evrpc_hook));
+ EVUTIL_ASSERT(hook != NULL);
+
+ hook->process = cb;
+ hook->process_arg = cb_arg;
+ TAILQ_INSERT_TAIL(head, hook, next);
+
+ return (hook);
+}
+
+static int
+evrpc_remove_hook_internal(struct evrpc_hook_list *head, void *handle)
+{
+ struct evrpc_hook *hook = NULL;
+ TAILQ_FOREACH(hook, head, next) {
+ if (hook == handle) {
+ TAILQ_REMOVE(head, hook, next);
+ mm_free(hook);
+ return (1);
+ }
+ }
+
+ return (0);
+}
+
+/*
+ * remove the hook specified by the handle
+ */
+
+int
+evrpc_remove_hook(void *vbase, enum EVRPC_HOOK_TYPE hook_type, void *handle)
+{
+ struct evrpc_hooks_ *base = vbase;
+ struct evrpc_hook_list *head = NULL;
+ switch (hook_type) {
+ case EVRPC_INPUT:
+ head = &base->in_hooks;
+ break;
+ case EVRPC_OUTPUT:
+ head = &base->out_hooks;
+ break;
+ default:
+ EVUTIL_ASSERT(hook_type == EVRPC_INPUT || hook_type == EVRPC_OUTPUT);
+ }
+
+ return (evrpc_remove_hook_internal(head, handle));
+}
+
+static int
+evrpc_process_hooks(struct evrpc_hook_list *head, void *ctx,
+ struct evhttp_request *req, struct evbuffer *evbuf)
+{
+ struct evrpc_hook *hook;
+ TAILQ_FOREACH(hook, head, next) {
+ int res = hook->process(ctx, req, evbuf, hook->process_arg);
+ if (res != EVRPC_CONTINUE)
+ return (res);
+ }
+
+ return (EVRPC_CONTINUE);
+}
+
+static void evrpc_pool_schedule(struct evrpc_pool *pool);
+static void evrpc_request_cb(struct evhttp_request *, void *);
+
+/*
+ * Registers a new RPC with the HTTP server. The evrpc object is expected
+ * to have been filled in via the EVRPC_REGISTER_OBJECT macro which in turn
+ * calls this function.
+ */
+
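+/* Helper: build the URI under which an RPC is registered by prefixing
+ * EVRPC_URI_PREFIX, e.g. a hypothetical RPC named "Message" would be
+ * registered at "/.rpc.Message".  The caller frees the returned string. */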
+static char *
+evrpc_construct_uri(const char *uri)
+{
+ char *constructed_uri;
+ size_t constructed_uri_len;
+
+ constructed_uri_len = strlen(EVRPC_URI_PREFIX) + strlen(uri) + 1;
+ if ((constructed_uri = mm_malloc(constructed_uri_len)) == NULL)
+ event_err(1, "%s: failed to register rpc at %s",
+ __func__, uri);
+ memcpy(constructed_uri, EVRPC_URI_PREFIX, strlen(EVRPC_URI_PREFIX));
+ memcpy(constructed_uri + strlen(EVRPC_URI_PREFIX), uri, strlen(uri));
+ constructed_uri[constructed_uri_len - 1] = '\0';
+
+ return (constructed_uri);
+}
+
+int
+evrpc_register_rpc(struct evrpc_base *base, struct evrpc *rpc,
+ void (*cb)(struct evrpc_req_generic *, void *), void *cb_arg)
+{
+ char *constructed_uri = evrpc_construct_uri(rpc->uri);
+
+ rpc->base = base;
+ rpc->cb = cb;
+ rpc->cb_arg = cb_arg;
+
+ TAILQ_INSERT_TAIL(&base->registered_rpcs, rpc, next);
+
+ evhttp_set_cb(base->http_server,
+ constructed_uri,
+ evrpc_request_cb,
+ rpc);
+
+ mm_free(constructed_uri);
+
+ return (0);
+}
+
+int
+evrpc_unregister_rpc(struct evrpc_base *base, const char *name)
+{
+ char *registered_uri = NULL;
+ struct evrpc *rpc;
+ int r;
+
+ /* find the right rpc; linear search might be slow */
+ TAILQ_FOREACH(rpc, &base->registered_rpcs, next) {
+ if (strcmp(rpc->uri, name) == 0)
+ break;
+ }
+ if (rpc == NULL) {
+ /* We did not find an RPC with this name */
+ return (-1);
+ }
+ TAILQ_REMOVE(&base->registered_rpcs, rpc, next);
+
+ registered_uri = evrpc_construct_uri(name);
+
+ /* remove the http server callback */
+ r = evhttp_del_cb(base->http_server, registered_uri);
+ EVUTIL_ASSERT(r == 0);
+
+ mm_free(registered_uri);
+
+ mm_free((char *)rpc->uri);
+ mm_free(rpc);
+ return (0);
+}
+
+static int evrpc_pause_request(void *vbase, void *ctx,
+ void (*cb)(void *, enum EVRPC_HOOK_RESULT));
+static void evrpc_request_cb_closure(void *, enum EVRPC_HOOK_RESULT);
+
+static void
+evrpc_request_cb(struct evhttp_request *req, void *arg)
+{
+ struct evrpc *rpc = arg;
+ struct evrpc_req_generic *rpc_state = NULL;
+
+ /* let's verify the outside parameters */
+ if (req->type != EVHTTP_REQ_POST ||
+ evbuffer_get_length(req->input_buffer) <= 0)
+ goto error;
+
+ rpc_state = mm_calloc(1, sizeof(struct evrpc_req_generic));
+ if (rpc_state == NULL)
+ goto error;
+ rpc_state->rpc = rpc;
+ rpc_state->http_req = req;
+ rpc_state->rpc_data = NULL;
+
+ if (TAILQ_FIRST(&rpc->base->input_hooks) != NULL) {
+ int hook_res;
+
+ evrpc_hook_associate_meta_(&rpc_state->hook_meta, req->evcon);
+
+ /*
+		 * allow hooks to modify the incoming request
+ */
+ hook_res = evrpc_process_hooks(&rpc->base->input_hooks,
+ rpc_state, req, req->input_buffer);
+ switch (hook_res) {
+ case EVRPC_TERMINATE:
+ goto error;
+ case EVRPC_PAUSE:
+ evrpc_pause_request(rpc->base, rpc_state,
+ evrpc_request_cb_closure);
+ return;
+ case EVRPC_CONTINUE:
+ break;
+ default:
+ EVUTIL_ASSERT(hook_res == EVRPC_TERMINATE ||
+ hook_res == EVRPC_CONTINUE ||
+ hook_res == EVRPC_PAUSE);
+ }
+ }
+
+ evrpc_request_cb_closure(rpc_state, EVRPC_CONTINUE);
+ return;
+
+error:
+ evrpc_reqstate_free_(rpc_state);
+ evhttp_send_error(req, HTTP_SERVUNAVAIL, NULL);
+ return;
+}
+
+static void
+evrpc_request_cb_closure(void *arg, enum EVRPC_HOOK_RESULT hook_res)
+{
+ struct evrpc_req_generic *rpc_state = arg;
+ struct evrpc *rpc;
+ struct evhttp_request *req;
+
+ EVUTIL_ASSERT(rpc_state);
+ rpc = rpc_state->rpc;
+ req = rpc_state->http_req;
+
+ if (hook_res == EVRPC_TERMINATE)
+ goto error;
+
+ /* let's check that we can parse the request */
+ rpc_state->request = rpc->request_new(rpc->request_new_arg);
+ if (rpc_state->request == NULL)
+ goto error;
+
+ if (rpc->request_unmarshal(
+ rpc_state->request, req->input_buffer) == -1) {
+ /* we failed to parse the request; that's a bummer */
+ goto error;
+ }
+
+ /* at this point, we have a well formed request, prepare the reply */
+
+ rpc_state->reply = rpc->reply_new(rpc->reply_new_arg);
+ if (rpc_state->reply == NULL)
+ goto error;
+
+ /* give the rpc to the user; they can deal with it */
+ rpc->cb(rpc_state, rpc->cb_arg);
+
+ return;
+
+error:
+ evrpc_reqstate_free_(rpc_state);
+ evhttp_send_error(req, HTTP_SERVUNAVAIL, NULL);
+ return;
+}
+
+
+void
+evrpc_reqstate_free_(struct evrpc_req_generic* rpc_state)
+{
+ struct evrpc *rpc;
+ EVUTIL_ASSERT(rpc_state != NULL);
+ rpc = rpc_state->rpc;
+
+ /* clean up all memory */
+ if (rpc_state->hook_meta != NULL)
+ evrpc_hook_context_free_(rpc_state->hook_meta);
+ if (rpc_state->request != NULL)
+ rpc->request_free(rpc_state->request);
+ if (rpc_state->reply != NULL)
+ rpc->reply_free(rpc_state->reply);
+ if (rpc_state->rpc_data != NULL)
+ evbuffer_free(rpc_state->rpc_data);
+ mm_free(rpc_state);
+}
+
+static void
+evrpc_request_done_closure(void *, enum EVRPC_HOOK_RESULT);
+
+void
+evrpc_request_done(struct evrpc_req_generic *rpc_state)
+{
+ struct evhttp_request *req;
+ struct evrpc *rpc;
+
+ EVUTIL_ASSERT(rpc_state);
+
+ req = rpc_state->http_req;
+ rpc = rpc_state->rpc;
+
+ if (rpc->reply_complete(rpc_state->reply) == -1) {
+ /* the reply was not completely filled in. error out */
+ goto error;
+ }
+
+ if ((rpc_state->rpc_data = evbuffer_new()) == NULL) {
+ /* out of memory */
+ goto error;
+ }
+
+ /* serialize the reply */
+ rpc->reply_marshal(rpc_state->rpc_data, rpc_state->reply);
+
+ if (TAILQ_FIRST(&rpc->base->output_hooks) != NULL) {
+ int hook_res;
+
+ evrpc_hook_associate_meta_(&rpc_state->hook_meta, req->evcon);
+
+ /* do hook based tweaks to the request */
+ hook_res = evrpc_process_hooks(&rpc->base->output_hooks,
+ rpc_state, req, rpc_state->rpc_data);
+ switch (hook_res) {
+ case EVRPC_TERMINATE:
+ goto error;
+ case EVRPC_PAUSE:
+ if (evrpc_pause_request(rpc->base, rpc_state,
+ evrpc_request_done_closure) == -1)
+ goto error;
+ return;
+ case EVRPC_CONTINUE:
+ break;
+ default:
+ EVUTIL_ASSERT(hook_res == EVRPC_TERMINATE ||
+ hook_res == EVRPC_CONTINUE ||
+ hook_res == EVRPC_PAUSE);
+ }
+ }
+
+ evrpc_request_done_closure(rpc_state, EVRPC_CONTINUE);
+ return;
+
+error:
+ evrpc_reqstate_free_(rpc_state);
+ evhttp_send_error(req, HTTP_SERVUNAVAIL, NULL);
+ return;
+}
+
+void *
+evrpc_get_request(struct evrpc_req_generic *req)
+{
+ return req->request;
+}
+
+void *
+evrpc_get_reply(struct evrpc_req_generic *req)
+{
+ return req->reply;
+}
+
+static void
+evrpc_request_done_closure(void *arg, enum EVRPC_HOOK_RESULT hook_res)
+{
+ struct evrpc_req_generic *rpc_state = arg;
+ struct evhttp_request *req;
+ EVUTIL_ASSERT(rpc_state);
+ req = rpc_state->http_req;
+
+ if (hook_res == EVRPC_TERMINATE)
+ goto error;
+
+ /* on success, we are going to transmit marshaled binary data */
+ if (evhttp_find_header(req->output_headers, "Content-Type") == NULL) {
+ evhttp_add_header(req->output_headers,
+ "Content-Type", "application/octet-stream");
+ }
+ evhttp_send_reply(req, HTTP_OK, "OK", rpc_state->rpc_data);
+
+ evrpc_reqstate_free_(rpc_state);
+
+ return;
+
+error:
+ evrpc_reqstate_free_(rpc_state);
+ evhttp_send_error(req, HTTP_SERVUNAVAIL, NULL);
+ return;
+}
+
+
+/* Client implementation of RPC site */
+
+static int evrpc_schedule_request(struct evhttp_connection *connection,
+ struct evrpc_request_wrapper *ctx);
+
+struct evrpc_pool *
+evrpc_pool_new(struct event_base *base)
+{
+ struct evrpc_pool *pool = mm_calloc(1, sizeof(struct evrpc_pool));
+ if (pool == NULL)
+ return (NULL);
+
+ TAILQ_INIT(&pool->connections);
+ TAILQ_INIT(&pool->requests);
+
+ TAILQ_INIT(&pool->paused_requests);
+
+ TAILQ_INIT(&pool->input_hooks);
+ TAILQ_INIT(&pool->output_hooks);
+
+ pool->base = base;
+ pool->timeout = -1;
+
+ return (pool);
+}
+
+static void
+evrpc_request_wrapper_free(struct evrpc_request_wrapper *request)
+{
+ if (request->hook_meta != NULL)
+ evrpc_hook_context_free_(request->hook_meta);
+ mm_free(request->name);
+ mm_free(request);
+}
+
+void
+evrpc_pool_free(struct evrpc_pool *pool)
+{
+ struct evhttp_connection *connection;
+ struct evrpc_request_wrapper *request;
+ struct evrpc_hook_ctx *pause;
+ struct evrpc_hook *hook;
+ int r;
+
+ while ((request = TAILQ_FIRST(&pool->requests)) != NULL) {
+ TAILQ_REMOVE(&pool->requests, request, next);
+ evrpc_request_wrapper_free(request);
+ }
+
+ while ((pause = TAILQ_FIRST(&pool->paused_requests)) != NULL) {
+ TAILQ_REMOVE(&pool->paused_requests, pause, next);
+ mm_free(pause);
+ }
+
+ while ((connection = TAILQ_FIRST(&pool->connections)) != NULL) {
+ TAILQ_REMOVE(&pool->connections, connection, next);
+ evhttp_connection_free(connection);
+ }
+
+ while ((hook = TAILQ_FIRST(&pool->input_hooks)) != NULL) {
+ r = evrpc_remove_hook(pool, EVRPC_INPUT, hook);
+ EVUTIL_ASSERT(r);
+ }
+
+ while ((hook = TAILQ_FIRST(&pool->output_hooks)) != NULL) {
+ r = evrpc_remove_hook(pool, EVRPC_OUTPUT, hook);
+ EVUTIL_ASSERT(r);
+ }
+
+ mm_free(pool);
+}
+
+/*
+ * Add a connection to the RPC pool. A request scheduled on the pool
+ * may use any available connection.
+ */
+
+void
+evrpc_pool_add_connection(struct evrpc_pool *pool,
+ struct evhttp_connection *connection)
+{
+ EVUTIL_ASSERT(connection->http_server == NULL);
+ TAILQ_INSERT_TAIL(&pool->connections, connection, next);
+
+ /*
+ * associate an event base with this connection
+ */
+ if (pool->base != NULL)
+ evhttp_connection_set_base(connection, pool->base);
+
+ /*
+ * unless a timeout was specifically set for a connection,
+ * the connection inherits the timeout from the pool.
+ */
+ if (!evutil_timerisset(&connection->timeout))
+ evhttp_connection_set_timeout(connection, pool->timeout);
+
+ /*
+	 * if we have any requests pending, schedule one of them on the new
+	 * connection.
+ */
+
+ if (TAILQ_FIRST(&pool->requests) != NULL) {
+ struct evrpc_request_wrapper *request =
+ TAILQ_FIRST(&pool->requests);
+ TAILQ_REMOVE(&pool->requests, request, next);
+ evrpc_schedule_request(connection, request);
+ }
+}
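+
+/*
+ * Illustrative sketch (not part of libevent): typical client-side setup.
+ * The address and port are placeholders.
+ *
+ *	struct evrpc_pool *pool = evrpc_pool_new(base);
+ *	struct evhttp_connection *conn =
+ *	    evhttp_connection_base_new(base, NULL, "127.0.0.1", 8080);
+ *	evrpc_pool_add_connection(pool, conn);
+ *	evrpc_pool_set_timeout(pool, 30);
+ */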
+
+void
+evrpc_pool_remove_connection(struct evrpc_pool *pool,
+ struct evhttp_connection *connection)
+{
+ TAILQ_REMOVE(&pool->connections, connection, next);
+}
+
+void
+evrpc_pool_set_timeout(struct evrpc_pool *pool, int timeout_in_secs)
+{
+ struct evhttp_connection *evcon;
+ TAILQ_FOREACH(evcon, &pool->connections, next) {
+ evhttp_connection_set_timeout(evcon, timeout_in_secs);
+ }
+ pool->timeout = timeout_in_secs;
+}
+
+
+static void evrpc_reply_done(struct evhttp_request *, void *);
+static void evrpc_request_timeout(evutil_socket_t, short, void *);
+
+/*
+ * Finds a connection object associated with the pool that is currently
+ * idle and can be used to make a request.
+ */
+static struct evhttp_connection *
+evrpc_pool_find_connection(struct evrpc_pool *pool)
+{
+ struct evhttp_connection *connection;
+ TAILQ_FOREACH(connection, &pool->connections, next) {
+ if (TAILQ_FIRST(&connection->requests) == NULL)
+ return (connection);
+ }
+
+ return (NULL);
+}
+
+/*
+ * Prototypes responsible for evrpc scheduling and hooking
+ */
+
+static void evrpc_schedule_request_closure(void *ctx, enum EVRPC_HOOK_RESULT);
+
+/*
+ * We assume that the ctx is no longer queued on the pool.
+ */
+static int
+evrpc_schedule_request(struct evhttp_connection *connection,
+ struct evrpc_request_wrapper *ctx)
+{
+ struct evhttp_request *req = NULL;
+ struct evrpc_pool *pool = ctx->pool;
+ struct evrpc_status status;
+
+ if ((req = evhttp_request_new(evrpc_reply_done, ctx)) == NULL)
+ goto error;
+
+ /* serialize the request data into the output buffer */
+ ctx->request_marshal(req->output_buffer, ctx->request);
+
+ /* we need to know the connection that we might have to abort */
+ ctx->evcon = connection;
+
+ /* if we get paused we also need to know the request */
+ ctx->req = req;
+
+ if (TAILQ_FIRST(&pool->output_hooks) != NULL) {
+ int hook_res;
+
+ evrpc_hook_associate_meta_(&ctx->hook_meta, connection);
+
+ /* apply hooks to the outgoing request */
+ hook_res = evrpc_process_hooks(&pool->output_hooks,
+ ctx, req, req->output_buffer);
+
+ switch (hook_res) {
+ case EVRPC_TERMINATE:
+ goto error;
+ case EVRPC_PAUSE:
+ /* we need to be explicitly resumed */
+ if (evrpc_pause_request(pool, ctx,
+ evrpc_schedule_request_closure) == -1)
+ goto error;
+ return (0);
+ case EVRPC_CONTINUE:
+ /* we can just continue */
+ break;
+ default:
+ EVUTIL_ASSERT(hook_res == EVRPC_TERMINATE ||
+ hook_res == EVRPC_CONTINUE ||
+ hook_res == EVRPC_PAUSE);
+ }
+ }
+
+ evrpc_schedule_request_closure(ctx, EVRPC_CONTINUE);
+ return (0);
+
+error:
+ memset(&status, 0, sizeof(status));
+ status.error = EVRPC_STATUS_ERR_UNSTARTED;
+ (*ctx->cb)(&status, ctx->request, ctx->reply, ctx->cb_arg);
+ evrpc_request_wrapper_free(ctx);
+ return (-1);
+}
+
+static void
+evrpc_schedule_request_closure(void *arg, enum EVRPC_HOOK_RESULT hook_res)
+{
+ struct evrpc_request_wrapper *ctx = arg;
+ struct evhttp_connection *connection = ctx->evcon;
+ struct evhttp_request *req = ctx->req;
+ struct evrpc_pool *pool = ctx->pool;
+ struct evrpc_status status;
+ char *uri = NULL;
+ int res = 0;
+
+ if (hook_res == EVRPC_TERMINATE)
+ goto error;
+
+ uri = evrpc_construct_uri(ctx->name);
+ if (uri == NULL)
+ goto error;
+
+ if (pool->timeout > 0) {
+ /*
+ * a timeout after which the whole rpc is going to be aborted.
+ */
+ struct timeval tv;
+ evutil_timerclear(&tv);
+ tv.tv_sec = pool->timeout;
+ evtimer_add(&ctx->ev_timeout, &tv);
+ }
+
+ /* start the request over the connection */
+ res = evhttp_make_request(connection, req, EVHTTP_REQ_POST, uri);
+ mm_free(uri);
+
+ if (res == -1)
+ goto error;
+
+ return;
+
+error:
+ memset(&status, 0, sizeof(status));
+ status.error = EVRPC_STATUS_ERR_UNSTARTED;
+ (*ctx->cb)(&status, ctx->request, ctx->reply, ctx->cb_arg);
+ evrpc_request_wrapper_free(ctx);
+}
+
+/* we just queue the paused request on the pool under the req object */
+static int
+evrpc_pause_request(void *vbase, void *ctx,
+ void (*cb)(void *, enum EVRPC_HOOK_RESULT))
+{
+ struct evrpc_hooks_ *base = vbase;
+ struct evrpc_hook_ctx *pause = mm_malloc(sizeof(*pause));
+ if (pause == NULL)
+ return (-1);
+
+ pause->ctx = ctx;
+ pause->cb = cb;
+
+ TAILQ_INSERT_TAIL(&base->pause_requests, pause, next);
+ return (0);
+}
+
+int
+evrpc_resume_request(void *vbase, void *ctx, enum EVRPC_HOOK_RESULT res)
+{
+ struct evrpc_hooks_ *base = vbase;
+ struct evrpc_pause_list *head = &base->pause_requests;
+ struct evrpc_hook_ctx *pause;
+
+ TAILQ_FOREACH(pause, head, next) {
+ if (pause->ctx == ctx)
+ break;
+ }
+
+ if (pause == NULL)
+ return (-1);
+
+ (*pause->cb)(pause->ctx, res);
+ TAILQ_REMOVE(head, pause, next);
+ mm_free(pause);
+ return (0);
+}
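+
+/*
+ * Illustrative sketch (not part of libevent): a hook may return EVRPC_PAUSE
+ * and finish its decision later with evrpc_resume_request().  The names
+ * start_async_check and rpc_base are placeholders.
+ *
+ *	static int
+ *	deferring_hook(void *ctx, struct evhttp_request *req,
+ *	    struct evbuffer *evbuf, void *arg)
+ *	{
+ *		start_async_check(ctx);
+ *		return (EVRPC_PAUSE);
+ *	}
+ *
+ * Once the asynchronous work has finished:
+ *
+ *	evrpc_resume_request(rpc_base, ctx, EVRPC_CONTINUE);
+ */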
+
+int
+evrpc_make_request(struct evrpc_request_wrapper *ctx)
+{
+ struct evrpc_pool *pool = ctx->pool;
+
+ /* initialize the event structure for this rpc */
+ evtimer_assign(&ctx->ev_timeout, pool->base, evrpc_request_timeout, ctx);
+
+ /* we better have some available connections on the pool */
+ EVUTIL_ASSERT(TAILQ_FIRST(&pool->connections) != NULL);
+
+ /*
+	 * if no connection is available, we queue the request on the pool;
+	 * the next time a connection becomes idle, the rpc will be sent on it.
+ */
+ TAILQ_INSERT_TAIL(&pool->requests, ctx, next);
+
+ evrpc_pool_schedule(pool);
+
+ return (0);
+}
+
+
+struct evrpc_request_wrapper *
+evrpc_make_request_ctx(
+ struct evrpc_pool *pool, void *request, void *reply,
+ const char *rpcname,
+ void (*req_marshal)(struct evbuffer*, void *),
+ void (*rpl_clear)(void *),
+ int (*rpl_unmarshal)(void *, struct evbuffer *),
+ void (*cb)(struct evrpc_status *, void *, void *, void *),
+ void *cbarg)
+{
+ struct evrpc_request_wrapper *ctx = (struct evrpc_request_wrapper *)
+ mm_malloc(sizeof(struct evrpc_request_wrapper));
+ if (ctx == NULL)
+ return (NULL);
+
+ ctx->pool = pool;
+ ctx->hook_meta = NULL;
+ ctx->evcon = NULL;
+ ctx->name = mm_strdup(rpcname);
+ if (ctx->name == NULL) {
+ mm_free(ctx);
+ return (NULL);
+ }
+ ctx->cb = cb;
+ ctx->cb_arg = cbarg;
+ ctx->request = request;
+ ctx->reply = reply;
+ ctx->request_marshal = req_marshal;
+ ctx->reply_clear = rpl_clear;
+ ctx->reply_unmarshal = rpl_unmarshal;
+
+ return (ctx);
+}
+
+static void
+evrpc_reply_done_closure(void *, enum EVRPC_HOOK_RESULT);
+
+static void
+evrpc_reply_done(struct evhttp_request *req, void *arg)
+{
+ struct evrpc_request_wrapper *ctx = arg;
+ struct evrpc_pool *pool = ctx->pool;
+ int hook_res = EVRPC_CONTINUE;
+
+ /* cancel any timeout we might have scheduled */
+ event_del(&ctx->ev_timeout);
+
+ ctx->req = req;
+
+ /* we need to get the reply now */
+ if (req == NULL) {
+ evrpc_reply_done_closure(ctx, EVRPC_CONTINUE);
+ return;
+ }
+
+ if (TAILQ_FIRST(&pool->input_hooks) != NULL) {
+ evrpc_hook_associate_meta_(&ctx->hook_meta, ctx->evcon);
+
+ /* apply hooks to the incoming request */
+ hook_res = evrpc_process_hooks(&pool->input_hooks,
+ ctx, req, req->input_buffer);
+
+ switch (hook_res) {
+ case EVRPC_TERMINATE:
+ case EVRPC_CONTINUE:
+ break;
+ case EVRPC_PAUSE:
+ /*
+ * if we get paused we also need to know the
+ * request. unfortunately, the underlying
+ * layer is going to free it. we need to
+ * request ownership explicitly
+ */
+ if (req != NULL)
+ evhttp_request_own(req);
+
+ evrpc_pause_request(pool, ctx,
+ evrpc_reply_done_closure);
+ return;
+ default:
+ EVUTIL_ASSERT(hook_res == EVRPC_TERMINATE ||
+ hook_res == EVRPC_CONTINUE ||
+ hook_res == EVRPC_PAUSE);
+ }
+ }
+
+ evrpc_reply_done_closure(ctx, hook_res);
+
+ /* http request is being freed by underlying layer */
+}
+
+static void
+evrpc_reply_done_closure(void *arg, enum EVRPC_HOOK_RESULT hook_res)
+{
+ struct evrpc_request_wrapper *ctx = arg;
+ struct evhttp_request *req = ctx->req;
+ struct evrpc_pool *pool = ctx->pool;
+ struct evrpc_status status;
+ int res = -1;
+
+ memset(&status, 0, sizeof(status));
+ status.http_req = req;
+
+ /* we need to get the reply now */
+ if (req == NULL) {
+ status.error = EVRPC_STATUS_ERR_TIMEOUT;
+ } else if (hook_res == EVRPC_TERMINATE) {
+ status.error = EVRPC_STATUS_ERR_HOOKABORTED;
+ } else {
+ res = ctx->reply_unmarshal(ctx->reply, req->input_buffer);
+ if (res == -1)
+ status.error = EVRPC_STATUS_ERR_BADPAYLOAD;
+ }
+
+ if (res == -1) {
+ /* clear everything that we might have written previously */
+ ctx->reply_clear(ctx->reply);
+ }
+
+ (*ctx->cb)(&status, ctx->request, ctx->reply, ctx->cb_arg);
+
+ evrpc_request_wrapper_free(ctx);
+
+ /* the http layer owned the original request structure, but if we
+ * got paused, we asked for ownership and need to free it here. */
+ if (req != NULL && evhttp_request_is_owned(req))
+ evhttp_request_free(req);
+
+ /* see if we can schedule another request */
+ evrpc_pool_schedule(pool);
+}
+
+static void
+evrpc_pool_schedule(struct evrpc_pool *pool)
+{
+ struct evrpc_request_wrapper *ctx = TAILQ_FIRST(&pool->requests);
+ struct evhttp_connection *evcon;
+
+ /* if no requests are pending, we have no work */
+ if (ctx == NULL)
+ return;
+
+ if ((evcon = evrpc_pool_find_connection(pool)) != NULL) {
+ TAILQ_REMOVE(&pool->requests, ctx, next);
+ evrpc_schedule_request(evcon, ctx);
+ }
+}
+
+static void
+evrpc_request_timeout(evutil_socket_t fd, short what, void *arg)
+{
+ struct evrpc_request_wrapper *ctx = arg;
+ struct evhttp_connection *evcon = ctx->evcon;
+ EVUTIL_ASSERT(evcon != NULL);
+
+ evhttp_connection_fail_(evcon, EVREQ_HTTP_TIMEOUT);
+}
+
+/*
+ * frees potential meta data associated with a request.
+ */
+
+static void
+evrpc_meta_data_free(struct evrpc_meta_list *meta_data)
+{
+ struct evrpc_meta *entry;
+ EVUTIL_ASSERT(meta_data != NULL);
+
+ while ((entry = TAILQ_FIRST(meta_data)) != NULL) {
+ TAILQ_REMOVE(meta_data, entry, next);
+ mm_free(entry->key);
+ mm_free(entry->data);
+ mm_free(entry);
+ }
+}
+
+static struct evrpc_hook_meta *
+evrpc_hook_meta_new_(void)
+{
+ struct evrpc_hook_meta *ctx;
+ ctx = mm_malloc(sizeof(struct evrpc_hook_meta));
+ EVUTIL_ASSERT(ctx != NULL);
+
+ TAILQ_INIT(&ctx->meta_data);
+ ctx->evcon = NULL;
+
+ return (ctx);
+}
+
+static void
+evrpc_hook_associate_meta_(struct evrpc_hook_meta **pctx,
+ struct evhttp_connection *evcon)
+{
+ struct evrpc_hook_meta *ctx = *pctx;
+ if (ctx == NULL)
+ *pctx = ctx = evrpc_hook_meta_new_();
+ ctx->evcon = evcon;
+}
+
+static void
+evrpc_hook_context_free_(struct evrpc_hook_meta *ctx)
+{
+ evrpc_meta_data_free(&ctx->meta_data);
+ mm_free(ctx);
+}
+
+/* Adds meta data */
+void
+evrpc_hook_add_meta(void *ctx, const char *key,
+ const void *data, size_t data_size)
+{
+ struct evrpc_request_wrapper *req = ctx;
+ struct evrpc_hook_meta *store = NULL;
+ struct evrpc_meta *meta = NULL;
+
+ if ((store = req->hook_meta) == NULL)
+ store = req->hook_meta = evrpc_hook_meta_new_();
+
+ meta = mm_malloc(sizeof(struct evrpc_meta));
+ EVUTIL_ASSERT(meta != NULL);
+ meta->key = mm_strdup(key);
+ EVUTIL_ASSERT(meta->key != NULL);
+ meta->data_size = data_size;
+ meta->data = mm_malloc(data_size);
+ EVUTIL_ASSERT(meta->data != NULL);
+ memcpy(meta->data, data, data_size);
+
+ TAILQ_INSERT_TAIL(&store->meta_data, meta, next);
+}
+
+int
+evrpc_hook_find_meta(void *ctx, const char *key, void **data, size_t *data_size)
+{
+ struct evrpc_request_wrapper *req = ctx;
+ struct evrpc_meta *meta = NULL;
+
+ if (req->hook_meta == NULL)
+ return (-1);
+
+ TAILQ_FOREACH(meta, &req->hook_meta->meta_data, next) {
+ if (strcmp(meta->key, key) == 0) {
+ *data = meta->data;
+ *data_size = meta->data_size;
+ return (0);
+ }
+ }
+
+ return (-1);
+}
+
+struct evhttp_connection *
+evrpc_hook_get_connection(void *ctx)
+{
+ struct evrpc_request_wrapper *req = ctx;
+ return (req->hook_meta != NULL ? req->hook_meta->evcon : NULL);
+}
+
+int
+evrpc_send_request_generic(struct evrpc_pool *pool,
+ void *request, void *reply,
+ void (*cb)(struct evrpc_status *, void *, void *, void *),
+ void *cb_arg,
+ const char *rpcname,
+ void (*req_marshal)(struct evbuffer *, void *),
+ void (*rpl_clear)(void *),
+ int (*rpl_unmarshal)(void *, struct evbuffer *))
+{
+ struct evrpc_status status;
+ struct evrpc_request_wrapper *ctx;
+ ctx = evrpc_make_request_ctx(pool, request, reply,
+ rpcname, req_marshal, rpl_clear, rpl_unmarshal, cb, cb_arg);
+ if (ctx == NULL)
+ goto error;
+ return (evrpc_make_request(ctx));
+error:
+ memset(&status, 0, sizeof(status));
+ status.error = EVRPC_STATUS_ERR_UNSTARTED;
+ (*(cb))(&status, request, reply, cb_arg);
+ return (-1);
+}
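+
+/*
+ * Illustrative sketch (not part of libevent): generated code typically
+ * reaches this function through the EVRPC_MAKE_REQUEST() macro from
+ * event2/rpc.h, e.g.
+ *
+ *	EVRPC_MAKE_REQUEST(MyMessage, pool, request, reply, my_done_cb, NULL);
+ *
+ * where MyMessage and my_done_cb are placeholders.
+ */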
+
+/** Allocates a new evrpc object for the named RPC and fills in its marshaling callbacks */
+static struct evrpc *
+evrpc_register_object(const char *name,
+ void *(*req_new)(void*), void *req_new_arg, void (*req_free)(void *),
+ int (*req_unmarshal)(void *, struct evbuffer *),
+ void *(*rpl_new)(void*), void *rpl_new_arg, void (*rpl_free)(void *),
+ int (*rpl_complete)(void *),
+ void (*rpl_marshal)(struct evbuffer *, void *))
+{
+ struct evrpc* rpc = (struct evrpc *)mm_calloc(1, sizeof(struct evrpc));
+ if (rpc == NULL)
+ return (NULL);
+ rpc->uri = mm_strdup(name);
+ if (rpc->uri == NULL) {
+ mm_free(rpc);
+ return (NULL);
+ }
+ rpc->request_new = req_new;
+ rpc->request_new_arg = req_new_arg;
+ rpc->request_free = req_free;
+ rpc->request_unmarshal = req_unmarshal;
+ rpc->reply_new = rpl_new;
+ rpc->reply_new_arg = rpl_new_arg;
+ rpc->reply_free = rpl_free;
+ rpc->reply_complete = rpl_complete;
+ rpc->reply_marshal = rpl_marshal;
+ return (rpc);
+}
+
+int
+evrpc_register_generic(struct evrpc_base *base, const char *name,
+ void (*callback)(struct evrpc_req_generic *, void *), void *cbarg,
+ void *(*req_new)(void *), void *req_new_arg, void (*req_free)(void *),
+ int (*req_unmarshal)(void *, struct evbuffer *),
+ void *(*rpl_new)(void *), void *rpl_new_arg, void (*rpl_free)(void *),
+ int (*rpl_complete)(void *),
+ void (*rpl_marshal)(struct evbuffer *, void *))
+{
+ struct evrpc* rpc =
+ evrpc_register_object(name, req_new, req_new_arg, req_free, req_unmarshal,
+ rpl_new, rpl_new_arg, rpl_free, rpl_complete, rpl_marshal);
+ if (rpc == NULL)
+ return (-1);
+ evrpc_register_rpc(base, rpc,
+ (void (*)(struct evrpc_req_generic*, void *))callback, cbarg);
+ return (0);
+}
+
+/** accessors for obscure and undocumented functionality */
+struct evrpc_pool *
+evrpc_request_get_pool(struct evrpc_request_wrapper *ctx)
+{
+ return (ctx->pool);
+}
+
+void
+evrpc_request_set_pool(struct evrpc_request_wrapper *ctx,
+ struct evrpc_pool *pool)
+{
+ ctx->pool = pool;
+}
+
+void
+evrpc_request_set_cb(struct evrpc_request_wrapper *ctx,
+ void (*cb)(struct evrpc_status*, void *request, void *reply, void *arg),
+ void *cb_arg)
+{
+ ctx->cb = cb;
+ ctx->cb_arg = cb_arg;
+}
diff --git a/libs/libevent/src/evsignal-internal.h b/libs/libevent/src/evsignal-internal.h
new file mode 100644
index 0000000000..5cff03b525
--- /dev/null
+++ b/libs/libevent/src/evsignal-internal.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2000-2007 Niels Provos <provos@citi.umich.edu>
+ * Copyright 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef EVSIGNAL_INTERNAL_H_INCLUDED_
+#define EVSIGNAL_INTERNAL_H_INCLUDED_
+
+#ifndef evutil_socket_t
+#include "event2/util.h"
+#endif
+#include <signal.h>
+
+typedef void (*ev_sighandler_t)(int);
+
+/* Data structure for the default signal-handling implementation in signal.c
+ */
+struct evsig_info {
+ /* Event watching ev_signal_pair[1] */
+ struct event ev_signal;
+ /* Socketpair used to send notifications from the signal handler */
+ evutil_socket_t ev_signal_pair[2];
+	/* True iff we've added the ev_signal event. */
+ int ev_signal_added;
+ /* Count of the number of signals we're currently watching. */
+ int ev_n_signals_added;
+
+ /* Array of previous signal handler objects before Libevent started
+ * messing with them. Used to restore old signal handlers. */
+#ifdef EVENT__HAVE_SIGACTION
+ struct sigaction **sh_old;
+#else
+ ev_sighandler_t **sh_old;
+#endif
+ /* Size of sh_old. */
+ int sh_old_max;
+};
+int evsig_init_(struct event_base *);
+void evsig_dealloc_(struct event_base *);
+
+void evsig_set_base_(struct event_base *base);
+void evsig_free_globals_(void);
+
+#endif /* EVSIGNAL_INTERNAL_H_INCLUDED_ */
diff --git a/libs/libevent/src/evthread-internal.h b/libs/libevent/src/evthread-internal.h
new file mode 100644
index 0000000000..efdecf81e7
--- /dev/null
+++ b/libs/libevent/src/evthread-internal.h
@@ -0,0 +1,392 @@
+/*
+ * Copyright (c) 2008-2012 Niels Provos, Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef EVTHREAD_INTERNAL_H_INCLUDED_
+#define EVTHREAD_INTERNAL_H_INCLUDED_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "event2/event-config.h"
+#include "evconfig-private.h"
+
+#include "event2/thread.h"
+#include "util-internal.h"
+
+struct event_base;
+
+#ifndef _WIN32
+/* On Windows, the way we currently build DLLs does not let us share global
+ * structures, so we only take the direct-call-to-function code path when we
+ * know that the local shared-library system supports it.
+ */
+#define EVTHREAD_EXPOSE_STRUCTS
+#endif
+
+#if ! defined(EVENT__DISABLE_THREAD_SUPPORT) && defined(EVTHREAD_EXPOSE_STRUCTS)
+/* Global function pointers to lock-related functions. NULL if locking isn't
+ enabled. */
+extern struct evthread_lock_callbacks evthread_lock_fns_;
+extern struct evthread_condition_callbacks evthread_cond_fns_;
+extern unsigned long (*evthread_id_fn_)(void);
+extern int evthread_lock_debugging_enabled_;
+
+/** Return the ID of the current thread, or 1 if threading isn't enabled. */
+#define EVTHREAD_GET_ID() \
+ (evthread_id_fn_ ? evthread_id_fn_() : 1)
+
+/** Return true iff we're in the thread that is currently (or most recently)
+ * running a given event_base's loop. Requires lock. */
+#define EVBASE_IN_THREAD(base) \
+ (evthread_id_fn_ == NULL || \
+ (base)->th_owner_id == evthread_id_fn_())
+
+/** Return true iff we need to notify the base's main thread about changes to
+ * its state, because it's currently running the main loop in another
+ * thread. Requires lock. */
+#define EVBASE_NEED_NOTIFY(base) \
+ (evthread_id_fn_ != NULL && \
+ (base)->running_loop && \
+ (base)->th_owner_id != evthread_id_fn_())
+
+/** Allocate a new lock, and store it in lockvar, a void*. Sets lockvar to
+ NULL if locking is not enabled. */
+#define EVTHREAD_ALLOC_LOCK(lockvar, locktype) \
+ ((lockvar) = evthread_lock_fns_.alloc ? \
+ evthread_lock_fns_.alloc(locktype) : NULL)
+
+/** Free a given lock, if it is present and locking is enabled. */
+#define EVTHREAD_FREE_LOCK(lockvar, locktype) \
+ do { \
+ void *lock_tmp_ = (lockvar); \
+ if (lock_tmp_ && evthread_lock_fns_.free) \
+ evthread_lock_fns_.free(lock_tmp_, (locktype)); \
+ } while (0)
+
+/** Acquire a lock. */
+#define EVLOCK_LOCK(lockvar,mode) \
+ do { \
+ if (lockvar) \
+ evthread_lock_fns_.lock(mode, lockvar); \
+ } while (0)
+
+/** Release a lock */
+#define EVLOCK_UNLOCK(lockvar,mode) \
+ do { \
+ if (lockvar) \
+ evthread_lock_fns_.unlock(mode, lockvar); \
+ } while (0)
+
+/** Helper: put lockvar1 and lockvar2 into pointerwise ascending order. */
+#define EVLOCK_SORTLOCKS_(lockvar1, lockvar2) \
+ do { \
+ if (lockvar1 && lockvar2 && lockvar1 > lockvar2) { \
+ void *tmp = lockvar1; \
+ lockvar1 = lockvar2; \
+ lockvar2 = tmp; \
+ } \
+ } while (0)
+
+/** Lock an event_base, if it is set up for locking. Acquires the lock
+ in the base structure whose field is named 'lockvar'. */
+#define EVBASE_ACQUIRE_LOCK(base, lockvar) do { \
+ EVLOCK_LOCK((base)->lockvar, 0); \
+ } while (0)
+
+/** Unlock an event_base, if it is set up for locking. */
+#define EVBASE_RELEASE_LOCK(base, lockvar) do { \
+ EVLOCK_UNLOCK((base)->lockvar, 0); \
+ } while (0)
+
+/** If lock debugging is enabled, and lock is non-null, assert that 'lock' is
+ * locked and held by us. */
+#define EVLOCK_ASSERT_LOCKED(lock) \
+ do { \
+ if ((lock) && evthread_lock_debugging_enabled_) { \
+ EVUTIL_ASSERT(evthread_is_debug_lock_held_(lock)); \
+ } \
+ } while (0)
+
+/** Try to grab the lock for 'lockvar' without blocking, and return 1 if we
+ * manage to get it. */
+static inline int EVLOCK_TRY_LOCK_(void *lock);
+static inline int
+EVLOCK_TRY_LOCK_(void *lock)
+{
+ if (lock && evthread_lock_fns_.lock) {
+ int r = evthread_lock_fns_.lock(EVTHREAD_TRY, lock);
+ return !r;
+ } else {
+ /* Locking is disabled either globally or for this thing;
+ * of course we count as having the lock. */
+ return 1;
+ }
+}
+
+/** Allocate a new condition variable and store it in the void *, condvar */
+#define EVTHREAD_ALLOC_COND(condvar) \
+ do { \
+ (condvar) = evthread_cond_fns_.alloc_condition ? \
+ evthread_cond_fns_.alloc_condition(0) : NULL; \
+ } while (0)
+/** Deallocate and free a condition variable in condvar */
+#define EVTHREAD_FREE_COND(cond) \
+ do { \
+ if (cond) \
+ evthread_cond_fns_.free_condition((cond)); \
+ } while (0)
+/** Signal one thread waiting on cond */
+#define EVTHREAD_COND_SIGNAL(cond) \
+ ( (cond) ? evthread_cond_fns_.signal_condition((cond), 0) : 0 )
+/** Signal all threads waiting on cond */
+#define EVTHREAD_COND_BROADCAST(cond) \
+ ( (cond) ? evthread_cond_fns_.signal_condition((cond), 1) : 0 )
+/** Wait until the condition 'cond' is signalled. Must be called while
+ * holding 'lock'. The lock will be released until the condition is
+ * signalled, at which point it will be acquired again. Returns 0 for
+ * success, -1 for failure. */
+#define EVTHREAD_COND_WAIT(cond, lock) \
+ ( (cond) ? evthread_cond_fns_.wait_condition((cond), (lock), NULL) : 0 )
+/** As EVTHREAD_COND_WAIT, but gives up after 'tv' has elapsed. Returns 1
+ * on timeout. */
+#define EVTHREAD_COND_WAIT_TIMED(cond, lock, tv) \
+ ( (cond) ? evthread_cond_fns_.wait_condition((cond), (lock), (tv)) : 0 )
+
+/** True iff locking functions have been configured. */
+#define EVTHREAD_LOCKING_ENABLED() \
+ (evthread_lock_fns_.lock != NULL)
+
+#elif ! defined(EVENT__DISABLE_THREAD_SUPPORT)
+
+unsigned long evthreadimpl_get_id_(void);
+int evthreadimpl_is_lock_debugging_enabled_(void);
+void *evthreadimpl_lock_alloc_(unsigned locktype);
+void evthreadimpl_lock_free_(void *lock, unsigned locktype);
+int evthreadimpl_lock_lock_(unsigned mode, void *lock);
+int evthreadimpl_lock_unlock_(unsigned mode, void *lock);
+void *evthreadimpl_cond_alloc_(unsigned condtype);
+void evthreadimpl_cond_free_(void *cond);
+int evthreadimpl_cond_signal_(void *cond, int broadcast);
+int evthreadimpl_cond_wait_(void *cond, void *lock, const struct timeval *tv);
+int evthreadimpl_locking_enabled_(void);
+
+#define EVTHREAD_GET_ID() evthreadimpl_get_id_()
+#define EVBASE_IN_THREAD(base) \
+ ((base)->th_owner_id == evthreadimpl_get_id_())
+#define EVBASE_NEED_NOTIFY(base) \
+ ((base)->running_loop && \
+ ((base)->th_owner_id != evthreadimpl_get_id_()))
+
+#define EVTHREAD_ALLOC_LOCK(lockvar, locktype) \
+ ((lockvar) = evthreadimpl_lock_alloc_(locktype))
+
+#define EVTHREAD_FREE_LOCK(lockvar, locktype) \
+ do { \
+ void *lock_tmp_ = (lockvar); \
+ if (lock_tmp_) \
+ evthreadimpl_lock_free_(lock_tmp_, (locktype)); \
+ } while (0)
+
+/** Acquire a lock. */
+#define EVLOCK_LOCK(lockvar,mode) \
+ do { \
+ if (lockvar) \
+ evthreadimpl_lock_lock_(mode, lockvar); \
+ } while (0)
+
+/** Release a lock */
+#define EVLOCK_UNLOCK(lockvar,mode) \
+ do { \
+ if (lockvar) \
+ evthreadimpl_lock_unlock_(mode, lockvar); \
+ } while (0)
+
+/** Lock an event_base, if it is set up for locking. Acquires the lock
+ in the base structure whose field is named 'lockvar'. */
+#define EVBASE_ACQUIRE_LOCK(base, lockvar) do { \
+ EVLOCK_LOCK((base)->lockvar, 0); \
+ } while (0)
+
+/** Unlock an event_base, if it is set up for locking. */
+#define EVBASE_RELEASE_LOCK(base, lockvar) do { \
+ EVLOCK_UNLOCK((base)->lockvar, 0); \
+ } while (0)
+
+/** If lock debugging is enabled, and lock is non-null, assert that 'lock' is
+ * locked and held by us. */
+#define EVLOCK_ASSERT_LOCKED(lock) \
+ do { \
+ if ((lock) && evthreadimpl_is_lock_debugging_enabled_()) { \
+ EVUTIL_ASSERT(evthread_is_debug_lock_held_(lock)); \
+ } \
+ } while (0)
+
+/** Try to grab the lock for 'lockvar' without blocking, and return 1 if we
+ * manage to get it. */
+static inline int EVLOCK_TRY_LOCK_(void *lock);
+static inline int
+EVLOCK_TRY_LOCK_(void *lock)
+{
+ if (lock) {
+ int r = evthreadimpl_lock_lock_(EVTHREAD_TRY, lock);
+ return !r;
+ } else {
+ /* Locking is disabled either globally or for this thing;
+ * of course we count as having the lock. */
+ return 1;
+ }
+}
+
+/** Allocate a new condition variable and store it in the void *, condvar */
+#define EVTHREAD_ALLOC_COND(condvar) \
+ do { \
+ (condvar) = evthreadimpl_cond_alloc_(0); \
+ } while (0)
+/** Deallocate and free a condition variable in condvar */
+#define EVTHREAD_FREE_COND(cond) \
+ do { \
+ if (cond) \
+ evthreadimpl_cond_free_((cond)); \
+ } while (0)
+/** Signal one thread waiting on cond */
+#define EVTHREAD_COND_SIGNAL(cond) \
+ ( (cond) ? evthreadimpl_cond_signal_((cond), 0) : 0 )
+/** Signal all threads waiting on cond */
+#define EVTHREAD_COND_BROADCAST(cond) \
+ ( (cond) ? evthreadimpl_cond_signal_((cond), 1) : 0 )
+/** Wait until the condition 'cond' is signalled. Must be called while
+ * holding 'lock'. The lock will be released until the condition is
+ * signalled, at which point it will be acquired again. Returns 0 for
+ * success, -1 for failure. */
+#define EVTHREAD_COND_WAIT(cond, lock) \
+ ( (cond) ? evthreadimpl_cond_wait_((cond), (lock), NULL) : 0 )
+/** As EVTHREAD_COND_WAIT, but gives up after 'tv' has elapsed. Returns 1
+ * on timeout. */
+#define EVTHREAD_COND_WAIT_TIMED(cond, lock, tv) \
+ ( (cond) ? evthreadimpl_cond_wait_((cond), (lock), (tv)) : 0 )
+
+#define EVTHREAD_LOCKING_ENABLED() \
+ (evthreadimpl_locking_enabled_())
+
+#else /* EVENT__DISABLE_THREAD_SUPPORT */
+
+#define EVTHREAD_GET_ID() 1
+#define EVTHREAD_ALLOC_LOCK(lockvar, locktype) EVUTIL_NIL_STMT_
+#define EVTHREAD_FREE_LOCK(lockvar, locktype) EVUTIL_NIL_STMT_
+
+#define EVLOCK_LOCK(lockvar, mode) EVUTIL_NIL_STMT_
+#define EVLOCK_UNLOCK(lockvar, mode) EVUTIL_NIL_STMT_
+#define EVLOCK_LOCK2(lock1,lock2,mode1,mode2) EVUTIL_NIL_STMT_
+#define EVLOCK_UNLOCK2(lock1,lock2,mode1,mode2) EVUTIL_NIL_STMT_
+
+#define EVBASE_IN_THREAD(base) 1
+#define EVBASE_NEED_NOTIFY(base) 0
+#define EVBASE_ACQUIRE_LOCK(base, lock) EVUTIL_NIL_STMT_
+#define EVBASE_RELEASE_LOCK(base, lock) EVUTIL_NIL_STMT_
+#define EVLOCK_ASSERT_LOCKED(lock) EVUTIL_NIL_STMT_
+
+#define EVLOCK_TRY_LOCK_(lock) 1
+
+#define EVTHREAD_ALLOC_COND(condvar) EVUTIL_NIL_STMT_
+#define EVTHREAD_FREE_COND(cond) EVUTIL_NIL_STMT_
+#define EVTHREAD_COND_SIGNAL(cond) EVUTIL_NIL_STMT_
+#define EVTHREAD_COND_BROADCAST(cond) EVUTIL_NIL_STMT_
+#define EVTHREAD_COND_WAIT(cond, lock) EVUTIL_NIL_STMT_
+#define EVTHREAD_COND_WAIT_TIMED(cond, lock, howlong) EVUTIL_NIL_STMT_
+
+#define EVTHREAD_LOCKING_ENABLED() 0
+
+#endif
+
+/* This code is shared between both lock impls */
+#if ! defined(EVENT__DISABLE_THREAD_SUPPORT)
+/** Helper: put lockvar1 and lockvar2 into pointerwise ascending order. */
+#define EVLOCK_SORTLOCKS_(lockvar1, lockvar2) \
+ do { \
+ if (lockvar1 && lockvar2 && lockvar1 > lockvar2) { \
+ void *tmp = lockvar1; \
+ lockvar1 = lockvar2; \
+ lockvar2 = tmp; \
+ } \
+ } while (0)
+
+/** Acquire both lock1 and lock2.  Always acquires the locks in the same order,
+ * so that two threads locking two locks with LOCK2 will not deadlock. */
+#define EVLOCK_LOCK2(lock1,lock2,mode1,mode2) \
+ do { \
+ void *lock1_tmplock_ = (lock1); \
+ void *lock2_tmplock_ = (lock2); \
+ EVLOCK_SORTLOCKS_(lock1_tmplock_,lock2_tmplock_); \
+ EVLOCK_LOCK(lock1_tmplock_,mode1); \
+ if (lock2_tmplock_ != lock1_tmplock_) \
+ EVLOCK_LOCK(lock2_tmplock_,mode2); \
+ } while (0)
+/** Release both lock1 and lock2. */
+#define EVLOCK_UNLOCK2(lock1,lock2,mode1,mode2) \
+ do { \
+ void *lock1_tmplock_ = (lock1); \
+ void *lock2_tmplock_ = (lock2); \
+ EVLOCK_SORTLOCKS_(lock1_tmplock_,lock2_tmplock_); \
+ if (lock2_tmplock_ != lock1_tmplock_) \
+ EVLOCK_UNLOCK(lock2_tmplock_,mode2); \
+ EVLOCK_UNLOCK(lock1_tmplock_,mode1); \
+ } while (0)
+
+int evthread_is_debug_lock_held_(void *lock);
+void *evthread_debug_get_real_lock_(void *lock);
+
+void *evthread_setup_global_lock_(void *lock_, unsigned locktype,
+ int enable_locks);
+
+#define EVTHREAD_SETUP_GLOBAL_LOCK(lockvar, locktype) \
+ do { \
+ lockvar = evthread_setup_global_lock_(lockvar, \
+ (locktype), enable_locks); \
+ if (!lockvar) { \
+ event_warn("Couldn't allocate %s", #lockvar); \
+ return -1; \
+ } \
+ } while (0);
+
+int event_global_setup_locks_(const int enable_locks);
+int evsig_global_setup_locks_(const int enable_locks);
+int evutil_global_setup_locks_(const int enable_locks);
+int evutil_secure_rng_global_setup_locks_(const int enable_locks);
+
+/** Return current evthread_lock_callbacks */
+struct evthread_lock_callbacks *evthread_get_lock_callbacks(void);
+/** Return current evthread_condition_callbacks */
+struct evthread_condition_callbacks *evthread_get_condition_callbacks(void);
+/** Disable locking for internal usage (like global shutdown) */
+void evthreadimpl_disable_lock_debugging_(void);
+
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* EVTHREAD_INTERNAL_H_INCLUDED_ */
diff --git a/libs/libevent/src/evthread.c b/libs/libevent/src/evthread.c
new file mode 100644
index 0000000000..f3f1eddc89
--- /dev/null
+++ b/libs/libevent/src/evthread.c
@@ -0,0 +1,509 @@
+/*
+ * Copyright (c) 2008-2012 Niels Provos, Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "event2/event-config.h"
+#include "evconfig-private.h"
+
+#ifndef EVENT__DISABLE_THREAD_SUPPORT
+
+#include "event2/thread.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "log-internal.h"
+#include "mm-internal.h"
+#include "util-internal.h"
+#include "evthread-internal.h"
+
+#ifdef EVTHREAD_EXPOSE_STRUCTS
+#define GLOBAL
+#else
+#define GLOBAL static
+#endif
+
+#ifndef EVENT__DISABLE_DEBUG_MODE
+extern int event_debug_created_threadable_ctx_;
+extern int event_debug_mode_on_;
+#endif
+
+/* globals */
+GLOBAL int evthread_lock_debugging_enabled_ = 0;
+GLOBAL struct evthread_lock_callbacks evthread_lock_fns_ = {
+ 0, 0, NULL, NULL, NULL, NULL
+};
+GLOBAL unsigned long (*evthread_id_fn_)(void) = NULL;
+GLOBAL struct evthread_condition_callbacks evthread_cond_fns_ = {
+ 0, NULL, NULL, NULL, NULL
+};
+
+/* Used for debugging */
+static struct evthread_lock_callbacks original_lock_fns_ = {
+ 0, 0, NULL, NULL, NULL, NULL
+};
+static struct evthread_condition_callbacks original_cond_fns_ = {
+ 0, NULL, NULL, NULL, NULL
+};
+
+void
+evthread_set_id_callback(unsigned long (*id_fn)(void))
+{
+ evthread_id_fn_ = id_fn;
+}
+
+struct evthread_lock_callbacks *evthread_get_lock_callbacks()
+{
+ return evthread_lock_debugging_enabled_
+ ? &original_lock_fns_ : &evthread_lock_fns_;
+}
+struct evthread_condition_callbacks *evthread_get_condition_callbacks()
+{
+ return evthread_lock_debugging_enabled_
+ ? &original_cond_fns_ : &evthread_cond_fns_;
+}
+void evthreadimpl_disable_lock_debugging_(void)
+{
+ evthread_lock_debugging_enabled_ = 0;
+}
+
+int
+evthread_set_lock_callbacks(const struct evthread_lock_callbacks *cbs)
+{
+ struct evthread_lock_callbacks *target = evthread_get_lock_callbacks();
+
+#ifndef EVENT__DISABLE_DEBUG_MODE
+ if (event_debug_mode_on_) {
+ if (event_debug_created_threadable_ctx_) {
+ event_errx(1, "evthread initialization must be called BEFORE anything else!");
+ }
+ }
+#endif
+
+ if (!cbs) {
+ if (target->alloc)
+ event_warnx("Trying to disable lock functions after "
+			    "they have been set up will probably not work.");
+ memset(target, 0, sizeof(evthread_lock_fns_));
+ return 0;
+ }
+ if (target->alloc) {
+ /* Uh oh; we already had locking callbacks set up.*/
+ if (target->lock_api_version == cbs->lock_api_version &&
+ target->supported_locktypes == cbs->supported_locktypes &&
+ target->alloc == cbs->alloc &&
+ target->free == cbs->free &&
+ target->lock == cbs->lock &&
+ target->unlock == cbs->unlock) {
+ /* no change -- allow this. */
+ return 0;
+ }
+ event_warnx("Can't change lock callbacks once they have been "
+ "initialized.");
+ return -1;
+ }
+ if (cbs->alloc && cbs->free && cbs->lock && cbs->unlock) {
+ memcpy(target, cbs, sizeof(evthread_lock_fns_));
+ return event_global_setup_locks_(1);
+ } else {
+ return -1;
+ }
+}
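+
+/*
+ * Illustrative sketch (not part of libevent): most applications never fill
+ * in evthread_lock_callbacks by hand; they install the platform adapters
+ * from event2/thread.h before creating any event_base:
+ *
+ *	#ifdef _WIN32
+ *		evthread_use_windows_threads();
+ *	#else
+ *		evthread_use_pthreads();
+ *	#endif
+ */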
+
+int
+evthread_set_condition_callbacks(const struct evthread_condition_callbacks *cbs)
+{
+ struct evthread_condition_callbacks *target = evthread_get_condition_callbacks();
+
+#ifndef EVENT__DISABLE_DEBUG_MODE
+ if (event_debug_mode_on_) {
+ if (event_debug_created_threadable_ctx_) {
+ event_errx(1, "evthread initialization must be called BEFORE anything else!");
+ }
+ }
+#endif
+
+ if (!cbs) {
+ if (target->alloc_condition)
+ event_warnx("Trying to disable condition functions "
+			    "after they have been set up will probably not "
+ "work.");
+ memset(target, 0, sizeof(evthread_cond_fns_));
+ return 0;
+ }
+ if (target->alloc_condition) {
+ /* Uh oh; we already had condition callbacks set up.*/
+ if (target->condition_api_version == cbs->condition_api_version &&
+ target->alloc_condition == cbs->alloc_condition &&
+ target->free_condition == cbs->free_condition &&
+ target->signal_condition == cbs->signal_condition &&
+ target->wait_condition == cbs->wait_condition) {
+ /* no change -- allow this. */
+ return 0;
+ }
+ event_warnx("Can't change condition callbacks once they "
+ "have been initialized.");
+ return -1;
+ }
+ if (cbs->alloc_condition && cbs->free_condition &&
+ cbs->signal_condition && cbs->wait_condition) {
+ memcpy(target, cbs, sizeof(evthread_cond_fns_));
+ }
+ if (evthread_lock_debugging_enabled_) {
+ evthread_cond_fns_.alloc_condition = cbs->alloc_condition;
+ evthread_cond_fns_.free_condition = cbs->free_condition;
+ evthread_cond_fns_.signal_condition = cbs->signal_condition;
+ }
+ return 0;
+}
+
+#define DEBUG_LOCK_SIG 0xdeb0b10c
+
+struct debug_lock {
+ unsigned signature;
+ unsigned locktype;
+ unsigned long held_by;
+ /* XXXX if we ever use read-write locks, we will need a separate
+ * lock to protect count. */
+ int count;
+ void *lock;
+};
+
+static void *
+debug_lock_alloc(unsigned locktype)
+{
+ struct debug_lock *result = mm_malloc(sizeof(struct debug_lock));
+ if (!result)
+ return NULL;
+ if (original_lock_fns_.alloc) {
+ if (!(result->lock = original_lock_fns_.alloc(
+ locktype|EVTHREAD_LOCKTYPE_RECURSIVE))) {
+ mm_free(result);
+ return NULL;
+ }
+ } else {
+ result->lock = NULL;
+ }
+ result->signature = DEBUG_LOCK_SIG;
+ result->locktype = locktype;
+ result->count = 0;
+ result->held_by = 0;
+ return result;
+}
+
+static void
+debug_lock_free(void *lock_, unsigned locktype)
+{
+ struct debug_lock *lock = lock_;
+ EVUTIL_ASSERT(lock->count == 0);
+ EVUTIL_ASSERT(locktype == lock->locktype);
+ EVUTIL_ASSERT(DEBUG_LOCK_SIG == lock->signature);
+ if (original_lock_fns_.free) {
+ original_lock_fns_.free(lock->lock,
+ lock->locktype|EVTHREAD_LOCKTYPE_RECURSIVE);
+ }
+ lock->lock = NULL;
+ lock->count = -100;
+ lock->signature = 0x12300fda;
+ mm_free(lock);
+}
+
+static void
+evthread_debug_lock_mark_locked(unsigned mode, struct debug_lock *lock)
+{
+ EVUTIL_ASSERT(DEBUG_LOCK_SIG == lock->signature);
+ ++lock->count;
+ if (!(lock->locktype & EVTHREAD_LOCKTYPE_RECURSIVE))
+ EVUTIL_ASSERT(lock->count == 1);
+ if (evthread_id_fn_) {
+ unsigned long me;
+ me = evthread_id_fn_();
+ if (lock->count > 1)
+ EVUTIL_ASSERT(lock->held_by == me);
+ lock->held_by = me;
+ }
+}
+
+static int
+debug_lock_lock(unsigned mode, void *lock_)
+{
+ struct debug_lock *lock = lock_;
+ int res = 0;
+ if (lock->locktype & EVTHREAD_LOCKTYPE_READWRITE)
+ EVUTIL_ASSERT(mode & (EVTHREAD_READ|EVTHREAD_WRITE));
+ else
+ EVUTIL_ASSERT((mode & (EVTHREAD_READ|EVTHREAD_WRITE)) == 0);
+ if (original_lock_fns_.lock)
+ res = original_lock_fns_.lock(mode, lock->lock);
+ if (!res) {
+ evthread_debug_lock_mark_locked(mode, lock);
+ }
+ return res;
+}
+
+static void
+evthread_debug_lock_mark_unlocked(unsigned mode, struct debug_lock *lock)
+{
+ EVUTIL_ASSERT(DEBUG_LOCK_SIG == lock->signature);
+ if (lock->locktype & EVTHREAD_LOCKTYPE_READWRITE)
+ EVUTIL_ASSERT(mode & (EVTHREAD_READ|EVTHREAD_WRITE));
+ else
+ EVUTIL_ASSERT((mode & (EVTHREAD_READ|EVTHREAD_WRITE)) == 0);
+ if (evthread_id_fn_) {
+ unsigned long me;
+ me = evthread_id_fn_();
+ EVUTIL_ASSERT(lock->held_by == me);
+ if (lock->count == 1)
+ lock->held_by = 0;
+ }
+ --lock->count;
+ EVUTIL_ASSERT(lock->count >= 0);
+}
+
+static int
+debug_lock_unlock(unsigned mode, void *lock_)
+{
+ struct debug_lock *lock = lock_;
+ int res = 0;
+ evthread_debug_lock_mark_unlocked(mode, lock);
+ if (original_lock_fns_.unlock)
+ res = original_lock_fns_.unlock(mode, lock->lock);
+ return res;
+}
+
+static int
+debug_cond_wait(void *cond_, void *lock_, const struct timeval *tv)
+{
+ int r;
+ struct debug_lock *lock = lock_;
+ EVUTIL_ASSERT(lock);
+ EVUTIL_ASSERT(DEBUG_LOCK_SIG == lock->signature);
+ EVLOCK_ASSERT_LOCKED(lock_);
+ evthread_debug_lock_mark_unlocked(0, lock);
+ r = original_cond_fns_.wait_condition(cond_, lock->lock, tv);
+ evthread_debug_lock_mark_locked(0, lock);
+ return r;
+}
+
+/* misspelled version for backward compatibility */
+void
+evthread_enable_lock_debuging(void)
+{
+ evthread_enable_lock_debugging();
+}
+
+void
+evthread_enable_lock_debugging(void)
+{
+ struct evthread_lock_callbacks cbs = {
+ EVTHREAD_LOCK_API_VERSION,
+ EVTHREAD_LOCKTYPE_RECURSIVE,
+ debug_lock_alloc,
+ debug_lock_free,
+ debug_lock_lock,
+ debug_lock_unlock
+ };
+ if (evthread_lock_debugging_enabled_)
+ return;
+ memcpy(&original_lock_fns_, &evthread_lock_fns_,
+ sizeof(struct evthread_lock_callbacks));
+ memcpy(&evthread_lock_fns_, &cbs,
+ sizeof(struct evthread_lock_callbacks));
+
+ memcpy(&original_cond_fns_, &evthread_cond_fns_,
+ sizeof(struct evthread_condition_callbacks));
+ evthread_cond_fns_.wait_condition = debug_cond_wait;
+ evthread_lock_debugging_enabled_ = 1;
+
+ /* XXX return value should get checked. */
+ event_global_setup_locks_(0);
+}
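+
+/*
+ * Illustrative sketch (not part of libevent): lock debugging has to be
+ * switched on after the threading callbacks are installed but before any
+ * locks are allocated, e.g.
+ *
+ *	evthread_use_pthreads();
+ *	evthread_enable_lock_debugging();
+ *	base = event_base_new();
+ */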
+
+int
+evthread_is_debug_lock_held_(void *lock_)
+{
+ struct debug_lock *lock = lock_;
+ if (! lock->count)
+ return 0;
+ if (evthread_id_fn_) {
+ unsigned long me = evthread_id_fn_();
+ if (lock->held_by != me)
+ return 0;
+ }
+ return 1;
+}
+
+void *
+evthread_debug_get_real_lock_(void *lock_)
+{
+ struct debug_lock *lock = lock_;
+ return lock->lock;
+}
+
+void *
+evthread_setup_global_lock_(void *lock_, unsigned locktype, int enable_locks)
+{
+ /* there are four cases here:
+ 1) we're turning on debugging; locking is not on.
+ 2) we're turning on debugging; locking is on.
+ 3) we're turning on locking; debugging is not on.
+ 4) we're turning on locking; debugging is on. */
+
+ if (!enable_locks && original_lock_fns_.alloc == NULL) {
+ /* Case 1: allocate a debug lock. */
+ EVUTIL_ASSERT(lock_ == NULL);
+ return debug_lock_alloc(locktype);
+ } else if (!enable_locks && original_lock_fns_.alloc != NULL) {
+ /* Case 2: wrap the lock in a debug lock. */
+ struct debug_lock *lock;
+ EVUTIL_ASSERT(lock_ != NULL);
+
+ if (!(locktype & EVTHREAD_LOCKTYPE_RECURSIVE)) {
+ /* We can't wrap it: We need a recursive lock */
+ original_lock_fns_.free(lock_, locktype);
+ return debug_lock_alloc(locktype);
+ }
+ lock = mm_malloc(sizeof(struct debug_lock));
+ if (!lock) {
+ original_lock_fns_.free(lock_, locktype);
+ return NULL;
+ }
+ lock->lock = lock_;
+ lock->locktype = locktype;
+ lock->count = 0;
+ lock->held_by = 0;
+ return lock;
+ } else if (enable_locks && ! evthread_lock_debugging_enabled_) {
+ /* Case 3: allocate a regular lock */
+ EVUTIL_ASSERT(lock_ == NULL);
+ return evthread_lock_fns_.alloc(locktype);
+ } else {
+ /* Case 4: Fill in a debug lock with a real lock */
+ struct debug_lock *lock = lock_ ? lock_ : debug_lock_alloc(locktype);
+ EVUTIL_ASSERT(enable_locks &&
+ evthread_lock_debugging_enabled_);
+ EVUTIL_ASSERT(lock->locktype == locktype);
+ if (!lock->lock) {
+ lock->lock = original_lock_fns_.alloc(
+ locktype|EVTHREAD_LOCKTYPE_RECURSIVE);
+ if (!lock->lock) {
+ lock->count = -200;
+ mm_free(lock);
+ return NULL;
+ }
+ }
+ return lock;
+ }
+}
+
+
+#ifndef EVTHREAD_EXPOSE_STRUCTS
+unsigned long
+evthreadimpl_get_id_()
+{
+ return evthread_id_fn_ ? evthread_id_fn_() : 1;
+}
+void *
+evthreadimpl_lock_alloc_(unsigned locktype)
+{
+#ifndef EVENT__DISABLE_DEBUG_MODE
+ if (event_debug_mode_on_) {
+ event_debug_created_threadable_ctx_ = 1;
+ }
+#endif
+
+ return evthread_lock_fns_.alloc ?
+ evthread_lock_fns_.alloc(locktype) : NULL;
+}
+void
+evthreadimpl_lock_free_(void *lock, unsigned locktype)
+{
+ if (evthread_lock_fns_.free)
+ evthread_lock_fns_.free(lock, locktype);
+}
+int
+evthreadimpl_lock_lock_(unsigned mode, void *lock)
+{
+ if (evthread_lock_fns_.lock)
+ return evthread_lock_fns_.lock(mode, lock);
+ else
+ return 0;
+}
+int
+evthreadimpl_lock_unlock_(unsigned mode, void *lock)
+{
+ if (evthread_lock_fns_.unlock)
+ return evthread_lock_fns_.unlock(mode, lock);
+ else
+ return 0;
+}
+void *
+evthreadimpl_cond_alloc_(unsigned condtype)
+{
+#ifndef EVENT__DISABLE_DEBUG_MODE
+ if (event_debug_mode_on_) {
+ event_debug_created_threadable_ctx_ = 1;
+ }
+#endif
+
+ return evthread_cond_fns_.alloc_condition ?
+ evthread_cond_fns_.alloc_condition(condtype) : NULL;
+}
+void
+evthreadimpl_cond_free_(void *cond)
+{
+ if (evthread_cond_fns_.free_condition)
+ evthread_cond_fns_.free_condition(cond);
+}
+int
+evthreadimpl_cond_signal_(void *cond, int broadcast)
+{
+ if (evthread_cond_fns_.signal_condition)
+ return evthread_cond_fns_.signal_condition(cond, broadcast);
+ else
+ return 0;
+}
+int
+evthreadimpl_cond_wait_(void *cond, void *lock, const struct timeval *tv)
+{
+ if (evthread_cond_fns_.wait_condition)
+ return evthread_cond_fns_.wait_condition(cond, lock, tv);
+ else
+ return 0;
+}
+int
+evthreadimpl_is_lock_debugging_enabled_(void)
+{
+ return evthread_lock_debugging_enabled_;
+}
+
+int
+evthreadimpl_locking_enabled_(void)
+{
+ return evthread_lock_fns_.lock != NULL;
+}
+#endif
+
+#endif
diff --git a/libs/libevent/src/evthread_win32.c b/libs/libevent/src/evthread_win32.c
new file mode 100644
index 0000000000..2ec80560a5
--- /dev/null
+++ b/libs/libevent/src/evthread_win32.c
@@ -0,0 +1,341 @@
+/*
+ * Copyright 2009-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "event2/event-config.h"
+#include "evconfig-private.h"
+
+#ifdef _WIN32
+#ifndef _WIN32_WINNT
+/* Minimum required for InitializeCriticalSectionAndSpinCount */
+#define _WIN32_WINNT 0x0403
+#endif
+#include <winsock2.h>
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+#undef WIN32_LEAN_AND_MEAN
+#include <sys/locking.h>
+#endif
+
+struct event_base;
+#include "event2/thread.h"
+
+#include "mm-internal.h"
+#include "evthread-internal.h"
+#include "time-internal.h"
+
+#define SPIN_COUNT 2000
+
+static void *
+evthread_win32_lock_create(unsigned locktype)
+{
+ CRITICAL_SECTION *lock = mm_malloc(sizeof(CRITICAL_SECTION));
+ if (!lock)
+ return NULL;
+ if (InitializeCriticalSectionAndSpinCount(lock, SPIN_COUNT) == 0) {
+ mm_free(lock);
+ return NULL;
+ }
+ return lock;
+}
+
+static void
+evthread_win32_lock_free(void *lock_, unsigned locktype)
+{
+ CRITICAL_SECTION *lock = lock_;
+ DeleteCriticalSection(lock);
+ mm_free(lock);
+}
+
+static int
+evthread_win32_lock(unsigned mode, void *lock_)
+{
+ CRITICAL_SECTION *lock = lock_;
+ if ((mode & EVTHREAD_TRY)) {
+ return ! TryEnterCriticalSection(lock);
+ } else {
+ EnterCriticalSection(lock);
+ return 0;
+ }
+}
+
+static int
+evthread_win32_unlock(unsigned mode, void *lock_)
+{
+ CRITICAL_SECTION *lock = lock_;
+ LeaveCriticalSection(lock);
+ return 0;
+}
+
+static unsigned long
+evthread_win32_get_id(void)
+{
+ return (unsigned long) GetCurrentThreadId();
+}
+
+#ifdef WIN32_HAVE_CONDITION_VARIABLES
+static void WINAPI (*InitializeConditionVariable_fn)(PCONDITION_VARIABLE)
+ = NULL;
+static BOOL WINAPI (*SleepConditionVariableCS_fn)(
+ PCONDITION_VARIABLE, PCRITICAL_SECTION, DWORD) = NULL;
+static void WINAPI (*WakeAllConditionVariable_fn)(PCONDITION_VARIABLE) = NULL;
+static void WINAPI (*WakeConditionVariable_fn)(PCONDITION_VARIABLE) = NULL;
+
+static int
+evthread_win32_condvar_init(void)
+{
+ HANDLE lib;
+
+ lib = GetModuleHandle(TEXT("kernel32.dll"));
+ if (lib == NULL)
+ return 0;
+
+#define LOAD(name) \
+ name##_fn = GetProcAddress(lib, #name)
+ LOAD(InitializeConditionVariable);
+ LOAD(SleepConditionVariableCS);
+ LOAD(WakeAllConditionVariable);
+ LOAD(WakeConditionVariable);
+
+ return InitializeConditionVariable_fn && SleepConditionVariableCS_fn &&
+ WakeAllConditionVariable_fn && WakeConditionVariable_fn;
+}
+
+/* XXXX Even if we can build this, we don't necessarily want to: the functions
+ * in question didn't exist before Vista, so we'd better LoadProc them. */
+static void *
+evthread_win32_condvar_alloc(unsigned condflags)
+{
+ CONDITION_VARIABLE *cond = mm_malloc(sizeof(CONDITION_VARIABLE));
+ if (!cond)
+ return NULL;
+ InitializeConditionVariable_fn(cond);
+ return cond;
+}
+
+static void
+evthread_win32_condvar_free(void *cond_)
+{
+ CONDITION_VARIABLE *cond = cond_;
+	/* There doesn't _seem_ to be a cleanup fn here... */
+ mm_free(cond);
+}
+
+static int
+evthread_win32_condvar_signal(void *cond_, int broadcast)
+{
+	CONDITION_VARIABLE *cond = cond_;
+ if (broadcast)
+ WakeAllConditionVariable_fn(cond);
+ else
+ WakeConditionVariable_fn(cond);
+ return 0;
+}
+
+static int
+evthread_win32_condvar_wait(void *cond_, void *lock_, const struct timeval *tv)
+{
+ CONDITION_VARIABLE *cond = cond_;
+ CRITICAL_SECTION *lock = lock_;
+	DWORD ms;
+ BOOL result;
+
+ if (tv)
+ ms = evutil_tv_to_msec_(tv);
+ else
+ ms = INFINITE;
+ result = SleepConditionVariableCS_fn(cond, lock, ms);
+	if (!result) {
+		/* SleepConditionVariableCS returns zero on failure; a timeout
+		 * is reported via GetLastError() == ERROR_TIMEOUT. */
+		if (GetLastError() == ERROR_TIMEOUT)
+			return 1;
+		else
+			return -1;
+	} else {
+		return 0;
+	}
+}
+#endif
+
+struct evthread_win32_cond {
+ HANDLE event;
+
+ CRITICAL_SECTION lock;
+ int n_waiting;
+ int n_to_wake;
+ int generation;
+};
+
+static void *
+evthread_win32_cond_alloc(unsigned flags)
+{
+ struct evthread_win32_cond *cond;
+ if (!(cond = mm_malloc(sizeof(struct evthread_win32_cond))))
+ return NULL;
+ if (InitializeCriticalSectionAndSpinCount(&cond->lock, SPIN_COUNT)==0) {
+ mm_free(cond);
+ return NULL;
+ }
+ if ((cond->event = CreateEvent(NULL,TRUE,FALSE,NULL)) == NULL) {
+ DeleteCriticalSection(&cond->lock);
+ mm_free(cond);
+ return NULL;
+ }
+ cond->n_waiting = cond->n_to_wake = cond->generation = 0;
+ return cond;
+}
+
+static void
+evthread_win32_cond_free(void *cond_)
+{
+ struct evthread_win32_cond *cond = cond_;
+ DeleteCriticalSection(&cond->lock);
+ CloseHandle(cond->event);
+ mm_free(cond);
+}
+
+static int
+evthread_win32_cond_signal(void *cond_, int broadcast)
+{
+ struct evthread_win32_cond *cond = cond_;
+ EnterCriticalSection(&cond->lock);
+ if (broadcast)
+ cond->n_to_wake = cond->n_waiting;
+ else
+ ++cond->n_to_wake;
+ cond->generation++;
+ SetEvent(cond->event);
+ LeaveCriticalSection(&cond->lock);
+ return 0;
+}
+
+static int
+evthread_win32_cond_wait(void *cond_, void *lock_, const struct timeval *tv)
+{
+ struct evthread_win32_cond *cond = cond_;
+ CRITICAL_SECTION *lock = lock_;
+ int generation_at_start;
+ int waiting = 1;
+ int result = -1;
+ DWORD ms = INFINITE, ms_orig = INFINITE, startTime, endTime;
+ if (tv)
+ ms_orig = ms = evutil_tv_to_msec_(tv);
+
+ EnterCriticalSection(&cond->lock);
+ ++cond->n_waiting;
+ generation_at_start = cond->generation;
+ LeaveCriticalSection(&cond->lock);
+
+ LeaveCriticalSection(lock);
+
+ startTime = GetTickCount();
+ do {
+ DWORD res;
+ res = WaitForSingleObject(cond->event, ms);
+ EnterCriticalSection(&cond->lock);
+ if (cond->n_to_wake &&
+ cond->generation != generation_at_start) {
+ --cond->n_to_wake;
+ --cond->n_waiting;
+ result = 0;
+ waiting = 0;
+ goto out;
+ } else if (res != WAIT_OBJECT_0) {
+ result = (res==WAIT_TIMEOUT) ? 1 : -1;
+ --cond->n_waiting;
+ waiting = 0;
+ goto out;
+ } else if (ms != INFINITE) {
+ endTime = GetTickCount();
+ if (startTime + ms_orig <= endTime) {
+ result = 1; /* Timeout */
+ --cond->n_waiting;
+ waiting = 0;
+ goto out;
+ } else {
+ ms = startTime + ms_orig - endTime;
+ }
+ }
+ /* If we make it here, we are still waiting. */
+ if (cond->n_to_wake == 0) {
+ /* There is nobody else who should wake up; reset
+ * the event. */
+ ResetEvent(cond->event);
+ }
+ out:
+ LeaveCriticalSection(&cond->lock);
+ } while (waiting);
+
+ EnterCriticalSection(lock);
+
+ EnterCriticalSection(&cond->lock);
+ if (!cond->n_waiting)
+ ResetEvent(cond->event);
+ LeaveCriticalSection(&cond->lock);
+
+ return result;
+}
+
+int
+evthread_use_windows_threads(void)
+{
+ struct evthread_lock_callbacks cbs = {
+ EVTHREAD_LOCK_API_VERSION,
+ EVTHREAD_LOCKTYPE_RECURSIVE,
+ evthread_win32_lock_create,
+ evthread_win32_lock_free,
+ evthread_win32_lock,
+ evthread_win32_unlock
+ };
+
+
+ struct evthread_condition_callbacks cond_cbs = {
+ EVTHREAD_CONDITION_API_VERSION,
+ evthread_win32_cond_alloc,
+ evthread_win32_cond_free,
+ evthread_win32_cond_signal,
+ evthread_win32_cond_wait
+ };
+#ifdef WIN32_HAVE_CONDITION_VARIABLES
+ struct evthread_condition_callbacks condvar_cbs = {
+ EVTHREAD_CONDITION_API_VERSION,
+ evthread_win32_condvar_alloc,
+ evthread_win32_condvar_free,
+ evthread_win32_condvar_signal,
+ evthread_win32_condvar_wait
+ };
+#endif
+
+ evthread_set_lock_callbacks(&cbs);
+ evthread_set_id_callback(evthread_win32_get_id);
+#ifdef WIN32_HAVE_CONDITION_VARIABLES
+ if (evthread_win32_condvar_init()) {
+ evthread_set_condition_callbacks(&condvar_cbs);
+ return 0;
+ }
+#endif
+ evthread_set_condition_callbacks(&cond_cbs);
+
+ return 0;
+}
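+
+/* Illustrative sketch (not part of the original file): a typical program
+ * enables these Windows threading callbacks once, before creating any
+ * event_base, so the locks and conditions defined above actually get used.
+ * The functions called are public libevent API; main() here is only a
+ * hypothetical example and is not compiled. */
+#if 0
+#include <event2/thread.h>
+#include <event2/event.h>
+
+int
+main(void)
+{
+	struct event_base *base;
+
+	if (evthread_use_windows_threads() < 0)
+		return 1;
+	/* Every base created after this point is safe to wake up and
+	 * manipulate from other threads. */
+	base = event_base_new();
+	if (!base)
+		return 1;
+	event_base_dispatch(base);
+	event_base_free(base);
+	return 0;
+}
+#endif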
+
diff --git a/libs/libevent/src/evutil.c b/libs/libevent/src/evutil.c
new file mode 100644
index 0000000000..495bfcc029
--- /dev/null
+++ b/libs/libevent/src/evutil.c
@@ -0,0 +1,2667 @@
+/*
+ * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "event2/event-config.h"
+#include "evconfig-private.h"
+
+#ifdef _WIN32
+#include <winsock2.h>
+#include <ws2tcpip.h>
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+#undef WIN32_LEAN_AND_MEAN
+#include <io.h>
+#include <tchar.h>
+#include <process.h>
+#undef _WIN32_WINNT
+/* For structs needed by GetAdaptersAddresses */
+#define _WIN32_WINNT 0x0501
+#include <iphlpapi.h>
+#endif
+
+#include <sys/types.h>
+#ifdef EVENT__HAVE_SYS_SOCKET_H
+#include <sys/socket.h>
+#endif
+#ifdef EVENT__HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+#ifdef EVENT__HAVE_FCNTL_H
+#include <fcntl.h>
+#endif
+#ifdef EVENT__HAVE_STDLIB_H
+#include <stdlib.h>
+#endif
+#include <errno.h>
+#include <limits.h>
+#include <stdio.h>
+#include <string.h>
+#ifdef EVENT__HAVE_NETINET_IN_H
+#include <netinet/in.h>
+#endif
+#ifdef EVENT__HAVE_NETINET_IN6_H
+#include <netinet/in6.h>
+#endif
+#ifdef EVENT__HAVE_NETINET_TCP_H
+#include <netinet/tcp.h>
+#endif
+#ifdef EVENT__HAVE_ARPA_INET_H
+#include <arpa/inet.h>
+#endif
+#include <time.h>
+#include <sys/stat.h>
+#ifdef EVENT__HAVE_IFADDRS_H
+#include <ifaddrs.h>
+#endif
+
+#include "event2/util.h"
+#include "util-internal.h"
+#include "log-internal.h"
+#include "mm-internal.h"
+#include "evthread-internal.h"
+
+#include "strlcpy-internal.h"
+#include "ipv6-internal.h"
+
+#ifdef _WIN32
+#define HT_NO_CACHE_HASH_VALUES
+#include "ht-internal.h"
+#define open _open
+#define read _read
+#define close _close
+#ifndef fstat
+#define fstat _fstati64
+#endif
+#ifndef stat
+#define stat _stati64
+#endif
+#define mode_t int
+#endif
+
+int
+evutil_open_closeonexec_(const char *pathname, int flags, unsigned mode)
+{
+ int fd;
+
+#ifdef O_CLOEXEC
+ fd = open(pathname, flags|O_CLOEXEC, (mode_t)mode);
+ if (fd >= 0 || errno == EINVAL)
+ return fd;
+ /* If we got an EINVAL, fall through and try without O_CLOEXEC */
+#endif
+ fd = open(pathname, flags, (mode_t)mode);
+ if (fd < 0)
+ return -1;
+
+#if defined(FD_CLOEXEC)
+ if (fcntl(fd, F_SETFD, FD_CLOEXEC) < 0) {
+ close(fd);
+ return -1;
+ }
+#endif
+
+ return fd;
+}
+
+/**
+ Read the contents of 'filename' into a newly allocated NUL-terminated
+ string. Set *content_out to hold this string, and *len_out to hold its
+ length (not including the appended NUL). If 'is_binary', open the file in
+ binary mode.
+
+ Returns 0 on success, -1 if the open fails, and -2 for all other failures.
+
+ Used internally only; may go away in a future version.
+ */
+int
+evutil_read_file_(const char *filename, char **content_out, size_t *len_out,
+ int is_binary)
+{
+ int fd, r;
+ struct stat st;
+ char *mem;
+ size_t read_so_far=0;
+ int mode = O_RDONLY;
+
+ EVUTIL_ASSERT(content_out);
+ EVUTIL_ASSERT(len_out);
+ *content_out = NULL;
+ *len_out = 0;
+
+#ifdef O_BINARY
+ if (is_binary)
+ mode |= O_BINARY;
+#endif
+
+ fd = evutil_open_closeonexec_(filename, mode, 0);
+ if (fd < 0)
+ return -1;
+ if (fstat(fd, &st) || st.st_size < 0 ||
+ st.st_size > EV_SSIZE_MAX-1 ) {
+ close(fd);
+ return -2;
+ }
+ mem = mm_malloc((size_t)st.st_size + 1);
+ if (!mem) {
+ close(fd);
+ return -2;
+ }
+ read_so_far = 0;
+#ifdef _WIN32
+#define N_TO_READ(x) ((x) > INT_MAX) ? INT_MAX : ((int)(x))
+#else
+#define N_TO_READ(x) (x)
+#endif
+ while ((r = read(fd, mem+read_so_far, N_TO_READ(st.st_size - read_so_far))) > 0) {
+ read_so_far += r;
+ if (read_so_far >= (size_t)st.st_size)
+ break;
+ EVUTIL_ASSERT(read_so_far < (size_t)st.st_size);
+ }
+ close(fd);
+ if (r < 0) {
+ mm_free(mem);
+ return -2;
+ }
+ mem[read_so_far] = 0;
+
+ *len_out = read_so_far;
+ *content_out = mem;
+ return 0;
+}
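+
+/* Illustrative sketch (not part of the original file): how an internal
+ * caller typically consumes evutil_read_file_(), relying on the 0/-1/-2
+ * return convention documented above.  example_load_file is a hypothetical
+ * name; the block is not compiled. */
+#if 0
+static int
+example_load_file(const char *path)
+{
+	char *content = NULL;
+	size_t len = 0;
+	int r = evutil_read_file_(path, &content, &len, 0);
+	if (r == -1)
+		return -1;	/* the open itself failed */
+	if (r < 0)
+		return -1;	/* stat, read, or allocation problem */
+	/* 'content' is NUL-terminated and was allocated with mm_malloc. */
+	event_debug(("loaded %s: %lu bytes", path, (unsigned long)len));
+	mm_free(content);
+	return 0;
+}
+#endif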
+
+int
+evutil_socketpair(int family, int type, int protocol, evutil_socket_t fd[2])
+{
+#ifndef _WIN32
+ return socketpair(family, type, protocol, fd);
+#else
+ return evutil_ersatz_socketpair_(family, type, protocol, fd);
+#endif
+}
+
+int
+evutil_ersatz_socketpair_(int family, int type, int protocol,
+ evutil_socket_t fd[2])
+{
+ /* This code is originally from Tor. Used with permission. */
+
+ /* This socketpair does not work when localhost is down. So
+ * it's really not the same thing at all. But it's close enough
+ * for now, and really, when localhost is down sometimes, we
+ * have other problems too.
+ */
+#ifdef _WIN32
+#define ERR(e) WSA##e
+#else
+#define ERR(e) e
+#endif
+ evutil_socket_t listener = -1;
+ evutil_socket_t connector = -1;
+ evutil_socket_t acceptor = -1;
+ struct sockaddr_in listen_addr;
+ struct sockaddr_in connect_addr;
+ ev_socklen_t size;
+ int saved_errno = -1;
+ int family_test;
+
+ family_test = family != AF_INET;
+#ifdef AF_UNIX
+ family_test = family_test && (family != AF_UNIX);
+#endif
+ if (protocol || family_test) {
+ EVUTIL_SET_SOCKET_ERROR(ERR(EAFNOSUPPORT));
+ return -1;
+ }
+
+ if (!fd) {
+ EVUTIL_SET_SOCKET_ERROR(ERR(EINVAL));
+ return -1;
+ }
+
+ listener = socket(AF_INET, type, 0);
+ if (listener < 0)
+ return -1;
+ memset(&listen_addr, 0, sizeof(listen_addr));
+ listen_addr.sin_family = AF_INET;
+ listen_addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
+ listen_addr.sin_port = 0; /* kernel chooses port. */
+ if (bind(listener, (struct sockaddr *) &listen_addr, sizeof (listen_addr))
+ == -1)
+ goto tidy_up_and_fail;
+ if (listen(listener, 1) == -1)
+ goto tidy_up_and_fail;
+
+ connector = socket(AF_INET, type, 0);
+ if (connector < 0)
+ goto tidy_up_and_fail;
+
+ memset(&connect_addr, 0, sizeof(connect_addr));
+
+ /* We want to find out the port number to connect to. */
+ size = sizeof(connect_addr);
+ if (getsockname(listener, (struct sockaddr *) &connect_addr, &size) == -1)
+ goto tidy_up_and_fail;
+ if (size != sizeof (connect_addr))
+ goto abort_tidy_up_and_fail;
+ if (connect(connector, (struct sockaddr *) &connect_addr,
+ sizeof(connect_addr)) == -1)
+ goto tidy_up_and_fail;
+
+ size = sizeof(listen_addr);
+ acceptor = accept(listener, (struct sockaddr *) &listen_addr, &size);
+ if (acceptor < 0)
+ goto tidy_up_and_fail;
+ if (size != sizeof(listen_addr))
+ goto abort_tidy_up_and_fail;
+ /* Now check we are talking to ourself by matching port and host on the
+ two sockets. */
+ if (getsockname(connector, (struct sockaddr *) &connect_addr, &size) == -1)
+ goto tidy_up_and_fail;
+ if (size != sizeof (connect_addr)
+ || listen_addr.sin_family != connect_addr.sin_family
+ || listen_addr.sin_addr.s_addr != connect_addr.sin_addr.s_addr
+ || listen_addr.sin_port != connect_addr.sin_port)
+ goto abort_tidy_up_and_fail;
+ evutil_closesocket(listener);
+ fd[0] = connector;
+ fd[1] = acceptor;
+
+ return 0;
+
+ abort_tidy_up_and_fail:
+ saved_errno = ERR(ECONNABORTED);
+ tidy_up_and_fail:
+ if (saved_errno < 0)
+ saved_errno = EVUTIL_SOCKET_ERROR();
+ if (listener != -1)
+ evutil_closesocket(listener);
+ if (connector != -1)
+ evutil_closesocket(connector);
+ if (acceptor != -1)
+ evutil_closesocket(acceptor);
+
+ EVUTIL_SET_SOCKET_ERROR(saved_errno);
+ return -1;
+#undef ERR
+}
+
+int
+evutil_make_socket_nonblocking(evutil_socket_t fd)
+{
+#ifdef _WIN32
+ {
+ unsigned long nonblocking = 1;
+ if (ioctlsocket(fd, FIONBIO, &nonblocking) == SOCKET_ERROR) {
+			event_sock_warn(fd, "ioctlsocket(%d, FIONBIO)", (int)fd);
+ return -1;
+ }
+ }
+#else
+ {
+ int flags;
+ if ((flags = fcntl(fd, F_GETFL, NULL)) < 0) {
+ event_warn("fcntl(%d, F_GETFL)", fd);
+ return -1;
+ }
+ if (!(flags & O_NONBLOCK)) {
+ if (fcntl(fd, F_SETFL, flags | O_NONBLOCK) == -1) {
+ event_warn("fcntl(%d, F_SETFL)", fd);
+ return -1;
+ }
+ }
+ }
+#endif
+ return 0;
+}
+
+/* Faster version of evutil_make_socket_nonblocking for internal use.
+ *
+ * Requires that no F_SETFL flags were previously set on the fd.
+ */
+static int
+evutil_fast_socket_nonblocking(evutil_socket_t fd)
+{
+#ifdef _WIN32
+ return evutil_make_socket_nonblocking(fd);
+#else
+ if (fcntl(fd, F_SETFL, O_NONBLOCK) == -1) {
+ event_warn("fcntl(%d, F_SETFL)", fd);
+ return -1;
+ }
+ return 0;
+#endif
+}
+
+int
+evutil_make_listen_socket_reuseable(evutil_socket_t sock)
+{
+#if defined(SO_REUSEADDR) && !defined(_WIN32)
+ int one = 1;
+	/* REUSEADDR on Unix means, "don't hang on to this address after the
+	 * listener is closed."  On Windows, though, it means "don't keep other
+	 * processes from binding to this address while we're using it." */
+ return setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, (void*) &one,
+ (ev_socklen_t)sizeof(one));
+#else
+ return 0;
+#endif
+}
+
+int
+evutil_make_listen_socket_reuseable_port(evutil_socket_t sock)
+{
+#if defined __linux__ && defined(SO_REUSEPORT)
+ int one = 1;
+ /* REUSEPORT on Linux 3.9+ means, "Multiple servers (processes or
+	 * threads) can bind to the same port if they each set the option." */
+ return setsockopt(sock, SOL_SOCKET, SO_REUSEPORT, (void*) &one,
+ (ev_socklen_t)sizeof(one));
+#else
+ return 0;
+#endif
+}
+
+int
+evutil_make_tcp_listen_socket_deferred(evutil_socket_t sock)
+{
+#if defined(EVENT__HAVE_NETINET_TCP_H) && defined(TCP_DEFER_ACCEPT)
+ int one = 1;
+
+	/* TCP_DEFER_ACCEPT tells the kernel to defer completing accept()
+	 * until data has arrived and is ready to read */
+ return setsockopt(sock, IPPROTO_TCP, TCP_DEFER_ACCEPT, &one,
+ (ev_socklen_t)sizeof(one));
+#endif
+ return 0;
+}
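+
+/* Illustrative sketch (not part of the original file): the socket helpers
+ * above are typically applied together when hand-rolling a listener.
+ * example_make_listener and the hard-coded port are hypothetical; the
+ * block is not compiled. */
+#if 0
+static evutil_socket_t
+example_make_listener(void)
+{
+	struct sockaddr_in sin;
+	evutil_socket_t fd = socket(AF_INET, SOCK_STREAM, 0);
+	if (fd < 0)
+		return -1;
+	if (evutil_make_socket_nonblocking(fd) < 0 ||
+	    evutil_make_socket_closeonexec(fd) < 0 ||
+	    evutil_make_listen_socket_reuseable(fd) < 0)
+		goto err;
+	memset(&sin, 0, sizeof(sin));
+	sin.sin_family = AF_INET;
+	sin.sin_addr.s_addr = htonl(INADDR_ANY);
+	sin.sin_port = htons(8080);
+	if (bind(fd, (struct sockaddr *)&sin, sizeof(sin)) < 0 ||
+	    listen(fd, 128) < 0)
+		goto err;
+	/* Best effort: a no-op on platforms without TCP_DEFER_ACCEPT. */
+	(void) evutil_make_tcp_listen_socket_deferred(fd);
+	return fd;
+err:
+	evutil_closesocket(fd);
+	return -1;
+}
+#endif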
+
+int
+evutil_make_socket_closeonexec(evutil_socket_t fd)
+{
+#if !defined(_WIN32) && defined(EVENT__HAVE_SETFD)
+ int flags;
+ if ((flags = fcntl(fd, F_GETFD, NULL)) < 0) {
+ event_warn("fcntl(%d, F_GETFD)", fd);
+ return -1;
+ }
+ if (!(flags & FD_CLOEXEC)) {
+ if (fcntl(fd, F_SETFD, flags | FD_CLOEXEC) == -1) {
+ event_warn("fcntl(%d, F_SETFD)", fd);
+ return -1;
+ }
+ }
+#endif
+ return 0;
+}
+
+/* Faster version of evutil_make_socket_closeonexec for internal use.
+ *
+ * Requires that no F_SETFD flags were previously set on the fd.
+ */
+static int
+evutil_fast_socket_closeonexec(evutil_socket_t fd)
+{
+#if !defined(_WIN32) && defined(EVENT__HAVE_SETFD)
+ if (fcntl(fd, F_SETFD, FD_CLOEXEC) == -1) {
+ event_warn("fcntl(%d, F_SETFD)", fd);
+ return -1;
+ }
+#endif
+ return 0;
+}
+
+int
+evutil_closesocket(evutil_socket_t sock)
+{
+#ifndef _WIN32
+ return close(sock);
+#else
+ return closesocket(sock);
+#endif
+}
+
+ev_int64_t
+evutil_strtoll(const char *s, char **endptr, int base)
+{
+#ifdef EVENT__HAVE_STRTOLL
+ return (ev_int64_t)strtoll(s, endptr, base);
+#elif EVENT__SIZEOF_LONG == 8
+ return (ev_int64_t)strtol(s, endptr, base);
+#elif defined(_WIN32) && defined(_MSC_VER) && _MSC_VER < 1300
+ /* XXXX on old versions of MS APIs, we only support base
+ * 10. */
+ ev_int64_t r;
+ if (base != 10)
+ return 0;
+ r = (ev_int64_t) _atoi64(s);
+ while (isspace(*s))
+ ++s;
+ if (*s == '-')
+ ++s;
+ while (isdigit(*s))
+ ++s;
+ if (endptr)
+ *endptr = (char*) s;
+ return r;
+#elif defined(_WIN32)
+ return (ev_int64_t) _strtoi64(s, endptr, base);
+#elif defined(EVENT__SIZEOF_LONG_LONG) && EVENT__SIZEOF_LONG_LONG == 8
+ long long r;
+ int n;
+ if (base != 10 && base != 16)
+ return 0;
+ if (base == 10) {
+ n = sscanf(s, "%lld", &r);
+ } else {
+ unsigned long long ru=0;
+ n = sscanf(s, "%llx", &ru);
+ if (ru > EV_INT64_MAX)
+ return 0;
+ r = (long long) ru;
+ }
+ if (n != 1)
+ return 0;
+ while (EVUTIL_ISSPACE_(*s))
+ ++s;
+ if (*s == '-')
+ ++s;
+ if (base == 10) {
+ while (EVUTIL_ISDIGIT_(*s))
+ ++s;
+ } else {
+ while (EVUTIL_ISXDIGIT_(*s))
+ ++s;
+ }
+ if (endptr)
+ *endptr = (char*) s;
+ return r;
+#else
+#error "I don't know how to parse 64-bit integers."
+#endif
+}
+
+#ifdef _WIN32
+int
+evutil_socket_geterror(evutil_socket_t sock)
+{
+ int optval, optvallen=sizeof(optval);
+ int err = WSAGetLastError();
+ if (err == WSAEWOULDBLOCK && sock >= 0) {
+ if (getsockopt(sock, SOL_SOCKET, SO_ERROR, (void*)&optval,
+ &optvallen))
+ return err;
+ if (optval)
+ return optval;
+ }
+ return err;
+}
+#endif
+
+/* XXX we should use an enum here. */
+/* 2 for connection refused, 1 for connected, 0 for not yet, -1 for error. */
+int
+evutil_socket_connect_(evutil_socket_t *fd_ptr, const struct sockaddr *sa, int socklen)
+{
+ int made_fd = 0;
+
+ if (*fd_ptr < 0) {
+ if ((*fd_ptr = socket(sa->sa_family, SOCK_STREAM, 0)) < 0)
+ goto err;
+ made_fd = 1;
+ if (evutil_make_socket_nonblocking(*fd_ptr) < 0) {
+ goto err;
+ }
+ }
+
+ if (connect(*fd_ptr, sa, socklen) < 0) {
+ int e = evutil_socket_geterror(*fd_ptr);
+ if (EVUTIL_ERR_CONNECT_RETRIABLE(e))
+ return 0;
+ if (EVUTIL_ERR_CONNECT_REFUSED(e))
+ return 2;
+ goto err;
+ } else {
+ return 1;
+ }
+
+err:
+ if (made_fd) {
+ evutil_closesocket(*fd_ptr);
+ *fd_ptr = -1;
+ }
+ return -1;
+}
+
+/* Check whether a socket on which we called connect() is done
+ connecting. Return 1 for connected, 0 for not yet, -1 for error. In the
+ error case, set the current socket errno to the error that happened during
+ the connect operation. */
+int
+evutil_socket_finished_connecting_(evutil_socket_t fd)
+{
+ int e;
+ ev_socklen_t elen = sizeof(e);
+
+ if (getsockopt(fd, SOL_SOCKET, SO_ERROR, (void*)&e, &elen) < 0)
+ return -1;
+
+ if (e) {
+ if (EVUTIL_ERR_CONNECT_RETRIABLE(e))
+ return 0;
+ EVUTIL_SET_SOCKET_ERROR(e);
+ return -1;
+ }
+
+ return 1;
+}
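+
+/* Illustrative sketch (not part of the original file): the two helpers
+ * above form a small state machine for nonblocking connects.  A caller
+ * (the socket bufferevent does something similar) first calls
+ * evutil_socket_connect_(), and once the fd becomes writable asks
+ * evutil_socket_finished_connecting_() whether the connect actually
+ * succeeded.  example_start_connect is a hypothetical name; the block is
+ * not compiled. */
+#if 0
+static int
+example_start_connect(evutil_socket_t *fd, const struct sockaddr *sa, int socklen)
+{
+	switch (evutil_socket_connect_(fd, sa, socklen)) {
+	case 1:
+		return 1;	/* connected right away */
+	case 0:
+		return 0;	/* in progress: wait for EV_WRITE, then call
+				 * evutil_socket_finished_connecting_(*fd) */
+	case 2:			/* connection refused, fd still open */
+	default:
+		return -1;	/* error; *fd may have been closed */
+	}
+}
+#endif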
+
+#if (EVUTIL_AI_PASSIVE|EVUTIL_AI_CANONNAME|EVUTIL_AI_NUMERICHOST| \
+ EVUTIL_AI_NUMERICSERV|EVUTIL_AI_V4MAPPED|EVUTIL_AI_ALL| \
+ EVUTIL_AI_ADDRCONFIG) != \
+ (EVUTIL_AI_PASSIVE^EVUTIL_AI_CANONNAME^EVUTIL_AI_NUMERICHOST^ \
+ EVUTIL_AI_NUMERICSERV^EVUTIL_AI_V4MAPPED^EVUTIL_AI_ALL^ \
+ EVUTIL_AI_ADDRCONFIG)
+#error "Some of our EVUTIL_AI_* flags seem to overlap with system AI_* flags"
+#endif
+
+/* We sometimes need to know whether we have an ipv4 address and whether we
+ have an ipv6 address. If 'have_checked_interfaces', then we've already done
+ the test. If 'had_ipv4_address', then it turns out we had an ipv4 address.
+ If 'had_ipv6_address', then it turns out we had an ipv6 address. These are
+ set by evutil_check_interfaces. */
+static int have_checked_interfaces, had_ipv4_address, had_ipv6_address;
+
+/* Macro: True iff the IPv4 address 'addr', in host order, is in 127.0.0.0/8
+ */
+#define EVUTIL_V4ADDR_IS_LOCALHOST(addr) (((addr)>>24) == 127)
+
+/* Macro: True iff the IPv4 address 'addr', in host order, is a class D
+ * (multicast) address.
+ */
+#define EVUTIL_V4ADDR_IS_CLASSD(addr) ((((addr)>>24) & 0xf0) == 0xe0)
+
+static void
+evutil_found_ifaddr(const struct sockaddr *sa)
+{
+ const char ZEROES[] = "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00";
+
+ if (sa->sa_family == AF_INET) {
+ const struct sockaddr_in *sin = (struct sockaddr_in *)sa;
+ ev_uint32_t addr = ntohl(sin->sin_addr.s_addr);
+ if (addr == 0 ||
+ EVUTIL_V4ADDR_IS_LOCALHOST(addr) ||
+ EVUTIL_V4ADDR_IS_CLASSD(addr)) {
+ /* Not actually a usable external address. */
+ } else {
+ event_debug(("Detected an IPv4 interface"));
+ had_ipv4_address = 1;
+ }
+ } else if (sa->sa_family == AF_INET6) {
+ const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sa;
+ const unsigned char *addr =
+ (unsigned char*)sin6->sin6_addr.s6_addr;
+ if (!memcmp(addr, ZEROES, 8) ||
+ ((addr[0] & 0xfe) == 0xfc) ||
+ (addr[0] == 0xfe && (addr[1] & 0xc0) == 0x80) ||
+ (addr[0] == 0xfe && (addr[1] & 0xc0) == 0xc0) ||
+ (addr[0] == 0xff)) {
+ /* This is a reserved, ipv4compat, ipv4map, loopback,
+ * link-local, multicast, or unspecified address. */
+ } else {
+ event_debug(("Detected an IPv6 interface"));
+ had_ipv6_address = 1;
+ }
+ }
+}
+
+#ifdef _WIN32
+typedef ULONG (WINAPI *GetAdaptersAddresses_fn_t)(
+ ULONG, ULONG, PVOID, PIP_ADAPTER_ADDRESSES, PULONG);
+#endif
+
+static int
+evutil_check_ifaddrs(void)
+{
+#if defined(EVENT__HAVE_GETIFADDRS)
+ /* Most free Unixy systems provide getifaddrs, which gives us a linked list
+ * of struct ifaddrs. */
+ struct ifaddrs *ifa = NULL;
+ const struct ifaddrs *i;
+ if (getifaddrs(&ifa) < 0) {
+ event_warn("Unable to call getifaddrs()");
+ return -1;
+ }
+
+ for (i = ifa; i; i = i->ifa_next) {
+ if (!i->ifa_addr)
+ continue;
+ evutil_found_ifaddr(i->ifa_addr);
+ }
+
+ freeifaddrs(ifa);
+ return 0;
+#elif defined(_WIN32)
+ /* Windows XP began to provide GetAdaptersAddresses. Windows 2000 had a
+ "GetAdaptersInfo", but that's deprecated; let's just try
+ GetAdaptersAddresses and fall back to connect+getsockname.
+ */
+	HMODULE lib = evutil_load_windows_system_library_(TEXT("iphlpapi.dll"));
+ GetAdaptersAddresses_fn_t fn;
+ ULONG size, res;
+ IP_ADAPTER_ADDRESSES *addresses = NULL, *address;
+ int result = -1;
+
+#define FLAGS (GAA_FLAG_SKIP_ANYCAST | \
+ GAA_FLAG_SKIP_MULTICAST | \
+ GAA_FLAG_SKIP_DNS_SERVER)
+
+ if (!lib)
+ goto done;
+
+ if (!(fn = (GetAdaptersAddresses_fn_t) GetProcAddress(lib, "GetAdaptersAddresses")))
+ goto done;
+
+ /* Guess how much space we need. */
+ size = 15*1024;
+ addresses = mm_malloc(size);
+ if (!addresses)
+ goto done;
+ res = fn(AF_UNSPEC, FLAGS, NULL, addresses, &size);
+ if (res == ERROR_BUFFER_OVERFLOW) {
+ /* we didn't guess that we needed enough space; try again */
+ mm_free(addresses);
+ addresses = mm_malloc(size);
+ if (!addresses)
+ goto done;
+ res = fn(AF_UNSPEC, FLAGS, NULL, addresses, &size);
+ }
+ if (res != NO_ERROR)
+ goto done;
+
+ for (address = addresses; address; address = address->Next) {
+ IP_ADAPTER_UNICAST_ADDRESS *a;
+ for (a = address->FirstUnicastAddress; a; a = a->Next) {
+ /* Yes, it's a linked list inside a linked list */
+ struct sockaddr *sa = a->Address.lpSockaddr;
+ evutil_found_ifaddr(sa);
+ }
+ }
+
+ result = 0;
+done:
+ if (lib)
+ FreeLibrary(lib);
+ if (addresses)
+ mm_free(addresses);
+ return result;
+#else
+ return -1;
+#endif
+}
+
+/* Test whether we have an ipv4 interface and an ipv6 interface. Return 0 if
+ * the test seemed successful. */
+static int
+evutil_check_interfaces(int force_recheck)
+{
+ evutil_socket_t fd = -1;
+ struct sockaddr_in sin, sin_out;
+ struct sockaddr_in6 sin6, sin6_out;
+ ev_socklen_t sin_out_len = sizeof(sin_out);
+ ev_socklen_t sin6_out_len = sizeof(sin6_out);
+ int r;
+ if (have_checked_interfaces && !force_recheck)
+ return 0;
+
+ if (evutil_check_ifaddrs() == 0) {
+ /* Use a nice sane interface, if this system has one. */
+ return 0;
+ }
+
+	/* Ugh. There was no nice sane interface. So to check whether we have
+	 * an interface open for a given protocol, we will try to make a UDP
+	 * 'connection' to a remote host on the internet. We don't actually
+	 * use it, so the address doesn't matter, but we want to pick one that
+	 * keeps us from using a host- or link-local interface. */
+ memset(&sin, 0, sizeof(sin));
+ sin.sin_family = AF_INET;
+ sin.sin_port = htons(53);
+ r = evutil_inet_pton(AF_INET, "18.244.0.188", &sin.sin_addr);
+ EVUTIL_ASSERT(r);
+
+ memset(&sin6, 0, sizeof(sin6));
+ sin6.sin6_family = AF_INET6;
+ sin6.sin6_port = htons(53);
+ r = evutil_inet_pton(AF_INET6, "2001:4860:b002::68", &sin6.sin6_addr);
+ EVUTIL_ASSERT(r);
+
+ memset(&sin_out, 0, sizeof(sin_out));
+ memset(&sin6_out, 0, sizeof(sin6_out));
+
+ /* XXX some errnos mean 'no address'; some mean 'not enough sockets'. */
+ if ((fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP)) >= 0 &&
+ connect(fd, (struct sockaddr*)&sin, sizeof(sin)) == 0 &&
+ getsockname(fd, (struct sockaddr*)&sin_out, &sin_out_len) == 0) {
+ /* We might have an IPv4 interface. */
+ evutil_found_ifaddr((struct sockaddr*) &sin_out);
+ }
+ if (fd >= 0)
+ evutil_closesocket(fd);
+
+ if ((fd = socket(AF_INET6, SOCK_DGRAM, IPPROTO_UDP)) >= 0 &&
+ connect(fd, (struct sockaddr*)&sin6, sizeof(sin6)) == 0 &&
+ getsockname(fd, (struct sockaddr*)&sin6_out, &sin6_out_len) == 0) {
+ /* We might have an IPv6 interface. */
+ evutil_found_ifaddr((struct sockaddr*) &sin6_out);
+ }
+
+ if (fd >= 0)
+ evutil_closesocket(fd);
+
+ return 0;
+}
+
+/* Internal addrinfo flag. This one is set when we allocate the addrinfo from
+ * inside libevent. Otherwise, the built-in getaddrinfo() function allocated
+ * it, and we should trust what they said.
+ **/
+#define EVUTIL_AI_LIBEVENT_ALLOCATED 0x80000000
+
+/* Helper: construct a new addrinfo containing the socket address in
+ * 'sa', which must be a sockaddr_in or a sockaddr_in6. Take the
+ * socktype and protocol info from hints. If they weren't set, then
+ * allocate both a TCP and a UDP addrinfo.
+ */
+struct evutil_addrinfo *
+evutil_new_addrinfo_(struct sockaddr *sa, ev_socklen_t socklen,
+ const struct evutil_addrinfo *hints)
+{
+ struct evutil_addrinfo *res;
+ EVUTIL_ASSERT(hints);
+
+ if (hints->ai_socktype == 0 && hints->ai_protocol == 0) {
+ /* Indecisive user! Give them a UDP and a TCP. */
+ struct evutil_addrinfo *r1, *r2;
+ struct evutil_addrinfo tmp;
+ memcpy(&tmp, hints, sizeof(tmp));
+ tmp.ai_socktype = SOCK_STREAM; tmp.ai_protocol = IPPROTO_TCP;
+ r1 = evutil_new_addrinfo_(sa, socklen, &tmp);
+ if (!r1)
+ return NULL;
+ tmp.ai_socktype = SOCK_DGRAM; tmp.ai_protocol = IPPROTO_UDP;
+ r2 = evutil_new_addrinfo_(sa, socklen, &tmp);
+ if (!r2) {
+ evutil_freeaddrinfo(r1);
+ return NULL;
+ }
+ r1->ai_next = r2;
+ return r1;
+ }
+
+ /* We're going to allocate extra space to hold the sockaddr. */
+ res = mm_calloc(1,sizeof(struct evutil_addrinfo)+socklen);
+ if (!res)
+ return NULL;
+ res->ai_addr = (struct sockaddr*)
+ (((char*)res) + sizeof(struct evutil_addrinfo));
+ memcpy(res->ai_addr, sa, socklen);
+ res->ai_addrlen = socklen;
+ res->ai_family = sa->sa_family; /* Same or not? XXX */
+ res->ai_flags = EVUTIL_AI_LIBEVENT_ALLOCATED;
+ res->ai_socktype = hints->ai_socktype;
+ res->ai_protocol = hints->ai_protocol;
+
+ return res;
+}
+
+/* Append the addrinfo 'append' to the end of 'first', and return the start of
+ * the list. Either element can be NULL, in which case we return the element
+ * that is not NULL. */
+struct evutil_addrinfo *
+evutil_addrinfo_append_(struct evutil_addrinfo *first,
+ struct evutil_addrinfo *append)
+{
+ struct evutil_addrinfo *ai = first;
+ if (!ai)
+ return append;
+ while (ai->ai_next)
+ ai = ai->ai_next;
+ ai->ai_next = append;
+
+ return first;
+}
+
+static int
+parse_numeric_servname(const char *servname)
+{
+ int n;
+ char *endptr=NULL;
+ n = (int) strtol(servname, &endptr, 10);
+ if (n>=0 && n <= 65535 && servname[0] && endptr && !endptr[0])
+ return n;
+ else
+ return -1;
+}
+
+/** Parse a service name in 'servname', which can be a decimal port.
+ * Return the port number, or -1 on error.
+ */
+static int
+evutil_parse_servname(const char *servname, const char *protocol,
+ const struct evutil_addrinfo *hints)
+{
+ int n = parse_numeric_servname(servname);
+ if (n>=0)
+ return n;
+#if defined(EVENT__HAVE_GETSERVBYNAME) || defined(_WIN32)
+ if (!(hints->ai_flags & EVUTIL_AI_NUMERICSERV)) {
+ struct servent *ent = getservbyname(servname, protocol);
+ if (ent) {
+ return ntohs(ent->s_port);
+ }
+ }
+#endif
+ return -1;
+}
+
+/* Return a string corresponding to a protocol number that we can pass to
+ * getservyname. */
+static const char *
+evutil_unparse_protoname(int proto)
+{
+ switch (proto) {
+ case 0:
+ return NULL;
+ case IPPROTO_TCP:
+ return "tcp";
+ case IPPROTO_UDP:
+ return "udp";
+#ifdef IPPROTO_SCTP
+ case IPPROTO_SCTP:
+ return "sctp";
+#endif
+ default:
+#ifdef EVENT__HAVE_GETPROTOBYNUMBER
+ {
+ struct protoent *ent = getprotobynumber(proto);
+ if (ent)
+ return ent->p_name;
+ }
+#endif
+ return NULL;
+ }
+}
+
+static void
+evutil_getaddrinfo_infer_protocols(struct evutil_addrinfo *hints)
+{
+ /* If we can guess the protocol from the socktype, do so. */
+ if (!hints->ai_protocol && hints->ai_socktype) {
+ if (hints->ai_socktype == SOCK_DGRAM)
+ hints->ai_protocol = IPPROTO_UDP;
+ else if (hints->ai_socktype == SOCK_STREAM)
+ hints->ai_protocol = IPPROTO_TCP;
+ }
+
+ /* Set the socktype if it isn't set. */
+ if (!hints->ai_socktype && hints->ai_protocol) {
+ if (hints->ai_protocol == IPPROTO_UDP)
+ hints->ai_socktype = SOCK_DGRAM;
+ else if (hints->ai_protocol == IPPROTO_TCP)
+ hints->ai_socktype = SOCK_STREAM;
+#ifdef IPPROTO_SCTP
+ else if (hints->ai_protocol == IPPROTO_SCTP)
+ hints->ai_socktype = SOCK_STREAM;
+#endif
+ }
+}
+
+#if AF_UNSPEC != PF_UNSPEC
+#error "I cannot build on a system where AF_UNSPEC != PF_UNSPEC"
+#endif
+
+/** Implements the part of looking up hosts by name that's common to both
+ * the blocking and nonblocking resolver:
+ * - Adjust 'hints' to have a reasonable socktype and protocol.
+ * - Look up the port based on 'servname', and store it in *portnum,
+ * - Handle the nodename==NULL case
+ * - Handle some invalid arguments cases.
+ * - Handle the cases where nodename is an IPv4 or IPv6 address.
+ *
+ * If we need the resolver to look up the hostname, we return
+ * EVUTIL_EAI_NEED_RESOLVE. Otherwise, we can completely implement
+ * getaddrinfo: we return 0 or an appropriate EVUTIL_EAI_* error, and
+ * set *res as getaddrinfo would.
+ */
+int
+evutil_getaddrinfo_common_(const char *nodename, const char *servname,
+ struct evutil_addrinfo *hints, struct evutil_addrinfo **res, int *portnum)
+{
+ int port = 0;
+ const char *pname;
+
+ if (nodename == NULL && servname == NULL)
+ return EVUTIL_EAI_NONAME;
+
+ /* We only understand 3 families */
+ if (hints->ai_family != PF_UNSPEC && hints->ai_family != PF_INET &&
+ hints->ai_family != PF_INET6)
+ return EVUTIL_EAI_FAMILY;
+
+ evutil_getaddrinfo_infer_protocols(hints);
+
+ /* Look up the port number and protocol, if possible. */
+ pname = evutil_unparse_protoname(hints->ai_protocol);
+ if (servname) {
+ /* XXXX We could look at the protocol we got back from
+ * getservbyname, but it doesn't seem too useful. */
+ port = evutil_parse_servname(servname, pname, hints);
+ if (port < 0) {
+ return EVUTIL_EAI_NONAME;
+ }
+ }
+
+ /* If we have no node name, then we're supposed to bind to 'any' and
+ * connect to localhost. */
+ if (nodename == NULL) {
+ struct evutil_addrinfo *res4=NULL, *res6=NULL;
+ if (hints->ai_family != PF_INET) { /* INET6 or UNSPEC. */
+ struct sockaddr_in6 sin6;
+ memset(&sin6, 0, sizeof(sin6));
+ sin6.sin6_family = AF_INET6;
+ sin6.sin6_port = htons(port);
+ if (hints->ai_flags & EVUTIL_AI_PASSIVE) {
+ /* Bind to :: */
+ } else {
+ /* connect to ::1 */
+ sin6.sin6_addr.s6_addr[15] = 1;
+ }
+ res6 = evutil_new_addrinfo_((struct sockaddr*)&sin6,
+ sizeof(sin6), hints);
+ if (!res6)
+ return EVUTIL_EAI_MEMORY;
+ }
+
+ if (hints->ai_family != PF_INET6) { /* INET or UNSPEC */
+ struct sockaddr_in sin;
+ memset(&sin, 0, sizeof(sin));
+ sin.sin_family = AF_INET;
+ sin.sin_port = htons(port);
+ if (hints->ai_flags & EVUTIL_AI_PASSIVE) {
+ /* Bind to 0.0.0.0 */
+ } else {
+ /* connect to 127.0.0.1 */
+ sin.sin_addr.s_addr = htonl(0x7f000001);
+ }
+ res4 = evutil_new_addrinfo_((struct sockaddr*)&sin,
+ sizeof(sin), hints);
+ if (!res4) {
+ if (res6)
+ evutil_freeaddrinfo(res6);
+ return EVUTIL_EAI_MEMORY;
+ }
+ }
+ *res = evutil_addrinfo_append_(res4, res6);
+ return 0;
+ }
+
+ /* If we can, we should try to parse the hostname without resolving
+ * it. */
+ /* Try ipv6. */
+ if (hints->ai_family == PF_INET6 || hints->ai_family == PF_UNSPEC) {
+ struct sockaddr_in6 sin6;
+ memset(&sin6, 0, sizeof(sin6));
+ if (1==evutil_inet_pton(AF_INET6, nodename, &sin6.sin6_addr)) {
+ /* Got an ipv6 address. */
+ sin6.sin6_family = AF_INET6;
+ sin6.sin6_port = htons(port);
+ *res = evutil_new_addrinfo_((struct sockaddr*)&sin6,
+ sizeof(sin6), hints);
+ if (!*res)
+ return EVUTIL_EAI_MEMORY;
+ return 0;
+ }
+ }
+
+ /* Try ipv4. */
+ if (hints->ai_family == PF_INET || hints->ai_family == PF_UNSPEC) {
+ struct sockaddr_in sin;
+ memset(&sin, 0, sizeof(sin));
+ if (1==evutil_inet_pton(AF_INET, nodename, &sin.sin_addr)) {
+			/* Got an ipv4 address. */
+ sin.sin_family = AF_INET;
+ sin.sin_port = htons(port);
+ *res = evutil_new_addrinfo_((struct sockaddr*)&sin,
+ sizeof(sin), hints);
+ if (!*res)
+ return EVUTIL_EAI_MEMORY;
+ return 0;
+ }
+ }
+
+
+ /* If we have reached this point, we definitely need to do a DNS
+ * lookup. */
+ if ((hints->ai_flags & EVUTIL_AI_NUMERICHOST)) {
+ /* If we're not allowed to do one, then say so. */
+ return EVUTIL_EAI_NONAME;
+ }
+ *portnum = port;
+ return EVUTIL_EAI_NEED_RESOLVE;
+}
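+
+/* Illustrative sketch (not part of the original file): how a resolver
+ * front end is expected to drive evutil_getaddrinfo_common_().  Only
+ * EVUTIL_EAI_NEED_RESOLVE means "go ask DNS"; every other return value is
+ * already a final getaddrinfo-style answer.  example_resolve is a
+ * hypothetical name; the block is not compiled. */
+#if 0
+static int
+example_resolve(const char *node, const char *serv,
+    struct evutil_addrinfo *hints, struct evutil_addrinfo **res)
+{
+	int port = -1;
+	int err = evutil_getaddrinfo_common_(node, serv, hints, res, &port);
+	if (err != EVUTIL_EAI_NEED_RESOLVE)
+		return err;	/* 0 on success, or an EVUTIL_EAI_* code */
+	/* A real caller would now hand 'node' and 'port' to an
+	 * asynchronous DNS resolver such as evdns. */
+	return EVUTIL_EAI_FAIL;
+}
+#endif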
+
+#ifdef EVENT__HAVE_GETADDRINFO
+#define USE_NATIVE_GETADDRINFO
+#endif
+
+#ifdef USE_NATIVE_GETADDRINFO
+/* A mask of all the flags that we declare, so we can clear them before calling
+ * the native getaddrinfo */
+static const unsigned int ALL_NONNATIVE_AI_FLAGS =
+#ifndef AI_PASSIVE
+ EVUTIL_AI_PASSIVE |
+#endif
+#ifndef AI_CANONNAME
+ EVUTIL_AI_CANONNAME |
+#endif
+#ifndef AI_NUMERICHOST
+ EVUTIL_AI_NUMERICHOST |
+#endif
+#ifndef AI_NUMERICSERV
+ EVUTIL_AI_NUMERICSERV |
+#endif
+#ifndef AI_ADDRCONFIG
+ EVUTIL_AI_ADDRCONFIG |
+#endif
+#ifndef AI_ALL
+ EVUTIL_AI_ALL |
+#endif
+#ifndef AI_V4MAPPED
+ EVUTIL_AI_V4MAPPED |
+#endif
+ EVUTIL_AI_LIBEVENT_ALLOCATED;
+
+static const unsigned int ALL_NATIVE_AI_FLAGS =
+#ifdef AI_PASSIVE
+ AI_PASSIVE |
+#endif
+#ifdef AI_CANONNAME
+ AI_CANONNAME |
+#endif
+#ifdef AI_NUMERICHOST
+ AI_NUMERICHOST |
+#endif
+#ifdef AI_NUMERICSERV
+ AI_NUMERICSERV |
+#endif
+#ifdef AI_ADDRCONFIG
+ AI_ADDRCONFIG |
+#endif
+#ifdef AI_ALL
+ AI_ALL |
+#endif
+#ifdef AI_V4MAPPED
+ AI_V4MAPPED |
+#endif
+ 0;
+#endif
+
+#ifndef USE_NATIVE_GETADDRINFO
+/* Helper for systems with no getaddrinfo(): make one or more addrinfos out of
+ * a struct hostent.
+ */
+static struct evutil_addrinfo *
+addrinfo_from_hostent(const struct hostent *ent,
+ int port, const struct evutil_addrinfo *hints)
+{
+ int i;
+ struct sockaddr_in sin;
+ struct sockaddr_in6 sin6;
+ struct sockaddr *sa;
+ int socklen;
+ struct evutil_addrinfo *res=NULL, *ai;
+ void *addrp;
+
+ if (ent->h_addrtype == PF_INET) {
+ memset(&sin, 0, sizeof(sin));
+ sin.sin_family = AF_INET;
+ sin.sin_port = htons(port);
+ sa = (struct sockaddr *)&sin;
+ socklen = sizeof(struct sockaddr_in);
+ addrp = &sin.sin_addr;
+ if (ent->h_length != sizeof(sin.sin_addr)) {
+ event_warnx("Weird h_length from gethostbyname");
+ return NULL;
+ }
+ } else if (ent->h_addrtype == PF_INET6) {
+ memset(&sin6, 0, sizeof(sin6));
+ sin6.sin6_family = AF_INET6;
+ sin6.sin6_port = htons(port);
+ sa = (struct sockaddr *)&sin6;
+ socklen = sizeof(struct sockaddr_in6);
+ addrp = &sin6.sin6_addr;
+ if (ent->h_length != sizeof(sin6.sin6_addr)) {
+ event_warnx("Weird h_length from gethostbyname");
+ return NULL;
+ }
+ } else
+ return NULL;
+
+ for (i = 0; ent->h_addr_list[i]; ++i) {
+ memcpy(addrp, ent->h_addr_list[i], ent->h_length);
+ ai = evutil_new_addrinfo_(sa, socklen, hints);
+ if (!ai) {
+ evutil_freeaddrinfo(res);
+ return NULL;
+ }
+ res = evutil_addrinfo_append_(res, ai);
+ }
+
+ if (res && ((hints->ai_flags & EVUTIL_AI_CANONNAME) && ent->h_name)) {
+ res->ai_canonname = mm_strdup(ent->h_name);
+ if (res->ai_canonname == NULL) {
+ evutil_freeaddrinfo(res);
+ return NULL;
+ }
+ }
+
+ return res;
+}
+#endif
+
+/* If the EVUTIL_AI_ADDRCONFIG flag is set on hints->ai_flags, and
+ * hints->ai_family is PF_UNSPEC, then revise the value of hints->ai_family so
+ * that we'll only get addresses we could maybe connect to.
+ */
+void
+evutil_adjust_hints_for_addrconfig_(struct evutil_addrinfo *hints)
+{
+ if (!(hints->ai_flags & EVUTIL_AI_ADDRCONFIG))
+ return;
+ if (hints->ai_family != PF_UNSPEC)
+ return;
+ if (!have_checked_interfaces)
+ evutil_check_interfaces(0);
+ if (had_ipv4_address && !had_ipv6_address) {
+ hints->ai_family = PF_INET;
+ } else if (!had_ipv4_address && had_ipv6_address) {
+ hints->ai_family = PF_INET6;
+ }
+}
+
+#ifdef USE_NATIVE_GETADDRINFO
+static int need_numeric_port_hack_=0;
+static int need_socktype_protocol_hack_=0;
+static int tested_for_getaddrinfo_hacks=0;
+
+/* Some older BSDs (like OpenBSD up to 4.6) used to believe that
+ giving a numeric port without giving an ai_socktype was verboten.
+ We test for this so we can apply an appropriate workaround. If it
+ turns out that the bug is present, then:
+
+ - If nodename==NULL and servname is numeric, we build an answer
+ ourselves using evutil_getaddrinfo_common_().
+
+ - If nodename!=NULL and servname is numeric, then we set
+ servname=NULL when calling getaddrinfo, and post-process the
+ result to set the ports on it.
+
+ We test for this bug at runtime, since otherwise we can't have the
+ same binary run on multiple BSD versions.
+
+   - Some versions of Solaris believe that it's nice to leave the protocol
+ field set to 0. We test for this so we can apply an appropriate
+ workaround.
+*/
+static void
+test_for_getaddrinfo_hacks(void)
+{
+ int r, r2;
+ struct evutil_addrinfo *ai=NULL, *ai2=NULL;
+ struct evutil_addrinfo hints;
+
+ memset(&hints,0,sizeof(hints));
+ hints.ai_family = PF_UNSPEC;
+ hints.ai_flags =
+#ifdef AI_NUMERICHOST
+ AI_NUMERICHOST |
+#endif
+#ifdef AI_NUMERICSERV
+ AI_NUMERICSERV |
+#endif
+ 0;
+ r = getaddrinfo("1.2.3.4", "80", &hints, &ai);
+ hints.ai_socktype = SOCK_STREAM;
+ r2 = getaddrinfo("1.2.3.4", "80", &hints, &ai2);
+ if (r2 == 0 && r != 0) {
+ need_numeric_port_hack_=1;
+ }
+ if (ai2 && ai2->ai_protocol == 0) {
+ need_socktype_protocol_hack_=1;
+ }
+
+ if (ai)
+ freeaddrinfo(ai);
+ if (ai2)
+ freeaddrinfo(ai2);
+ tested_for_getaddrinfo_hacks=1;
+}
+
+static inline int
+need_numeric_port_hack(void)
+{
+ if (!tested_for_getaddrinfo_hacks)
+ test_for_getaddrinfo_hacks();
+ return need_numeric_port_hack_;
+}
+
+static inline int
+need_socktype_protocol_hack(void)
+{
+ if (!tested_for_getaddrinfo_hacks)
+ test_for_getaddrinfo_hacks();
+ return need_socktype_protocol_hack_;
+}
+
+static void
+apply_numeric_port_hack(int port, struct evutil_addrinfo **ai)
+{
+ /* Now we run through the list and set the ports on all of the
+ * results where ports would make sense. */
+ for ( ; *ai; ai = &(*ai)->ai_next) {
+ struct sockaddr *sa = (*ai)->ai_addr;
+ if (sa && sa->sa_family == AF_INET) {
+ struct sockaddr_in *sin = (struct sockaddr_in*)sa;
+ sin->sin_port = htons(port);
+ } else if (sa && sa->sa_family == AF_INET6) {
+ struct sockaddr_in6 *sin6 = (struct sockaddr_in6*)sa;
+ sin6->sin6_port = htons(port);
+ } else {
+ /* A numeric port makes no sense here; remove this one
+ * from the list. */
+ struct evutil_addrinfo *victim = *ai;
+ *ai = victim->ai_next;
+ victim->ai_next = NULL;
+ freeaddrinfo(victim);
+ }
+ }
+}
+
+static int
+apply_socktype_protocol_hack(struct evutil_addrinfo *ai)
+{
+ struct evutil_addrinfo *ai_new;
+ for (; ai; ai = ai->ai_next) {
+ evutil_getaddrinfo_infer_protocols(ai);
+ if (ai->ai_socktype || ai->ai_protocol)
+ continue;
+ ai_new = mm_malloc(sizeof(*ai_new));
+ if (!ai_new)
+ return -1;
+ memcpy(ai_new, ai, sizeof(*ai_new));
+ ai->ai_socktype = SOCK_STREAM;
+ ai->ai_protocol = IPPROTO_TCP;
+ ai_new->ai_socktype = SOCK_DGRAM;
+ ai_new->ai_protocol = IPPROTO_UDP;
+
+ ai_new->ai_next = ai->ai_next;
+ ai->ai_next = ai_new;
+ }
+ return 0;
+}
+#endif
+
+int
+evutil_getaddrinfo(const char *nodename, const char *servname,
+ const struct evutil_addrinfo *hints_in, struct evutil_addrinfo **res)
+{
+#ifdef USE_NATIVE_GETADDRINFO
+ struct evutil_addrinfo hints;
+ int portnum=-1, need_np_hack, err;
+
+ if (hints_in) {
+ memcpy(&hints, hints_in, sizeof(hints));
+ } else {
+ memset(&hints, 0, sizeof(hints));
+ hints.ai_family = PF_UNSPEC;
+ }
+
+#ifndef AI_ADDRCONFIG
+ /* Not every system has AI_ADDRCONFIG, so fake it. */
+ if (hints.ai_family == PF_UNSPEC &&
+ (hints.ai_flags & EVUTIL_AI_ADDRCONFIG)) {
+ evutil_adjust_hints_for_addrconfig_(&hints);
+ }
+#endif
+
+#ifndef AI_NUMERICSERV
+ /* Not every system has AI_NUMERICSERV, so fake it. */
+ if (hints.ai_flags & EVUTIL_AI_NUMERICSERV) {
+ if (servname && parse_numeric_servname(servname)<0)
+ return EVUTIL_EAI_NONAME;
+ }
+#endif
+
+ /* Enough operating systems handle enough common non-resolve
+ * cases here weirdly enough that we are better off just
+ * overriding them. For example:
+ *
+ * - Windows doesn't like to infer the protocol from the
+ * socket type, or fill in socket or protocol types much at
+ * all. It also seems to do its own broken implicit
+ * always-on version of AI_ADDRCONFIG that keeps it from
+ * ever resolving even a literal IPv6 address when
+	 *   ai_family is PF_UNSPEC.
+ */
+#ifdef _WIN32
+ {
+ int tmp_port;
+ err = evutil_getaddrinfo_common_(nodename,servname,&hints,
+ res, &tmp_port);
+ if (err == 0 ||
+ err == EVUTIL_EAI_MEMORY ||
+ err == EVUTIL_EAI_NONAME)
+ return err;
+ /* If we make it here, the system getaddrinfo can
+ * have a crack at it. */
+ }
+#endif
+
+ /* See documentation for need_numeric_port_hack above.*/
+ need_np_hack = need_numeric_port_hack() && servname && !hints.ai_socktype
+ && ((portnum=parse_numeric_servname(servname)) >= 0);
+ if (need_np_hack) {
+ if (!nodename)
+ return evutil_getaddrinfo_common_(
+ NULL,servname,&hints, res, &portnum);
+ servname = NULL;
+ }
+
+ if (need_socktype_protocol_hack()) {
+ evutil_getaddrinfo_infer_protocols(&hints);
+ }
+
+ /* Make sure that we didn't actually steal any AI_FLAGS values that
+	 * the system is using. (This is a constant expression, and should get
+ * optimized out.)
+ *
+ * XXXX Turn this into a compile-time failure rather than a run-time
+ * failure.
+ */
+ EVUTIL_ASSERT((ALL_NONNATIVE_AI_FLAGS & ALL_NATIVE_AI_FLAGS) == 0);
+
+ /* Clear any flags that only libevent understands. */
+ hints.ai_flags &= ~ALL_NONNATIVE_AI_FLAGS;
+
+ err = getaddrinfo(nodename, servname, &hints, res);
+ if (need_np_hack)
+ apply_numeric_port_hack(portnum, res);
+
+ if (need_socktype_protocol_hack()) {
+ if (apply_socktype_protocol_hack(*res) < 0) {
+ evutil_freeaddrinfo(*res);
+ *res = NULL;
+ return EVUTIL_EAI_MEMORY;
+ }
+ }
+ return err;
+#else
+ int port=0, err;
+ struct hostent *ent = NULL;
+ struct evutil_addrinfo hints;
+
+ if (hints_in) {
+ memcpy(&hints, hints_in, sizeof(hints));
+ } else {
+ memset(&hints, 0, sizeof(hints));
+ hints.ai_family = PF_UNSPEC;
+ }
+
+ evutil_adjust_hints_for_addrconfig_(&hints);
+
+ err = evutil_getaddrinfo_common_(nodename, servname, &hints, res, &port);
+ if (err != EVUTIL_EAI_NEED_RESOLVE) {
+ /* We either succeeded or failed. No need to continue */
+ return err;
+ }
+
+ err = 0;
+ /* Use any of the various gethostbyname_r variants as available. */
+ {
+#ifdef EVENT__HAVE_GETHOSTBYNAME_R_6_ARG
+ /* This one is what glibc provides. */
+ char buf[2048];
+ struct hostent hostent;
+ int r;
+ r = gethostbyname_r(nodename, &hostent, buf, sizeof(buf), &ent,
+ &err);
+#elif defined(EVENT__HAVE_GETHOSTBYNAME_R_5_ARG)
+ char buf[2048];
+ struct hostent hostent;
+ ent = gethostbyname_r(nodename, &hostent, buf, sizeof(buf),
+ &err);
+#elif defined(EVENT__HAVE_GETHOSTBYNAME_R_3_ARG)
+ struct hostent_data data;
+ struct hostent hostent;
+ memset(&data, 0, sizeof(data));
+ err = gethostbyname_r(nodename, &hostent, &data);
+ ent = err ? NULL : &hostent;
+#else
+ /* fall back to gethostbyname. */
+ /* XXXX This needs a lock everywhere but Windows. */
+ ent = gethostbyname(nodename);
+#ifdef _WIN32
+ err = WSAGetLastError();
+#else
+ err = h_errno;
+#endif
+#endif
+
+ /* Now we have either ent or err set. */
+ if (!ent) {
+ /* XXX is this right for windows ? */
+ switch (err) {
+ case TRY_AGAIN:
+ return EVUTIL_EAI_AGAIN;
+ case NO_RECOVERY:
+ default:
+ return EVUTIL_EAI_FAIL;
+ case HOST_NOT_FOUND:
+ return EVUTIL_EAI_NONAME;
+ case NO_ADDRESS:
+#if NO_DATA != NO_ADDRESS
+ case NO_DATA:
+#endif
+ return EVUTIL_EAI_NODATA;
+ }
+ }
+
+ if (ent->h_addrtype != hints.ai_family &&
+ hints.ai_family != PF_UNSPEC) {
+ /* This wasn't the type we were hoping for. Too bad
+ * we never had a chance to ask gethostbyname for what
+ * we wanted. */
+ return EVUTIL_EAI_NONAME;
+ }
+
+ /* Make sure we got _some_ answers. */
+ if (ent->h_length == 0)
+ return EVUTIL_EAI_NODATA;
+
+ /* If we got an address type we don't know how to make a
+ sockaddr for, give up. */
+ if (ent->h_addrtype != PF_INET && ent->h_addrtype != PF_INET6)
+ return EVUTIL_EAI_FAMILY;
+
+ *res = addrinfo_from_hostent(ent, port, &hints);
+ if (! *res)
+ return EVUTIL_EAI_MEMORY;
+ }
+
+ return 0;
+#endif
+}
+
+void
+evutil_freeaddrinfo(struct evutil_addrinfo *ai)
+{
+#ifdef EVENT__HAVE_GETADDRINFO
+ if (!(ai->ai_flags & EVUTIL_AI_LIBEVENT_ALLOCATED)) {
+ freeaddrinfo(ai);
+ return;
+ }
+#endif
+ while (ai) {
+ struct evutil_addrinfo *next = ai->ai_next;
+ if (ai->ai_canonname)
+ mm_free(ai->ai_canonname);
+ mm_free(ai);
+ ai = next;
+ }
+}
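+
+/* Illustrative sketch (not part of the original file): typical use of the
+ * public evutil_getaddrinfo()/evutil_freeaddrinfo() pair for a blocking
+ * resolve-and-connect.  example_connect_blocking is a hypothetical name;
+ * the block is not compiled. */
+#if 0
+static evutil_socket_t
+example_connect_blocking(const char *host, const char *port)
+{
+	struct evutil_addrinfo hints, *res = NULL, *ai;
+	evutil_socket_t fd = -1;
+
+	memset(&hints, 0, sizeof(hints));
+	hints.ai_family = PF_UNSPEC;
+	hints.ai_socktype = SOCK_STREAM;
+	if (evutil_getaddrinfo(host, port, &hints, &res) != 0)
+		return -1;
+	for (ai = res; ai; ai = ai->ai_next) {
+		fd = socket(ai->ai_family, ai->ai_socktype, ai->ai_protocol);
+		if (fd < 0)
+			continue;
+		if (connect(fd, ai->ai_addr, ai->ai_addrlen) == 0)
+			break;
+		evutil_closesocket(fd);
+		fd = -1;
+	}
+	evutil_freeaddrinfo(res);
+	return fd;
+}
+#endif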
+
+static evdns_getaddrinfo_fn evdns_getaddrinfo_impl = NULL;
+
+void
+evutil_set_evdns_getaddrinfo_fn_(evdns_getaddrinfo_fn fn)
+{
+ if (!evdns_getaddrinfo_impl)
+ evdns_getaddrinfo_impl = fn;
+}
+
+/* Internal helper function: act like evdns_getaddrinfo if dns_base is set;
+ * otherwise do a blocking resolve and pass the result to the callback in the
+ * way that evdns_getaddrinfo would.
+ */
+int
+evutil_getaddrinfo_async_(struct evdns_base *dns_base,
+ const char *nodename, const char *servname,
+ const struct evutil_addrinfo *hints_in,
+ void (*cb)(int, struct evutil_addrinfo *, void *), void *arg)
+{
+ if (dns_base && evdns_getaddrinfo_impl) {
+ evdns_getaddrinfo_impl(
+ dns_base, nodename, servname, hints_in, cb, arg);
+ } else {
+ struct evutil_addrinfo *ai=NULL;
+ int err;
+ err = evutil_getaddrinfo(nodename, servname, hints_in, &ai);
+ cb(err, ai, arg);
+ }
+ return 0;
+}
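+
+/* Illustrative sketch (not part of the original file): the callback shape
+ * expected by evutil_getaddrinfo_async_().  With a NULL dns_base the
+ * callback is invoked synchronously from a blocking lookup, so it must be
+ * safe to run before this function returns.  example_gai_cb is a
+ * hypothetical name; the block is not compiled. */
+#if 0
+static void
+example_gai_cb(int err, struct evutil_addrinfo *res, void *arg)
+{
+	(void)arg;
+	if (err) {
+		event_warnx("resolve failed: %s", evutil_gai_strerror(err));
+		return;
+	}
+	/* walk res->ai_next and use res->ai_addr here */
+	evutil_freeaddrinfo(res);
+}
+#endif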
+
+const char *
+evutil_gai_strerror(int err)
+{
+ /* As a sneaky side-benefit, this case statement will get most
+ * compilers to tell us if any of the error codes we defined
+ * conflict with the platform's native error codes. */
+ switch (err) {
+ case EVUTIL_EAI_CANCEL:
+ return "Request canceled";
+ case 0:
+ return "No error";
+
+ case EVUTIL_EAI_ADDRFAMILY:
+ return "address family for nodename not supported";
+ case EVUTIL_EAI_AGAIN:
+ return "temporary failure in name resolution";
+ case EVUTIL_EAI_BADFLAGS:
+ return "invalid value for ai_flags";
+ case EVUTIL_EAI_FAIL:
+ return "non-recoverable failure in name resolution";
+ case EVUTIL_EAI_FAMILY:
+ return "ai_family not supported";
+ case EVUTIL_EAI_MEMORY:
+ return "memory allocation failure";
+ case EVUTIL_EAI_NODATA:
+ return "no address associated with nodename";
+ case EVUTIL_EAI_NONAME:
+ return "nodename nor servname provided, or not known";
+ case EVUTIL_EAI_SERVICE:
+ return "servname not supported for ai_socktype";
+ case EVUTIL_EAI_SOCKTYPE:
+ return "ai_socktype not supported";
+ case EVUTIL_EAI_SYSTEM:
+ return "system error";
+ default:
+#if defined(USE_NATIVE_GETADDRINFO) && defined(_WIN32)
+ return gai_strerrorA(err);
+#elif defined(USE_NATIVE_GETADDRINFO)
+ return gai_strerror(err);
+#else
+ return "Unknown error code";
+#endif
+ }
+}
+
+#ifdef _WIN32
+/* destructively remove a trailing line terminator from s */
+static void
+chomp (char *s)
+{
+ size_t len;
+ if (s && (len = strlen (s)) > 0 && s[len - 1] == '\n') {
+ s[--len] = 0;
+ if (len > 0 && s[len - 1] == '\r')
+ s[--len] = 0;
+ }
+}
+
+/* FormatMessage returns allocated strings, but evutil_socket_error_to_string
+ * is supposed to return a string which is good indefinitely without having
+ * to be freed. To make this work without leaking memory, we cache the
+ * string the first time FormatMessage is called on a particular error
+ * code, and then return the cached string on subsequent calls with the
+ * same code. The strings aren't freed until libevent_global_shutdown
+ * (or never). We use a linked list to cache the errors, because we
+ * only expect there to be a few dozen, and that should be fast enough.
+ */
+
+struct cached_sock_errs_entry {
+ HT_ENTRY(cached_sock_errs_entry) node;
+ DWORD code;
+ char *msg; /* allocated with LocalAlloc; free with LocalFree */
+};
+
+static inline unsigned
+hash_cached_sock_errs(const struct cached_sock_errs_entry *e)
+{
+ /* Use Murmur3's 32-bit finalizer as an integer hash function */
+ DWORD h = e->code;
+ h ^= h >> 16;
+ h *= 0x85ebca6b;
+ h ^= h >> 13;
+ h *= 0xc2b2ae35;
+ h ^= h >> 16;
+ return h;
+}
+
+static inline int
+eq_cached_sock_errs(const struct cached_sock_errs_entry *a,
+ const struct cached_sock_errs_entry *b)
+{
+ return a->code == b->code;
+}
+
+#ifndef EVENT__DISABLE_THREAD_SUPPORT
+static void *windows_socket_errors_lock_ = NULL;
+#endif
+
+static HT_HEAD(cached_sock_errs_map, cached_sock_errs_entry)
+ windows_socket_errors = HT_INITIALIZER();
+
+HT_PROTOTYPE(cached_sock_errs_map,
+ cached_sock_errs_entry,
+ node,
+ hash_cached_sock_errs,
+ eq_cached_sock_errs);
+
+HT_GENERATE(cached_sock_errs_map,
+ cached_sock_errs_entry,
+ node,
+ hash_cached_sock_errs,
+ eq_cached_sock_errs,
+ 0.5,
+ mm_malloc,
+ mm_realloc,
+ mm_free);
+
+/** Equivalent to strerror, but for windows socket errors. */
+const char *
+evutil_socket_error_to_string(int errcode)
+{
+ struct cached_sock_errs_entry *errs, *newerr, find;
+ char *msg = NULL;
+
+ EVLOCK_LOCK(windows_socket_errors_lock_, 0);
+
+ find.code = errcode;
+ errs = HT_FIND(cached_sock_errs_map, &windows_socket_errors, &find);
+ if (errs) {
+ msg = errs->msg;
+ goto done;
+ }
+
+ if (0 != FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM |
+ FORMAT_MESSAGE_IGNORE_INSERTS |
+ FORMAT_MESSAGE_ALLOCATE_BUFFER,
+ NULL, errcode, 0, (char *)&msg, 0, NULL))
+ chomp (msg); /* because message has trailing newline */
+ else {
+ size_t len = 50;
+ /* use LocalAlloc because FormatMessage does */
+ msg = LocalAlloc(LMEM_FIXED, len);
+ if (!msg) {
+ msg = (char *)"LocalAlloc failed during Winsock error";
+ goto done;
+ }
+ evutil_snprintf(msg, len, "winsock error 0x%08x", errcode);
+ }
+
+ newerr = (struct cached_sock_errs_entry *)
+ mm_malloc(sizeof (struct cached_sock_errs_entry));
+
+ if (!newerr) {
+ LocalFree(msg);
+ msg = (char *)"malloc failed during Winsock error";
+ goto done;
+ }
+
+ newerr->code = errcode;
+ newerr->msg = msg;
+ HT_INSERT(cached_sock_errs_map, &windows_socket_errors, newerr);
+
+ done:
+ EVLOCK_UNLOCK(windows_socket_errors_lock_, 0);
+
+ return msg;
+}
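+
+/* Illustrative sketch (not part of the original file): because of the
+ * cache above, the string returned by evutil_socket_error_to_string()
+ * stays valid for the life of the process, so callers may log it or keep
+ * the pointer around.  example_log_last_error is a hypothetical name; the
+ * block is not compiled. */
+#if 0
+static void
+example_log_last_error(void)
+{
+	int e = EVUTIL_SOCKET_ERROR();
+	event_warnx("socket operation failed: %s (%d)",
+	    evutil_socket_error_to_string(e), e);
+}
+#endif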
+
+#ifndef EVENT__DISABLE_THREAD_SUPPORT
+int
+evutil_global_setup_locks_(const int enable_locks)
+{
+ EVTHREAD_SETUP_GLOBAL_LOCK(windows_socket_errors_lock_, 0);
+ return 0;
+}
+#endif
+
+static void
+evutil_free_sock_err_globals(void)
+{
+ struct cached_sock_errs_entry **errs, *tofree;
+
+ for (errs = HT_START(cached_sock_errs_map, &windows_socket_errors)
+ ; errs; ) {
+ tofree = *errs;
+ errs = HT_NEXT_RMV(cached_sock_errs_map,
+ &windows_socket_errors,
+ errs);
+ LocalFree(tofree->msg);
+ mm_free(tofree);
+ }
+
+ HT_CLEAR(cached_sock_errs_map, &windows_socket_errors);
+
+#ifndef EVENT__DISABLE_THREAD_SUPPORT
+ if (windows_socket_errors_lock_ != NULL) {
+ EVTHREAD_FREE_LOCK(windows_socket_errors_lock_, 0);
+ windows_socket_errors_lock_ = NULL;
+ }
+#endif
+}
+
+#else
+
+#ifndef EVENT__DISABLE_THREAD_SUPPORT
+int
+evutil_global_setup_locks_(const int enable_locks)
+{
+ return 0;
+}
+#endif
+
+static void
+evutil_free_sock_err_globals(void)
+{
+}
+
+#endif
+
+int
+evutil_snprintf(char *buf, size_t buflen, const char *format, ...)
+{
+ int r;
+ va_list ap;
+ va_start(ap, format);
+ r = evutil_vsnprintf(buf, buflen, format, ap);
+ va_end(ap);
+ return r;
+}
+
+int
+evutil_vsnprintf(char *buf, size_t buflen, const char *format, va_list ap)
+{
+ int r;
+ if (!buflen)
+ return 0;
+#if defined(_MSC_VER) || defined(_WIN32)
+ r = _vsnprintf(buf, buflen, format, ap);
+ if (r < 0)
+ r = _vscprintf(format, ap);
+#elif defined(sgi)
+ /* Make sure we always use the correct vsnprintf on IRIX */
+ extern int _xpg5_vsnprintf(char * __restrict,
+ __SGI_LIBC_NAMESPACE_QUALIFIER size_t,
+ const char * __restrict, /* va_list */ char *);
+
+ r = _xpg5_vsnprintf(buf, buflen, format, ap);
+#else
+ r = vsnprintf(buf, buflen, format, ap);
+#endif
+ buf[buflen-1] = '\0';
+ return r;
+}
+
+#define USE_INTERNAL_NTOP
+#define USE_INTERNAL_PTON
+
+const char *
+evutil_inet_ntop(int af, const void *src, char *dst, size_t len)
+{
+#if defined(EVENT__HAVE_INET_NTOP) && !defined(USE_INTERNAL_NTOP)
+ return inet_ntop(af, src, dst, len);
+#else
+ if (af == AF_INET) {
+ const struct in_addr *in = src;
+ const ev_uint32_t a = ntohl(in->s_addr);
+ int r;
+ r = evutil_snprintf(dst, len, "%d.%d.%d.%d",
+ (int)(ev_uint8_t)((a>>24)&0xff),
+ (int)(ev_uint8_t)((a>>16)&0xff),
+ (int)(ev_uint8_t)((a>>8 )&0xff),
+ (int)(ev_uint8_t)((a )&0xff));
+ if (r<0||(size_t)r>=len)
+ return NULL;
+ else
+ return dst;
+#ifdef AF_INET6
+ } else if (af == AF_INET6) {
+ const struct in6_addr *addr = src;
+ char buf[64], *cp;
+ int longestGapLen = 0, longestGapPos = -1, i,
+ curGapPos = -1, curGapLen = 0;
+ ev_uint16_t words[8];
+ for (i = 0; i < 8; ++i) {
+ words[i] =
+ (((ev_uint16_t)addr->s6_addr[2*i])<<8) + addr->s6_addr[2*i+1];
+ }
+ if (words[0] == 0 && words[1] == 0 && words[2] == 0 && words[3] == 0 &&
+ words[4] == 0 && ((words[5] == 0 && words[6] && words[7]) ||
+ (words[5] == 0xffff))) {
+ /* This is an IPv4 address. */
+ if (words[5] == 0) {
+ evutil_snprintf(buf, sizeof(buf), "::%d.%d.%d.%d",
+ addr->s6_addr[12], addr->s6_addr[13],
+ addr->s6_addr[14], addr->s6_addr[15]);
+ } else {
+ evutil_snprintf(buf, sizeof(buf), "::%x:%d.%d.%d.%d", words[5],
+ addr->s6_addr[12], addr->s6_addr[13],
+ addr->s6_addr[14], addr->s6_addr[15]);
+ }
+ if (strlen(buf) > len)
+ return NULL;
+ strlcpy(dst, buf, len);
+ return dst;
+ }
+ i = 0;
+ while (i < 8) {
+ if (words[i] == 0) {
+ curGapPos = i++;
+ curGapLen = 1;
+ while (i<8 && words[i] == 0) {
+ ++i; ++curGapLen;
+ }
+ if (curGapLen > longestGapLen) {
+ longestGapPos = curGapPos;
+ longestGapLen = curGapLen;
+ }
+ } else {
+ ++i;
+ }
+ }
+ if (longestGapLen<=1)
+ longestGapPos = -1;
+
+ cp = buf;
+ for (i = 0; i < 8; ++i) {
+ if (words[i] == 0 && longestGapPos == i) {
+ if (i == 0)
+ *cp++ = ':';
+ *cp++ = ':';
+ while (i < 8 && words[i] == 0)
+ ++i;
+ --i; /* to compensate for loop increment. */
+ } else {
+ evutil_snprintf(cp,
+ sizeof(buf)-(cp-buf), "%x", (unsigned)words[i]);
+ cp += strlen(cp);
+ if (i != 7)
+ *cp++ = ':';
+ }
+ }
+ *cp = '\0';
+ if (strlen(buf) > len)
+ return NULL;
+ strlcpy(dst, buf, len);
+ return dst;
+#endif
+ } else {
+ return NULL;
+ }
+#endif
+}
+
+int
+evutil_inet_pton(int af, const char *src, void *dst)
+{
+#if defined(EVENT__HAVE_INET_PTON) && !defined(USE_INTERNAL_PTON)
+ return inet_pton(af, src, dst);
+#else
+ if (af == AF_INET) {
+ unsigned a,b,c,d;
+ char more;
+ struct in_addr *addr = dst;
+ if (sscanf(src, "%u.%u.%u.%u%c", &a,&b,&c,&d,&more) != 4)
+ return 0;
+ if (a > 255) return 0;
+ if (b > 255) return 0;
+ if (c > 255) return 0;
+ if (d > 255) return 0;
+ addr->s_addr = htonl((a<<24) | (b<<16) | (c<<8) | d);
+ return 1;
+#ifdef AF_INET6
+ } else if (af == AF_INET6) {
+ struct in6_addr *out = dst;
+ ev_uint16_t words[8];
+ int gapPos = -1, i, setWords=0;
+ const char *dot = strchr(src, '.');
+ const char *eow; /* end of words. */
+ if (dot == src)
+ return 0;
+ else if (!dot)
+ eow = src+strlen(src);
+ else {
+ unsigned byte1,byte2,byte3,byte4;
+ char more;
+ for (eow = dot-1; eow >= src && EVUTIL_ISDIGIT_(*eow); --eow)
+ ;
+ ++eow;
+
+ /* We use "scanf" because some platform inet_aton()s are too lax
+ * about IPv4 addresses of the form "1.2.3" */
+ if (sscanf(eow, "%u.%u.%u.%u%c",
+ &byte1,&byte2,&byte3,&byte4,&more) != 4)
+ return 0;
+
+ if (byte1 > 255 ||
+ byte2 > 255 ||
+ byte3 > 255 ||
+ byte4 > 255)
+ return 0;
+
+ words[6] = (byte1<<8) | byte2;
+ words[7] = (byte3<<8) | byte4;
+ setWords += 2;
+ }
+
+ i = 0;
+ while (src < eow) {
+ if (i > 7)
+ return 0;
+ if (EVUTIL_ISXDIGIT_(*src)) {
+ char *next;
+ long r = strtol(src, &next, 16);
+ if (next > 4+src)
+ return 0;
+ if (next == src)
+ return 0;
+ if (r<0 || r>65536)
+ return 0;
+
+ words[i++] = (ev_uint16_t)r;
+ setWords++;
+ src = next;
+ if (*src != ':' && src != eow)
+ return 0;
+ ++src;
+ } else if (*src == ':' && i > 0 && gapPos==-1) {
+ gapPos = i;
+ ++src;
+ } else if (*src == ':' && i == 0 && src[1] == ':' && gapPos==-1) {
+ gapPos = i;
+ src += 2;
+ } else {
+ return 0;
+ }
+ }
+
+ if (setWords > 8 ||
+ (setWords == 8 && gapPos != -1) ||
+ (setWords < 8 && gapPos == -1))
+ return 0;
+
+ if (gapPos >= 0) {
+ int nToMove = setWords - (dot ? 2 : 0) - gapPos;
+ int gapLen = 8 - setWords;
+ /* assert(nToMove >= 0); */
+ if (nToMove < 0)
+ return -1; /* should be impossible */
+ memmove(&words[gapPos+gapLen], &words[gapPos],
+ sizeof(ev_uint16_t)*nToMove);
+ memset(&words[gapPos], 0, sizeof(ev_uint16_t)*gapLen);
+ }
+ for (i = 0; i < 8; ++i) {
+ out->s6_addr[2*i ] = words[i] >> 8;
+ out->s6_addr[2*i+1] = words[i] & 0xff;
+ }
+
+ return 1;
+#endif
+ } else {
+ return -1;
+ }
+#endif
+}
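+
+/* Illustrative round-trip sketch (compiled out; the caller function is
+ * hypothetical): evutil_inet_pton() returns 1 on success, and
+ * evutil_inet_ntop() returns 'dst' on success or NULL when 'len' is too
+ * small for the text form. */
+#if 0
+static void
+example_inet_roundtrip(void)
+{
+	struct in_addr a;
+	char text[32];
+	if (evutil_inet_pton(AF_INET, "192.0.2.1", &a) == 1 &&
+	    evutil_inet_ntop(AF_INET, &a, text, sizeof(text)) != NULL) {
+		/* 'text' now holds "192.0.2.1". */
+	}
+}
+#endif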
+
+int
+evutil_parse_sockaddr_port(const char *ip_as_string, struct sockaddr *out, int *outlen)
+{
+ int port;
+ char buf[128];
+ const char *cp, *addr_part, *port_part;
+ int is_ipv6;
+ /* recognized formats are:
+ * [ipv6]:port
+ * ipv6
+ * [ipv6]
+ * ipv4:port
+ * ipv4
+ */
+
+ cp = strchr(ip_as_string, ':');
+ if (*ip_as_string == '[') {
+ size_t len;
+ if (!(cp = strchr(ip_as_string, ']'))) {
+ return -1;
+ }
+ len = ( cp-(ip_as_string + 1) );
+ if (len > sizeof(buf)-1) {
+ return -1;
+ }
+ memcpy(buf, ip_as_string+1, len);
+ buf[len] = '\0';
+ addr_part = buf;
+ if (cp[1] == ':')
+ port_part = cp+2;
+ else
+ port_part = NULL;
+ is_ipv6 = 1;
+ } else if (cp && strchr(cp+1, ':')) {
+ is_ipv6 = 1;
+ addr_part = ip_as_string;
+ port_part = NULL;
+ } else if (cp) {
+ is_ipv6 = 0;
+ if (cp - ip_as_string > (int)sizeof(buf)-1) {
+ return -1;
+ }
+ memcpy(buf, ip_as_string, cp-ip_as_string);
+ buf[cp-ip_as_string] = '\0';
+ addr_part = buf;
+ port_part = cp+1;
+ } else {
+ addr_part = ip_as_string;
+ port_part = NULL;
+ is_ipv6 = 0;
+ }
+
+ if (port_part == NULL) {
+ port = 0;
+ } else {
+ port = atoi(port_part);
+ if (port <= 0 || port > 65535) {
+ return -1;
+ }
+ }
+
+ if (!addr_part)
+ return -1; /* Should be impossible. */
+#ifdef AF_INET6
+ if (is_ipv6)
+ {
+ struct sockaddr_in6 sin6;
+ memset(&sin6, 0, sizeof(sin6));
+#ifdef EVENT__HAVE_STRUCT_SOCKADDR_IN6_SIN6_LEN
+ sin6.sin6_len = sizeof(sin6);
+#endif
+ sin6.sin6_family = AF_INET6;
+ sin6.sin6_port = htons(port);
+ if (1 != evutil_inet_pton(AF_INET6, addr_part, &sin6.sin6_addr))
+ return -1;
+ if ((int)sizeof(sin6) > *outlen)
+ return -1;
+ memset(out, 0, *outlen);
+ memcpy(out, &sin6, sizeof(sin6));
+ *outlen = sizeof(sin6);
+ return 0;
+ }
+ else
+#endif
+ {
+ struct sockaddr_in sin;
+ memset(&sin, 0, sizeof(sin));
+#ifdef EVENT__HAVE_STRUCT_SOCKADDR_IN_SIN_LEN
+ sin.sin_len = sizeof(sin);
+#endif
+ sin.sin_family = AF_INET;
+ sin.sin_port = htons(port);
+ if (1 != evutil_inet_pton(AF_INET, addr_part, &sin.sin_addr))
+ return -1;
+ if ((int)sizeof(sin) > *outlen)
+ return -1;
+ memset(out, 0, *outlen);
+ memcpy(out, &sin, sizeof(sin));
+ *outlen = sizeof(sin);
+ return 0;
+ }
+}
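+
+/* Illustrative usage sketch (compiled out; caller code is hypothetical):
+ * parsing the "ipv4:port" and "[ipv6]:port" forms accepted above. On
+ * success *outlen is set to the size of the sockaddr actually written. */
+#if 0
+static void
+example_parse_sockaddr_port(void)
+{
+	struct sockaddr_storage ss;
+	int len = (int)sizeof(ss);
+	if (evutil_parse_sockaddr_port("127.0.0.1:8080",
+		(struct sockaddr *)&ss, &len) == 0) {
+		/* 'ss' holds an AF_INET address; len == sizeof(struct sockaddr_in). */
+	}
+	len = (int)sizeof(ss);
+	if (evutil_parse_sockaddr_port("[::1]:8080",
+		(struct sockaddr *)&ss, &len) == 0) {
+		/* 'ss' holds an AF_INET6 address. */
+	}
+}
+#endif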
+
+const char *
+evutil_format_sockaddr_port_(const struct sockaddr *sa, char *out, size_t outlen)
+{
+ char b[128];
+ const char *res=NULL;
+ int port;
+ if (sa->sa_family == AF_INET) {
+ const struct sockaddr_in *sin = (const struct sockaddr_in*)sa;
+ res = evutil_inet_ntop(AF_INET, &sin->sin_addr,b,sizeof(b));
+ port = ntohs(sin->sin_port);
+ if (res) {
+ evutil_snprintf(out, outlen, "%s:%d", b, port);
+ return out;
+ }
+ } else if (sa->sa_family == AF_INET6) {
+ const struct sockaddr_in6 *sin6 = (const struct sockaddr_in6*)sa;
+ res = evutil_inet_ntop(AF_INET6, &sin6->sin6_addr,b,sizeof(b));
+ port = ntohs(sin6->sin6_port);
+ if (res) {
+ evutil_snprintf(out, outlen, "[%s]:%d", b, port);
+ return out;
+ }
+ }
+
+ evutil_snprintf(out, outlen, "<addr with socktype %d>",
+ (int)sa->sa_family);
+ return out;
+}
+
+int
+evutil_sockaddr_cmp(const struct sockaddr *sa1, const struct sockaddr *sa2,
+ int include_port)
+{
+ int r;
+ if (0 != (r = (sa1->sa_family - sa2->sa_family)))
+ return r;
+
+ if (sa1->sa_family == AF_INET) {
+ const struct sockaddr_in *sin1, *sin2;
+ sin1 = (const struct sockaddr_in *)sa1;
+ sin2 = (const struct sockaddr_in *)sa2;
+ if (sin1->sin_addr.s_addr < sin2->sin_addr.s_addr)
+ return -1;
+ else if (sin1->sin_addr.s_addr > sin2->sin_addr.s_addr)
+ return 1;
+ else if (include_port &&
+ (r = ((int)sin1->sin_port - (int)sin2->sin_port)))
+ return r;
+ else
+ return 0;
+ }
+#ifdef AF_INET6
+ else if (sa1->sa_family == AF_INET6) {
+ const struct sockaddr_in6 *sin1, *sin2;
+ sin1 = (const struct sockaddr_in6 *)sa1;
+ sin2 = (const struct sockaddr_in6 *)sa2;
+ if ((r = memcmp(sin1->sin6_addr.s6_addr, sin2->sin6_addr.s6_addr, 16)))
+ return r;
+ else if (include_port &&
+ (r = ((int)sin1->sin6_port - (int)sin2->sin6_port)))
+ return r;
+ else
+ return 0;
+ }
+#endif
+ return 1;
+}
+
+/* Tables to implement ctypes-replacement EVUTIL_IS*() functions. Each table
+ * has 256 bits to look up whether a character is in some set or not. This
+ * fails on non-ASCII platforms, but so does every other place where we
+ * take a char and write it onto the network.
+ **/
+static const ev_uint32_t EVUTIL_ISALPHA_TABLE[8] =
+ { 0, 0, 0x7fffffe, 0x7fffffe, 0, 0, 0, 0 };
+static const ev_uint32_t EVUTIL_ISALNUM_TABLE[8] =
+ { 0, 0x3ff0000, 0x7fffffe, 0x7fffffe, 0, 0, 0, 0 };
+static const ev_uint32_t EVUTIL_ISSPACE_TABLE[8] = { 0x3e00, 0x1, 0, 0, 0, 0, 0, 0 };
+static const ev_uint32_t EVUTIL_ISXDIGIT_TABLE[8] =
+ { 0, 0x3ff0000, 0x7e, 0x7e, 0, 0, 0, 0 };
+static const ev_uint32_t EVUTIL_ISDIGIT_TABLE[8] = { 0, 0x3ff0000, 0, 0, 0, 0, 0, 0 };
+static const ev_uint32_t EVUTIL_ISPRINT_TABLE[8] =
+ { 0, 0xffffffff, 0xffffffff, 0x7fffffff, 0, 0, 0, 0x0 };
+static const ev_uint32_t EVUTIL_ISUPPER_TABLE[8] = { 0, 0, 0x7fffffe, 0, 0, 0, 0, 0 };
+static const ev_uint32_t EVUTIL_ISLOWER_TABLE[8] = { 0, 0, 0, 0x7fffffe, 0, 0, 0, 0 };
+/* Upper-casing and lowercasing tables to map characters to upper/lowercase
+ * equivalents. */
+static const unsigned char EVUTIL_TOUPPER_TABLE[256] = {
+ 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,
+ 16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,
+ 32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,
+ 48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,
+ 64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,
+ 80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,
+ 96,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,
+ 80,81,82,83,84,85,86,87,88,89,90,123,124,125,126,127,
+ 128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,
+ 144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,
+ 160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,
+ 176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,
+ 192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,
+ 208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,
+ 224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,
+ 240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255,
+};
+static const unsigned char EVUTIL_TOLOWER_TABLE[256] = {
+ 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,
+ 16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,
+ 32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,
+ 48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,
+ 64,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,
+ 112,113,114,115,116,117,118,119,120,121,122,91,92,93,94,95,
+ 96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,
+ 112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,
+ 128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,
+ 144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,
+ 160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,
+ 176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,
+ 192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,
+ 208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,
+ 224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,
+ 240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255,
+};
+
+#define IMPL_CTYPE_FN(name) \
+ int EVUTIL_##name##_(char c) { \
+ ev_uint8_t u = c; \
+ return !!(EVUTIL_##name##_TABLE[(u >> 5) & 7] & (1 << (u & 31))); \
+ }
+IMPL_CTYPE_FN(ISALPHA)
+IMPL_CTYPE_FN(ISALNUM)
+IMPL_CTYPE_FN(ISSPACE)
+IMPL_CTYPE_FN(ISDIGIT)
+IMPL_CTYPE_FN(ISXDIGIT)
+IMPL_CTYPE_FN(ISPRINT)
+IMPL_CTYPE_FN(ISLOWER)
+IMPL_CTYPE_FN(ISUPPER)
+
+char EVUTIL_TOLOWER_(char c)
+{
+ return ((char)EVUTIL_TOLOWER_TABLE[(ev_uint8_t)c]);
+}
+char EVUTIL_TOUPPER_(char c)
+{
+ return ((char)EVUTIL_TOUPPER_TABLE[(ev_uint8_t)c]);
+}
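+
+/* Worked example of the table lookup above (compiled out; the caller is
+ * hypothetical): for c == '7' (value 55), u >> 5 selects word 1 of the
+ * table and 1 << (u & 31) selects bit 23; EVUTIL_ISDIGIT_TABLE[1] is
+ * 0x3ff0000, i.e. bits 16..25 set, which covers exactly '0'..'9'. */
+#if 0
+static void
+example_ctype_lookup(void)
+{
+	if (EVUTIL_ISDIGIT_('7') && !EVUTIL_ISDIGIT_('z') &&
+	    EVUTIL_TOLOWER_('A') == 'a') {
+		/* The ASCII-only tables behave like ctype.h in the C locale. */
+	}
+}
+#endif
+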
+int
+evutil_ascii_strcasecmp(const char *s1, const char *s2)
+{
+ char c1, c2;
+ while (1) {
+ c1 = EVUTIL_TOLOWER_(*s1++);
+ c2 = EVUTIL_TOLOWER_(*s2++);
+ if (c1 < c2)
+ return -1;
+ else if (c1 > c2)
+ return 1;
+ else if (c1 == 0)
+ return 0;
+ }
+}
+int evutil_ascii_strncasecmp(const char *s1, const char *s2, size_t n)
+{
+ char c1, c2;
+ while (n--) {
+ c1 = EVUTIL_TOLOWER_(*s1++);
+ c2 = EVUTIL_TOLOWER_(*s2++);
+ if (c1 < c2)
+ return -1;
+ else if (c1 > c2)
+ return 1;
+ else if (c1 == 0)
+ return 0;
+ }
+ return 0;
+}
+
+void
+evutil_rtrim_lws_(char *str)
+{
+ char *cp;
+
+ if (str == NULL)
+ return;
+
+ if ((cp = strchr(str, '\0')) == NULL || (cp == str))
+ return;
+
+ --cp;
+
+ while (*cp == ' ' || *cp == '\t') {
+ *cp = '\0';
+ if (cp == str)
+ break;
+ --cp;
+ }
+}
+
+static int
+evutil_issetugid(void)
+{
+#ifdef EVENT__HAVE_ISSETUGID
+ return issetugid();
+#else
+
+#ifdef EVENT__HAVE_GETEUID
+ if (getuid() != geteuid())
+ return 1;
+#endif
+#ifdef EVENT__HAVE_GETEGID
+ if (getgid() != getegid())
+ return 1;
+#endif
+ return 0;
+#endif
+}
+
+const char *
+evutil_getenv_(const char *varname)
+{
+ if (evutil_issetugid())
+ return NULL;
+
+ return getenv(varname);
+}
+
+ev_uint32_t
+evutil_weakrand_seed_(struct evutil_weakrand_state *state, ev_uint32_t seed)
+{
+ if (seed == 0) {
+ struct timeval tv;
+ evutil_gettimeofday(&tv, NULL);
+ seed = (ev_uint32_t)tv.tv_sec + (ev_uint32_t)tv.tv_usec;
+#ifdef _WIN32
+ seed += (ev_uint32_t) _getpid();
+#else
+ seed += (ev_uint32_t) getpid();
+#endif
+ }
+ state->seed = seed;
+ return seed;
+}
+
+ev_int32_t
+evutil_weakrand_(struct evutil_weakrand_state *state)
+{
+ /* This RNG implementation is a linear congruential generator, with
+ * modulus 2^31, multiplier 1103515245, and addend 12345. It's also
+ * used by OpenBSD, and by Glibc's TYPE_0 RNG.
+ *
+ * The linear congruential generator is not an industrial-strength
+ * RNG! It's fast, but it can have higher-order patterns. Notably,
+ * the low bits tend to have periodicity.
+ */
+ state->seed = ((state->seed) * 1103515245 + 12345) & 0x7fffffff;
+ return (ev_int32_t)(state->seed);
+}
+
+ev_int32_t
+evutil_weakrand_range_(struct evutil_weakrand_state *state, ev_int32_t top)
+{
+ ev_int32_t divisor, result;
+
+ /* We can't just do weakrand() % top, since the low bits of the LCG
+ * are less random than the high ones. (Specifically, since the LCG
+ * modulus is 2^N, every 2^m for m<N will divide the modulus, and so
+ * therefore the low m bits of the LCG will have period 2^m.) */
+ divisor = EVUTIL_WEAKRAND_MAX / top;
+ do {
+ result = evutil_weakrand_(state) / divisor;
+ } while (result >= top);
+ return result;
+}
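+
+/* Illustrative usage sketch (compiled out; caller code is hypothetical):
+ * seed the state once, then draw values in [0, top). As noted above, this
+ * generator is not suitable for security-sensitive uses. */
+#if 0
+static void
+example_weakrand(void)
+{
+	struct evutil_weakrand_state rng;
+	ev_int32_t roll;
+	evutil_weakrand_seed_(&rng, 0); /* 0 means "derive a seed from time and pid" */
+	roll = evutil_weakrand_range_(&rng, 6); /* value in 0..5 */
+	(void) roll;
+}
+#endif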
+
+/**
+ * Volatile pointer to memset: we use this to keep the compiler from
+ * eliminating our call to memset.
+ */
+void * (*volatile evutil_memset_volatile_)(void *, int, size_t) = memset;
+
+void
+evutil_memclear_(void *mem, size_t len)
+{
+ evutil_memset_volatile_(mem, 0, len);
+}
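+
+/* Illustrative usage sketch (compiled out; caller code is hypothetical):
+ * clearing sensitive data through the volatile function pointer above keeps
+ * the compiler from dropping the memset as a dead store. */
+#if 0
+static void
+example_memclear(void)
+{
+	char secret[32];
+	/* ... use 'secret' for key material ... */
+	evutil_memclear_(secret, sizeof(secret));
+}
+#endif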
+
+int
+evutil_sockaddr_is_loopback_(const struct sockaddr *addr)
+{
+ static const char LOOPBACK_S6[16] =
+ "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\1";
+ if (addr->sa_family == AF_INET) {
+ struct sockaddr_in *sin = (struct sockaddr_in *)addr;
+ return (ntohl(sin->sin_addr.s_addr) & 0xff000000) == 0x7f000000;
+ } else if (addr->sa_family == AF_INET6) {
+ struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)addr;
+ return !memcmp(sin6->sin6_addr.s6_addr, LOOPBACK_S6, 16);
+ }
+ return 0;
+}
+
+int
+evutil_hex_char_to_int_(char c)
+{
+ switch(c)
+ {
+ case '0': return 0;
+ case '1': return 1;
+ case '2': return 2;
+ case '3': return 3;
+ case '4': return 4;
+ case '5': return 5;
+ case '6': return 6;
+ case '7': return 7;
+ case '8': return 8;
+ case '9': return 9;
+ case 'A': case 'a': return 10;
+ case 'B': case 'b': return 11;
+ case 'C': case 'c': return 12;
+ case 'D': case 'd': return 13;
+ case 'E': case 'e': return 14;
+ case 'F': case 'f': return 15;
+ }
+ return -1;
+}
+
+#ifdef _WIN32
+HMODULE
+evutil_load_windows_system_library_(const TCHAR *library_name)
+{
+ TCHAR path[MAX_PATH];
+ unsigned n;
+ n = GetSystemDirectory(path, MAX_PATH);
+ if (n == 0 || n + _tcslen(library_name) + 2 >= MAX_PATH)
+ return 0;
+ _tcscat(path, TEXT("\\"));
+ _tcscat(path, library_name);
+ return LoadLibrary(path);
+}
+#endif
+
+/* Internal wrapper around 'socket' to provide Linux-style support for
+ * syscall-saving methods where available.
+ *
+ * In addition to regular socket behavior, you can use a bitwise or to set the
+ * flags EVUTIL_SOCK_NONBLOCK and EVUTIL_SOCK_CLOEXEC in the 'type' argument,
+ * to make the socket nonblocking or close-on-exec with as few syscalls as
+ * possible.
+ */
+evutil_socket_t
+evutil_socket_(int domain, int type, int protocol)
+{
+ evutil_socket_t r;
+#if defined(SOCK_NONBLOCK) && defined(SOCK_CLOEXEC)
+ r = socket(domain, type, protocol);
+ if (r >= 0)
+ return r;
+ else if ((type & (SOCK_NONBLOCK|SOCK_CLOEXEC)) == 0)
+ return -1;
+#endif
+#define SOCKET_TYPE_MASK (~(EVUTIL_SOCK_NONBLOCK|EVUTIL_SOCK_CLOEXEC))
+ r = socket(domain, type & SOCKET_TYPE_MASK, protocol);
+ if (r < 0)
+ return -1;
+ if (type & EVUTIL_SOCK_NONBLOCK) {
+ if (evutil_fast_socket_nonblocking(r) < 0) {
+ evutil_closesocket(r);
+ return -1;
+ }
+ }
+ if (type & EVUTIL_SOCK_CLOEXEC) {
+ if (evutil_fast_socket_closeonexec(r) < 0) {
+ evutil_closesocket(r);
+ return -1;
+ }
+ }
+ return r;
+}
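+
+/* Illustrative usage sketch (compiled out; caller code is hypothetical):
+ * requesting a nonblocking, close-on-exec TCP socket in a single call. */
+#if 0
+static void
+example_evutil_socket(void)
+{
+	evutil_socket_t fd = evutil_socket_(AF_INET,
+	    SOCK_STREAM|EVUTIL_SOCK_NONBLOCK|EVUTIL_SOCK_CLOEXEC, 0);
+	if (fd >= 0)
+		evutil_closesocket(fd);
+}
+#endif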
+
+/* Internal wrapper around 'accept' or 'accept4' to provide Linux-style
+ * support for syscall-saving methods where available.
+ *
+ * In addition to regular accept behavior, you can set one or more of flags
+ * EVUTIL_SOCK_NONBLOCK and EVUTIL_SOCK_CLOEXEC in the 'flags' argument, to
+ * make the socket nonblocking or close-on-exec with as few syscalls as
+ * possible.
+ */
+evutil_socket_t
+evutil_accept4_(evutil_socket_t sockfd, struct sockaddr *addr,
+ ev_socklen_t *addrlen, int flags)
+{
+ evutil_socket_t result;
+#if defined(EVENT__HAVE_ACCEPT4) && defined(SOCK_CLOEXEC) && defined(SOCK_NONBLOCK)
+ result = accept4(sockfd, addr, addrlen, flags);
+ if (result >= 0 || (errno != EINVAL && errno != ENOSYS)) {
+ /* A nonnegative result means that we succeeded, so return.
+ * Failing with EINVAL means that an option wasn't supported,
+ * and failing with ENOSYS means that the syscall wasn't
+ * there: in those cases we want to fall back. Otherwise, we
+ * got a real error, and we should return. */
+ return result;
+ }
+#endif
+ result = accept(sockfd, addr, addrlen);
+ if (result < 0)
+ return result;
+
+ if (flags & EVUTIL_SOCK_CLOEXEC) {
+ if (evutil_fast_socket_closeonexec(result) < 0) {
+ evutil_closesocket(result);
+ return -1;
+ }
+ }
+ if (flags & EVUTIL_SOCK_NONBLOCK) {
+ if (evutil_fast_socket_nonblocking(result) < 0) {
+ evutil_closesocket(result);
+ return -1;
+ }
+ }
+ return result;
+}
+
+/* Internal function: Set fd[0] and fd[1] to a pair of fds such that writes on
+ * fd[1] get read from fd[0]. Make both fds nonblocking and close-on-exec.
+ * Return 0 on success, -1 on failure.
+ */
+int
+evutil_make_internal_pipe_(evutil_socket_t fd[2])
+{
+ /*
+ Making the second socket nonblocking is a bit subtle, given that we
+	  ignore any EAGAIN returns when writing to it, and you don't usually
+ do that for a nonblocking socket. But if the kernel gives us EAGAIN,
+ then there's no need to add any more data to the buffer, since
+ the main thread is already either about to wake up and drain it,
+ or woken up and in the process of draining it.
+ */
+
+#if defined(EVENT__HAVE_PIPE2)
+ if (pipe2(fd, O_NONBLOCK|O_CLOEXEC) == 0)
+ return 0;
+#endif
+#if defined(EVENT__HAVE_PIPE)
+ if (pipe(fd) == 0) {
+ if (evutil_fast_socket_nonblocking(fd[0]) < 0 ||
+ evutil_fast_socket_nonblocking(fd[1]) < 0 ||
+ evutil_fast_socket_closeonexec(fd[0]) < 0 ||
+ evutil_fast_socket_closeonexec(fd[1]) < 0) {
+ close(fd[0]);
+ close(fd[1]);
+ fd[0] = fd[1] = -1;
+ return -1;
+ }
+ return 0;
+ } else {
+ event_warn("%s: pipe", __func__);
+ }
+#endif
+
+#ifdef _WIN32
+#define LOCAL_SOCKETPAIR_AF AF_INET
+#else
+#define LOCAL_SOCKETPAIR_AF AF_UNIX
+#endif
+ if (evutil_socketpair(LOCAL_SOCKETPAIR_AF, SOCK_STREAM, 0, fd) == 0) {
+ if (evutil_fast_socket_nonblocking(fd[0]) < 0 ||
+ evutil_fast_socket_nonblocking(fd[1]) < 0 ||
+ evutil_fast_socket_closeonexec(fd[0]) < 0 ||
+ evutil_fast_socket_closeonexec(fd[1]) < 0) {
+ evutil_closesocket(fd[0]);
+ evutil_closesocket(fd[1]);
+ fd[0] = fd[1] = -1;
+ return -1;
+ }
+ return 0;
+ }
+ fd[0] = fd[1] = -1;
+ return -1;
+}
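+
+/* Illustrative usage sketch (compiled out; caller code is hypothetical and
+ * POSIX-flavored -- on Windows the pair is a socketpair, so send() would be
+ * used instead of write()): the self-wakeup pattern this helper supports,
+ * where one thread writes a byte to fd[1] to wake a loop polling fd[0]. */
+#if 0
+static void
+example_internal_pipe(void)
+{
+	evutil_socket_t fds[2];
+	if (evutil_make_internal_pipe_(fds) == 0) {
+		char byte = 0;
+		(void) write(fds[1], &byte, 1); /* wake the reader; EAGAIN is fine */
+		evutil_closesocket(fds[0]);
+		evutil_closesocket(fds[1]);
+	}
+}
+#endif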
+
+/* Wrapper around eventfd on systems that provide it. Unlike the system
+ * eventfd, it always supports EVUTIL_EFD_CLOEXEC and EVUTIL_EFD_NONBLOCK as
+ * flags. Returns -1 on error or if eventfd is not supported.
+ */
+evutil_socket_t
+evutil_eventfd_(unsigned initval, int flags)
+{
+#if defined(EVENT__HAVE_EVENTFD) && defined(EVENT__HAVE_SYS_EVENTFD_H)
+ int r;
+#if defined(EFD_CLOEXEC) && defined(EFD_NONBLOCK)
+ r = eventfd(initval, flags);
+ if (r >= 0 || flags == 0)
+ return r;
+#endif
+ r = eventfd(initval, 0);
+ if (r < 0)
+ return r;
+ if (flags & EVUTIL_EFD_CLOEXEC) {
+ if (evutil_fast_socket_closeonexec(r) < 0) {
+ evutil_closesocket(r);
+ return -1;
+ }
+ }
+ if (flags & EVUTIL_EFD_NONBLOCK) {
+ if (evutil_fast_socket_nonblocking(r) < 0) {
+ evutil_closesocket(r);
+ return -1;
+ }
+ }
+ return r;
+#else
+ return -1;
+#endif
+}
+
+void
+evutil_free_globals_(void)
+{
+ evutil_free_secure_rng_globals_();
+ evutil_free_sock_err_globals();
+}
diff --git a/libs/libevent/src/evutil_rand.c b/libs/libevent/src/evutil_rand.c
new file mode 100644
index 0000000000..046a14b07a
--- /dev/null
+++ b/libs/libevent/src/evutil_rand.c
@@ -0,0 +1,206 @@
+/*
+ * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* This file has our secure PRNG code. On platforms that have arc4random(),
+ * we just use that. Otherwise, we include arc4random.c as a bunch of static
+ * functions, and wrap it lightly. We don't expose the arc4random*() APIs
+ * because A) they aren't in our namespace, and B) it's not nice to name your
+ * APIs after their implementations. We keep them in a separate file
+ * so that other people can rip it out and use it for whatever.
+ */
+
+#include "event2/event-config.h"
+#include "evconfig-private.h"
+
+#include <limits.h>
+
+#include "util-internal.h"
+#include "evthread-internal.h"
+
+#ifdef EVENT__HAVE_ARC4RANDOM
+#include <stdlib.h>
+#include <string.h>
+int
+evutil_secure_rng_set_urandom_device_file(char *fname)
+{
+ (void) fname;
+ return -1;
+}
+int
+evutil_secure_rng_init(void)
+{
+ /* call arc4random() now to force it to self-initialize */
+ (void) arc4random();
+ return 0;
+}
+#ifndef EVENT__DISABLE_THREAD_SUPPORT
+int
+evutil_secure_rng_global_setup_locks_(const int enable_locks)
+{
+ return 0;
+}
+#endif
+static void
+evutil_free_secure_rng_globals_locks(void)
+{
+}
+
+static void
+ev_arc4random_buf(void *buf, size_t n)
+{
+#if defined(EVENT__HAVE_ARC4RANDOM_BUF) && !defined(__APPLE__)
+ arc4random_buf(buf, n);
+ return;
+#else
+ unsigned char *b = buf;
+
+#if defined(EVENT__HAVE_ARC4RANDOM_BUF)
+	/* OSX 10.7 introduced arc4random_buf, so if you build your program
+	 * there, you'll be surprised when it fails to run on older versions of OSX.
+ * To solve this, we can check whether the function pointer is set,
+ * and fall back otherwise. (OSX does this using some linker
+ * trickery.)
+ */
+ {
+ void (*tptr)(void *,size_t) =
+ (void (*)(void*,size_t))arc4random_buf;
+ if (tptr != NULL) {
+ arc4random_buf(buf, n);
+ return;
+ }
+ }
+#endif
+ /* Make sure that we start out with b at a 4-byte alignment; plenty
+ * of CPUs care about this for 32-bit access. */
+ if (n >= 4 && ((ev_uintptr_t)b) & 3) {
+ ev_uint32_t u = arc4random();
+ int n_bytes = 4 - (((ev_uintptr_t)b) & 3);
+ memcpy(b, &u, n_bytes);
+ b += n_bytes;
+ n -= n_bytes;
+ }
+ while (n >= 4) {
+ *(ev_uint32_t*)b = arc4random();
+ b += 4;
+ n -= 4;
+ }
+ if (n) {
+ ev_uint32_t u = arc4random();
+ memcpy(b, &u, n);
+ }
+#endif
+}
+
+#else /* !EVENT__HAVE_ARC4RANDOM { */
+
+#ifdef EVENT__ssize_t
+#define ssize_t EVENT__ssize_t
+#endif
+#define ARC4RANDOM_EXPORT static
+#define ARC4_LOCK_() EVLOCK_LOCK(arc4rand_lock, 0)
+#define ARC4_UNLOCK_() EVLOCK_UNLOCK(arc4rand_lock, 0)
+#ifndef EVENT__DISABLE_THREAD_SUPPORT
+static void *arc4rand_lock;
+#endif
+
+#define ARC4RANDOM_UINT32 ev_uint32_t
+#define ARC4RANDOM_NOSTIR
+#define ARC4RANDOM_NORANDOM
+#define ARC4RANDOM_NOUNIFORM
+
+#include "./arc4random.c"
+
+#ifndef EVENT__DISABLE_THREAD_SUPPORT
+int
+evutil_secure_rng_global_setup_locks_(const int enable_locks)
+{
+ EVTHREAD_SETUP_GLOBAL_LOCK(arc4rand_lock, 0);
+ return 0;
+}
+#endif
+
+static void
+evutil_free_secure_rng_globals_locks(void)
+{
+#ifndef EVENT__DISABLE_THREAD_SUPPORT
+ if (arc4rand_lock != NULL) {
+ EVTHREAD_FREE_LOCK(arc4rand_lock, 0);
+ arc4rand_lock = NULL;
+ }
+#endif
+ return;
+}
+
+int
+evutil_secure_rng_set_urandom_device_file(char *fname)
+{
+#ifdef TRY_SEED_URANDOM
+ ARC4_LOCK_();
+ arc4random_urandom_filename = fname;
+ ARC4_UNLOCK_();
+#endif
+ return 0;
+}
+
+int
+evutil_secure_rng_init(void)
+{
+ int val;
+
+ ARC4_LOCK_();
+ if (!arc4_seeded_ok)
+ arc4_stir();
+ val = arc4_seeded_ok ? 0 : -1;
+ ARC4_UNLOCK_();
+ return val;
+}
+
+static void
+ev_arc4random_buf(void *buf, size_t n)
+{
+ arc4random_buf(buf, n);
+}
+
+#endif /* } !EVENT__HAVE_ARC4RANDOM */
+
+void
+evutil_secure_rng_get_bytes(void *buf, size_t n)
+{
+ ev_arc4random_buf(buf, n);
+}
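+
+/* Illustrative usage sketch (compiled out; caller code is hypothetical):
+ * filling a buffer with cryptographically strong random bytes, e.g. for a
+ * nonce or a DNS transaction id. */
+#if 0
+static void
+example_secure_rng(void)
+{
+	ev_uint8_t nonce[16];
+	evutil_secure_rng_get_bytes(nonce, sizeof(nonce));
+	/* 'nonce' is now filled from the arc4random-based PRNG. */
+}
+#endif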
+
+void
+evutil_secure_rng_add_bytes(const char *buf, size_t n)
+{
+ arc4random_addrandom((unsigned char*)buf,
+ n>(size_t)INT_MAX ? INT_MAX : (int)n);
+}
+
+void
+evutil_free_secure_rng_globals_(void)
+{
+ evutil_free_secure_rng_globals_locks();
+}
diff --git a/libs/libevent/src/evutil_time.c b/libs/libevent/src/evutil_time.c
new file mode 100644
index 0000000000..8f53c66b68
--- /dev/null
+++ b/libs/libevent/src/evutil_time.c
@@ -0,0 +1,538 @@
+/*
+ * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "event2/event-config.h"
+#include "evconfig-private.h"
+
+#ifdef _WIN32
+#include <winsock2.h>
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+#undef WIN32_LEAN_AND_MEAN
+#endif
+
+#include <sys/types.h>
+#ifdef EVENT__HAVE_STDLIB_H
+#include <stdlib.h>
+#endif
+#include <errno.h>
+#include <limits.h>
+#ifndef EVENT__HAVE_GETTIMEOFDAY
+#include <sys/timeb.h>
+#endif
+#if !defined(EVENT__HAVE_NANOSLEEP) && !defined(EVENT__HAVE_USLEEP) && \
+ !defined(_WIN32)
+#include <sys/select.h>
+#endif
+#include <time.h>
+#include <sys/stat.h>
+#include <string.h>
+
+#include "event2/util.h"
+#include "util-internal.h"
+#include "log-internal.h"
+#include "mm-internal.h"
+
+#ifndef EVENT__HAVE_GETTIMEOFDAY
+/* No gettimeofday; this must be windows. */
+int
+evutil_gettimeofday(struct timeval *tv, struct timezone *tz)
+{
+#ifdef _MSC_VER
+#define U64_LITERAL(n) n##ui64
+#else
+#define U64_LITERAL(n) n##llu
+#endif
+
+ /* Conversion logic taken from Tor, which in turn took it
+ * from Perl. GetSystemTimeAsFileTime returns its value as
+ * an unaligned (!) 64-bit value containing the number of
+ * 100-nanosecond intervals since 1 January 1601 UTC. */
+#define EPOCH_BIAS U64_LITERAL(116444736000000000)
+#define UNITS_PER_SEC U64_LITERAL(10000000)
+#define USEC_PER_SEC U64_LITERAL(1000000)
+#define UNITS_PER_USEC U64_LITERAL(10)
+ union {
+ FILETIME ft_ft;
+ ev_uint64_t ft_64;
+ } ft;
+
+ if (tv == NULL)
+ return -1;
+
+ GetSystemTimeAsFileTime(&ft.ft_ft);
+
+ if (EVUTIL_UNLIKELY(ft.ft_64 < EPOCH_BIAS)) {
+ /* Time before the unix epoch. */
+ return -1;
+ }
+ ft.ft_64 -= EPOCH_BIAS;
+ tv->tv_sec = (long) (ft.ft_64 / UNITS_PER_SEC);
+ tv->tv_usec = (long) ((ft.ft_64 / UNITS_PER_USEC) % USEC_PER_SEC);
+ return 0;
+}
+#endif
+
+#define MAX_SECONDS_IN_MSEC_LONG \
+ (((LONG_MAX) - 999) / 1000)
+
+long
+evutil_tv_to_msec_(const struct timeval *tv)
+{
+ if (tv->tv_usec > 1000000 || tv->tv_sec > MAX_SECONDS_IN_MSEC_LONG)
+ return -1;
+
+ return (tv->tv_sec * 1000) + ((tv->tv_usec + 999) / 1000);
+}
+
+/*
+ Replacement for usleep on platforms that don't have one. Not guaranteed to
+  be any more fine-grained than 1 msec.
+ */
+void
+evutil_usleep_(const struct timeval *tv)
+{
+ if (!tv)
+ return;
+#if defined(_WIN32)
+ {
+ long msec = evutil_tv_to_msec_(tv);
+ Sleep((DWORD)msec);
+ }
+#elif defined(EVENT__HAVE_NANOSLEEP)
+ {
+ struct timespec ts;
+ ts.tv_sec = tv->tv_sec;
+ ts.tv_nsec = tv->tv_usec*1000;
+ nanosleep(&ts, NULL);
+ }
+#elif defined(EVENT__HAVE_USLEEP)
+ /* Some systems don't like to usleep more than 999999 usec */
+ sleep(tv->tv_sec);
+ usleep(tv->tv_usec);
+#else
+ select(0, NULL, NULL, NULL, tv);
+#endif
+}
+
+/*
+ This function assumes it's called repeatedly with a
+ not-actually-so-monotonic time source whose outputs are in 'tv'. It
+ implements a trivial ratcheting mechanism so that the values never go
+ backwards.
+ */
+static void
+adjust_monotonic_time(struct evutil_monotonic_timer *base,
+ struct timeval *tv)
+{
+ evutil_timeradd(tv, &base->adjust_monotonic_clock, tv);
+
+ if (evutil_timercmp(tv, &base->last_time, <)) {
+ /* Guess it wasn't monotonic after all. */
+ struct timeval adjust;
+ evutil_timersub(&base->last_time, tv, &adjust);
+ evutil_timeradd(&adjust, &base->adjust_monotonic_clock,
+ &base->adjust_monotonic_clock);
+ *tv = base->last_time;
+ }
+ base->last_time = *tv;
+}
+
+/*
+ Allocate a new struct evutil_monotonic_timer
+ */
+struct evutil_monotonic_timer *
+evutil_monotonic_timer_new(void)
+{
+ struct evutil_monotonic_timer *p = NULL;
+
+ p = mm_malloc(sizeof(*p));
+ if (!p) goto done;
+
+ memset(p, 0, sizeof(*p));
+
+ done:
+ return p;
+}
+
+/*
+ Free a struct evutil_monotonic_timer
+ */
+void
+evutil_monotonic_timer_free(struct evutil_monotonic_timer *timer)
+{
+ if (timer) {
+ mm_free(timer);
+ }
+}
+
+/*
+ Set up a struct evutil_monotonic_timer for initial use
+ */
+int
+evutil_configure_monotonic_time(struct evutil_monotonic_timer *timer,
+ int flags)
+{
+ return evutil_configure_monotonic_time_(timer, flags);
+}
+
+/*
+ Query the current monotonic time
+ */
+int
+evutil_gettime_monotonic(struct evutil_monotonic_timer *timer,
+ struct timeval *tp)
+{
+ return evutil_gettime_monotonic_(timer, tp);
+}
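+
+/* Illustrative usage sketch (compiled out; caller code is hypothetical):
+ * allocate a timer, configure it, and measure an interval with the
+ * ratcheted monotonic clock. */
+#if 0
+static void
+example_monotonic_timer(void)
+{
+	struct evutil_monotonic_timer *timer = evutil_monotonic_timer_new();
+	struct timeval start, end, elapsed;
+	if (!timer)
+		return;
+	if (evutil_configure_monotonic_time(timer, 0) == 0 &&
+	    evutil_gettime_monotonic(timer, &start) == 0) {
+		/* ... do some work ... */
+		evutil_gettime_monotonic(timer, &end);
+		evutil_timersub(&end, &start, &elapsed);
+	}
+	evutil_monotonic_timer_free(timer);
+}
+#endif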
+
+
+#if defined(HAVE_POSIX_MONOTONIC)
+/* =====
+ The POSIX clock_gettime() interface provides a few ways to get at a
+ monotonic clock. CLOCK_MONOTONIC is most widely supported. Linux also
+ provides a CLOCK_MONOTONIC_COARSE with accuracy of about 1-4 msec.
+
+ On all platforms I'm aware of, CLOCK_MONOTONIC really is monotonic.
+ Platforms don't agree about whether it should jump on a sleep/resume.
+ */
+
+int
+evutil_configure_monotonic_time_(struct evutil_monotonic_timer *base,
+ int flags)
+{
+ /* CLOCK_MONOTONIC exists on FreeBSD, Linux, and Solaris. You need to
+ * check for it at runtime, because some older kernel versions won't
+ * have it working. */
+#ifdef CLOCK_MONOTONIC_COARSE
+ const int precise = flags & EV_MONOT_PRECISE;
+#endif
+ const int fallback = flags & EV_MONOT_FALLBACK;
+ struct timespec ts;
+
+#ifdef CLOCK_MONOTONIC_COARSE
+ if (CLOCK_MONOTONIC_COARSE < 0) {
+ /* Technically speaking, nothing keeps CLOCK_* from being
+ * negative (as far as I know). This check and the one below
+ * make sure that it's safe for us to use -1 as an "unset"
+ * value. */
+ event_errx(1,"I didn't expect CLOCK_MONOTONIC_COARSE to be < 0");
+ }
+ if (! precise && ! fallback) {
+ if (clock_gettime(CLOCK_MONOTONIC_COARSE, &ts) == 0) {
+ base->monotonic_clock = CLOCK_MONOTONIC_COARSE;
+ return 0;
+ }
+ }
+#endif
+ if (!fallback && clock_gettime(CLOCK_MONOTONIC, &ts) == 0) {
+ base->monotonic_clock = CLOCK_MONOTONIC;
+ return 0;
+ }
+
+ if (CLOCK_MONOTONIC < 0) {
+ event_errx(1,"I didn't expect CLOCK_MONOTONIC to be < 0");
+ }
+
+ base->monotonic_clock = -1;
+ return 0;
+}
+
+int
+evutil_gettime_monotonic_(struct evutil_monotonic_timer *base,
+ struct timeval *tp)
+{
+ struct timespec ts;
+
+ if (base->monotonic_clock < 0) {
+ if (evutil_gettimeofday(tp, NULL) < 0)
+ return -1;
+ adjust_monotonic_time(base, tp);
+ return 0;
+ }
+
+ if (clock_gettime(base->monotonic_clock, &ts) == -1)
+ return -1;
+ tp->tv_sec = ts.tv_sec;
+ tp->tv_usec = ts.tv_nsec / 1000;
+
+ return 0;
+}
+#endif
+
+#if defined(HAVE_MACH_MONOTONIC)
+/* ======
+ Apple is a little late to the POSIX party. And why not? Instead of
+ clock_gettime(), they provide mach_absolute_time(). Its units are not
+ fixed; we need to use mach_timebase_info() to get the right functions to
+ convert its units into nanoseconds.
+
+ To all appearances, mach_absolute_time() seems to be honest-to-goodness
+ monotonic. Whether it stops during sleep or not is unspecified in
+ principle, and dependent on CPU architecture in practice.
+ */
+
+int
+evutil_configure_monotonic_time_(struct evutil_monotonic_timer *base,
+ int flags)
+{
+ const int fallback = flags & EV_MONOT_FALLBACK;
+ struct mach_timebase_info mi;
+ memset(base, 0, sizeof(*base));
+ /* OSX has mach_absolute_time() */
+ if (!fallback &&
+ mach_timebase_info(&mi) == 0 &&
+ mach_absolute_time() != 0) {
+ /* mach_timebase_info tells us how to convert
+ * mach_absolute_time() into nanoseconds, but we
+ * want to use microseconds instead. */
+ mi.denom *= 1000;
+ memcpy(&base->mach_timebase_units, &mi, sizeof(mi));
+ } else {
+ base->mach_timebase_units.numer = 0;
+ }
+ return 0;
+}
+
+int
+evutil_gettime_monotonic_(struct evutil_monotonic_timer *base,
+ struct timeval *tp)
+{
+ ev_uint64_t abstime, usec;
+ if (base->mach_timebase_units.numer == 0) {
+ if (evutil_gettimeofday(tp, NULL) < 0)
+ return -1;
+ adjust_monotonic_time(base, tp);
+ return 0;
+ }
+
+ abstime = mach_absolute_time();
+ usec = (abstime * base->mach_timebase_units.numer)
+ / (base->mach_timebase_units.denom);
+ tp->tv_sec = usec / 1000000;
+ tp->tv_usec = usec % 1000000;
+
+ return 0;
+}
+#endif
+
+#if defined(HAVE_WIN32_MONOTONIC)
+/* =====
+   Turn we now to Windows. Want monotonic time on Windows?
+
+   Windows has QueryPerformanceCounter(), which gives the most high-
+   resolution time available. It's a pity it's not so monotonic in practice; it's
+ also got some fun bugs, especially: with older Windowses, under
+ virtualizations, with funny hardware, on multiprocessor systems, and so
+ on. PEP418 [1] has a nice roundup of the issues here.
+
+ There's GetTickCount64() on Vista and later, which gives a number of 1-msec
+ ticks since startup. The accuracy here might be as bad as 10-20 msec, I
+ hear. There's an undocumented function (NtSetTimerResolution) that
+ allegedly increases the accuracy. Good luck!
+
+ There's also GetTickCount(), which is only 32 bits, but seems to be
+ supported on pre-Vista versions of Windows. Apparently, you can coax
+ another 14 bits out of it, giving you 2231 years before rollover.
+
+ The less said about timeGetTime() the better.
+
+ "We don't care. We don't have to. We're the Phone Company."
+ -- Lily Tomlin, SNL
+
+ Our strategy, if precise timers are turned off, is to just use the best
+ GetTickCount equivalent available. If we've been asked for precise timing,
+ then we mostly[2] assume that GetTickCount is monotonic, and correct
+   QueryPerformanceCounter to approximate it.
+
+ [1] http://www.python.org/dev/peps/pep-0418
+ [2] Of course, we feed the Windows stuff into adjust_monotonic_time()
+ anyway, just in case it isn't.
+
+ */
+/*
+ Parts of our logic in the win32 timer code here are closely based on
+ BitTorrent's libUTP library. That code is subject to the following
+ license:
+
+ Copyright (c) 2010 BitTorrent, Inc.
+
+ Permission is hereby granted, free of charge, to any person obtaining a
+ copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+static ev_uint64_t
+evutil_GetTickCount_(struct evutil_monotonic_timer *base)
+{
+ if (base->GetTickCount64_fn) {
+ /* Let's just use GetTickCount64 if we can. */
+ return base->GetTickCount64_fn();
+ } else if (base->GetTickCount_fn) {
+ /* Greg Hazel assures me that this works, that BitTorrent has
+		 * done it for years, and that it won't turn around and
+ * bite us. He says they found it on some game programmers'
+ * forum some time around 2007.
+ */
+ ev_uint64_t v = base->GetTickCount_fn();
+ return (DWORD)v | ((v >> 18) & 0xFFFFFFFF00000000);
+ } else {
+ /* Here's the fallback implementation. We have to use
+ * GetTickCount() with its given signature, so we only get
+		 * 32 bits worth of milliseconds, which will roll over every
+ * 49 days or so. */
+ DWORD ticks = GetTickCount();
+ if (ticks < base->last_tick_count) {
+ base->adjust_tick_count += ((ev_uint64_t)1) << 32;
+ }
+ base->last_tick_count = ticks;
+ return ticks + base->adjust_tick_count;
+ }
+}
+
+int
+evutil_configure_monotonic_time_(struct evutil_monotonic_timer *base,
+ int flags)
+{
+ const int precise = flags & EV_MONOT_PRECISE;
+ const int fallback = flags & EV_MONOT_FALLBACK;
+ HANDLE h;
+ memset(base, 0, sizeof(*base));
+
+ h = evutil_load_windows_system_library_(TEXT("kernel32.dll"));
+ if (h != NULL && !fallback) {
+ base->GetTickCount64_fn = (ev_GetTickCount_func)GetProcAddress(h, "GetTickCount64");
+ base->GetTickCount_fn = (ev_GetTickCount_func)GetProcAddress(h, "GetTickCount");
+ }
+
+ base->first_tick = base->last_tick_count = evutil_GetTickCount_(base);
+ if (precise && !fallback) {
+ LARGE_INTEGER freq;
+ if (QueryPerformanceFrequency(&freq)) {
+ LARGE_INTEGER counter;
+ QueryPerformanceCounter(&counter);
+ base->first_counter = counter.QuadPart;
+ base->usec_per_count = 1.0e6 / freq.QuadPart;
+ base->use_performance_counter = 1;
+ }
+ }
+
+ return 0;
+}
+
+static inline ev_int64_t
+abs64(ev_int64_t i)
+{
+ return i < 0 ? -i : i;
+}
+
+
+int
+evutil_gettime_monotonic_(struct evutil_monotonic_timer *base,
+ struct timeval *tp)
+{
+ ev_uint64_t ticks = evutil_GetTickCount_(base);
+ if (base->use_performance_counter) {
+ /* Here's a trick we took from BitTorrent's libutp, at Greg
+ * Hazel's recommendation. We use QueryPerformanceCounter for
+ * our high-resolution timer, but use GetTickCount*() to keep
+ * it sane, and adjust_monotonic_time() to keep it monotonic.
+ */
+ LARGE_INTEGER counter;
+ ev_int64_t counter_elapsed, counter_usec_elapsed, ticks_elapsed;
+ QueryPerformanceCounter(&counter);
+ counter_elapsed = (ev_int64_t)
+ (counter.QuadPart - base->first_counter);
+ ticks_elapsed = ticks - base->first_tick;
+ /* TODO: This may upset VC6. If you need this to work with
+ * VC6, please supply an appropriate patch. */
+ counter_usec_elapsed = (ev_int64_t)
+ (counter_elapsed * base->usec_per_count);
+
+ if (abs64(ticks_elapsed*1000 - counter_usec_elapsed) > 1000000) {
+ /* It appears that the QueryPerformanceCounter()
+ * result is more than 1 second away from
+ * GetTickCount() result. Let's adjust it to be as
+			 * accurate as we can; adjust_monotonic_time() below
+ * will keep it monotonic. */
+ counter_usec_elapsed = ticks_elapsed * 1000;
+ base->first_counter = (ev_uint64_t) (counter.QuadPart - counter_usec_elapsed / base->usec_per_count);
+ }
+ tp->tv_sec = (time_t) (counter_usec_elapsed / 1000000);
+ tp->tv_usec = counter_usec_elapsed % 1000000;
+
+ } else {
+ /* We're just using GetTickCount(). */
+ tp->tv_sec = (time_t) (ticks / 1000);
+ tp->tv_usec = (ticks % 1000) * 1000;
+ }
+ adjust_monotonic_time(base, tp);
+
+ return 0;
+}
+#endif
+
+#if defined(HAVE_FALLBACK_MONOTONIC)
+/* =====
+ And if none of the other options work, let's just use gettimeofday(), and
+ ratchet it forward so that it acts like a monotonic timer, whether it
+ wants to or not.
+ */
+
+int
+evutil_configure_monotonic_time_(struct evutil_monotonic_timer *base,
+ int precise)
+{
+ memset(base, 0, sizeof(*base));
+ return 0;
+}
+
+int
+evutil_gettime_monotonic_(struct evutil_monotonic_timer *base,
+ struct timeval *tp)
+{
+ if (evutil_gettimeofday(tp, NULL) < 0)
+ return -1;
+ adjust_monotonic_time(base, tp);
+ return 0;
+
+}
+#endif
diff --git a/libs/libevent/src/ht-internal.h b/libs/libevent/src/ht-internal.h
new file mode 100644
index 0000000000..50375bbaa9
--- /dev/null
+++ b/libs/libevent/src/ht-internal.h
@@ -0,0 +1,487 @@
+/* Copyright 2002 Christopher Clark */
+/* Copyright 2005-2012 Nick Mathewson */
+/* Copyright 2009-2012 Niels Provos and Nick Mathewson */
+/* See license at end. */
+
+/* Based on ideas by Christopher Clark and interfaces from Niels Provos. */
+
+#ifndef HT_INTERNAL_H_INCLUDED_
+#define HT_INTERNAL_H_INCLUDED_
+
+#define HT_HEAD(name, type) \
+ struct name { \
+ /* The hash table itself. */ \
+ struct type **hth_table; \
+ /* How long is the hash table? */ \
+ unsigned hth_table_length; \
+ /* How many elements does the table contain? */ \
+ unsigned hth_n_entries; \
+ /* How many elements will we allow in the table before resizing it? */ \
+ unsigned hth_load_limit; \
+ /* Position of hth_table_length in the primes table. */ \
+ int hth_prime_idx; \
+ }
+
+#define HT_INITIALIZER() \
+ { NULL, 0, 0, 0, -1 }
+
+#ifdef HT_NO_CACHE_HASH_VALUES
+#define HT_ENTRY(type) \
+ struct { \
+ struct type *hte_next; \
+ }
+#else
+#define HT_ENTRY(type) \
+ struct { \
+ struct type *hte_next; \
+ unsigned hte_hash; \
+ }
+#endif
+
+#define HT_EMPTY(head) \
+ ((head)->hth_n_entries == 0)
+
+/* How many elements in 'head'? */
+#define HT_SIZE(head) \
+ ((head)->hth_n_entries)
+
+/* Return memory usage for a hashtable (not counting the entries themselves) */
+#define HT_MEM_USAGE(head) \
+ (sizeof(*head) + (head)->hth_table_length * sizeof(void*))
+
+#define HT_FIND(name, head, elm) name##_HT_FIND((head), (elm))
+#define HT_INSERT(name, head, elm) name##_HT_INSERT((head), (elm))
+#define HT_REPLACE(name, head, elm) name##_HT_REPLACE((head), (elm))
+#define HT_REMOVE(name, head, elm) name##_HT_REMOVE((head), (elm))
+#define HT_START(name, head) name##_HT_START(head)
+#define HT_NEXT(name, head, elm) name##_HT_NEXT((head), (elm))
+#define HT_NEXT_RMV(name, head, elm) name##_HT_NEXT_RMV((head), (elm))
+#define HT_CLEAR(name, head) name##_HT_CLEAR(head)
+#define HT_INIT(name, head) name##_HT_INIT(head)
+/* Helper: */
+static inline unsigned
+ht_improve_hash_(unsigned h)
+{
+ /* Aim to protect against poor hash functions by adding logic here
+ * - logic taken from java 1.4 hashtable source */
+ h += ~(h << 9);
+ h ^= ((h >> 14) | (h << 18)); /* >>> */
+ h += (h << 4);
+ h ^= ((h >> 10) | (h << 22)); /* >>> */
+ return h;
+}
+
+#if 0
+/** Basic string hash function, from Java standard String.hashCode(). */
+static inline unsigned
+ht_string_hash_(const char *s)
+{
+ unsigned h = 0;
+ int m = 1;
+ while (*s) {
+ h += ((signed char)*s++)*m;
+ m = (m<<5)-1; /* m *= 31 */
+ }
+ return h;
+}
+#endif
+
+/** Basic string hash function, from Python's str.__hash__() */
+static inline unsigned
+ht_string_hash_(const char *s)
+{
+ unsigned h;
+ const unsigned char *cp = (const unsigned char *)s;
+ h = *cp << 7;
+ while (*cp) {
+ h = (1000003*h) ^ *cp++;
+ }
+ /* This conversion truncates the length of the string, but that's ok. */
+ h ^= (unsigned)(cp-(const unsigned char*)s);
+ return h;
+}
+
+#ifndef HT_NO_CACHE_HASH_VALUES
+#define HT_SET_HASH_(elm, field, hashfn) \
+ do { (elm)->field.hte_hash = hashfn(elm); } while (0)
+#define HT_SET_HASHVAL_(elm, field, val) \
+ do { (elm)->field.hte_hash = (val); } while (0)
+#define HT_ELT_HASH_(elm, field, hashfn) \
+ ((elm)->field.hte_hash)
+#else
+#define HT_SET_HASH_(elm, field, hashfn) \
+ ((void)0)
+#define HT_ELT_HASH_(elm, field, hashfn) \
+ (hashfn(elm))
+#define HT_SET_HASHVAL_(elm, field, val) \
+ ((void)0)
+#endif
+
+/* Helper: alias for the bucket containing 'elm'. */
+#define HT_BUCKET_(head, field, elm, hashfn) \
+ ((head)->hth_table[HT_ELT_HASH_(elm,field,hashfn) % head->hth_table_length])
+
+#define HT_FOREACH(x, name, head) \
+ for ((x) = HT_START(name, head); \
+ (x) != NULL; \
+ (x) = HT_NEXT(name, head, x))
+
+#define HT_PROTOTYPE(name, type, field, hashfn, eqfn) \
+ int name##_HT_GROW(struct name *ht, unsigned min_capacity); \
+ void name##_HT_CLEAR(struct name *ht); \
+ int name##_HT_REP_IS_BAD_(const struct name *ht); \
+ static inline void \
+ name##_HT_INIT(struct name *head) { \
+ head->hth_table_length = 0; \
+ head->hth_table = NULL; \
+ head->hth_n_entries = 0; \
+ head->hth_load_limit = 0; \
+ head->hth_prime_idx = -1; \
+ } \
+ /* Helper: returns a pointer to the right location in the table \
+ * 'head' to find or insert the element 'elm'. */ \
+ static inline struct type ** \
+ name##_HT_FIND_P_(struct name *head, struct type *elm) \
+ { \
+ struct type **p; \
+ if (!head->hth_table) \
+ return NULL; \
+ p = &HT_BUCKET_(head, field, elm, hashfn); \
+ while (*p) { \
+ if (eqfn(*p, elm)) \
+ return p; \
+ p = &(*p)->field.hte_next; \
+ } \
+ return p; \
+ } \
+ /* Return a pointer to the element in the table 'head' matching 'elm', \
+ * or NULL if no such element exists */ \
+ static inline struct type * \
+ name##_HT_FIND(const struct name *head, struct type *elm) \
+ { \
+ struct type **p; \
+ struct name *h = (struct name *) head; \
+ HT_SET_HASH_(elm, field, hashfn); \
+ p = name##_HT_FIND_P_(h, elm); \
+ return p ? *p : NULL; \
+ } \
+ /* Insert the element 'elm' into the table 'head'. Do not call this \
+ * function if the table might already contain a matching element. */ \
+ static inline void \
+ name##_HT_INSERT(struct name *head, struct type *elm) \
+ { \
+ struct type **p; \
+ if (!head->hth_table || head->hth_n_entries >= head->hth_load_limit) \
+ name##_HT_GROW(head, head->hth_n_entries+1); \
+ ++head->hth_n_entries; \
+ HT_SET_HASH_(elm, field, hashfn); \
+ p = &HT_BUCKET_(head, field, elm, hashfn); \
+ elm->field.hte_next = *p; \
+ *p = elm; \
+ } \
+	/* Insert the element 'elm' into the table 'head'. If there is already \
+ * a matching element in the table, replace that element and return \
+ * it. */ \
+ static inline struct type * \
+ name##_HT_REPLACE(struct name *head, struct type *elm) \
+ { \
+ struct type **p, *r; \
+ if (!head->hth_table || head->hth_n_entries >= head->hth_load_limit) \
+ name##_HT_GROW(head, head->hth_n_entries+1); \
+ HT_SET_HASH_(elm, field, hashfn); \
+ p = name##_HT_FIND_P_(head, elm); \
+ r = *p; \
+ *p = elm; \
+ if (r && (r!=elm)) { \
+ elm->field.hte_next = r->field.hte_next; \
+ r->field.hte_next = NULL; \
+ return r; \
+ } else { \
+ ++head->hth_n_entries; \
+ return NULL; \
+ } \
+ } \
+ /* Remove any element matching 'elm' from the table 'head'. If such \
+ * an element is found, return it; otherwise return NULL. */ \
+ static inline struct type * \
+ name##_HT_REMOVE(struct name *head, struct type *elm) \
+ { \
+ struct type **p, *r; \
+ HT_SET_HASH_(elm, field, hashfn); \
+ p = name##_HT_FIND_P_(head,elm); \
+ if (!p || !*p) \
+ return NULL; \
+ r = *p; \
+ *p = r->field.hte_next; \
+ r->field.hte_next = NULL; \
+ --head->hth_n_entries; \
+ return r; \
+ } \
+ /* Invoke the function 'fn' on every element of the table 'head', \
+ * using 'data' as its second argument. If the function returns \
+ * nonzero, remove the most recently examined element before invoking \
+ * the function again. */ \
+ static inline void \
+ name##_HT_FOREACH_FN(struct name *head, \
+ int (*fn)(struct type *, void *), \
+ void *data) \
+ { \
+ unsigned idx; \
+ struct type **p, **nextp, *next; \
+ if (!head->hth_table) \
+ return; \
+ for (idx=0; idx < head->hth_table_length; ++idx) { \
+ p = &head->hth_table[idx]; \
+ while (*p) { \
+ nextp = &(*p)->field.hte_next; \
+ next = *nextp; \
+ if (fn(*p, data)) { \
+ --head->hth_n_entries; \
+ *p = next; \
+ } else { \
+ p = nextp; \
+ } \
+ } \
+ } \
+ } \
+ /* Return a pointer to the first element in the table 'head', under \
+ * an arbitrary order. This order is stable under remove operations, \
+ * but not under others. If the table is empty, return NULL. */ \
+ static inline struct type ** \
+ name##_HT_START(struct name *head) \
+ { \
+ unsigned b = 0; \
+ while (b < head->hth_table_length) { \
+ if (head->hth_table[b]) \
+ return &head->hth_table[b]; \
+ ++b; \
+ } \
+ return NULL; \
+ } \
+ /* Return the next element in 'head' after 'elm', under the arbitrary \
+ * order used by HT_START. If there are no more elements, return \
+ * NULL. If 'elm' is to be removed from the table, you must call \
+ * this function for the next value before you remove it. \
+ */ \
+ static inline struct type ** \
+ name##_HT_NEXT(struct name *head, struct type **elm) \
+ { \
+ if ((*elm)->field.hte_next) { \
+ return &(*elm)->field.hte_next; \
+ } else { \
+ unsigned b = (HT_ELT_HASH_(*elm, field, hashfn) % head->hth_table_length)+1; \
+ while (b < head->hth_table_length) { \
+ if (head->hth_table[b]) \
+ return &head->hth_table[b]; \
+ ++b; \
+ } \
+ return NULL; \
+ } \
+ } \
+ static inline struct type ** \
+ name##_HT_NEXT_RMV(struct name *head, struct type **elm) \
+ { \
+ unsigned h = HT_ELT_HASH_(*elm, field, hashfn); \
+ *elm = (*elm)->field.hte_next; \
+ --head->hth_n_entries; \
+ if (*elm) { \
+ return elm; \
+ } else { \
+ unsigned b = (h % head->hth_table_length)+1; \
+ while (b < head->hth_table_length) { \
+ if (head->hth_table[b]) \
+ return &head->hth_table[b]; \
+ ++b; \
+ } \
+ return NULL; \
+ } \
+ }
+
+#define HT_GENERATE(name, type, field, hashfn, eqfn, load, mallocfn, \
+ reallocfn, freefn) \
+ static unsigned name##_PRIMES[] = { \
+ 53, 97, 193, 389, \
+ 769, 1543, 3079, 6151, \
+ 12289, 24593, 49157, 98317, \
+ 196613, 393241, 786433, 1572869, \
+ 3145739, 6291469, 12582917, 25165843, \
+ 50331653, 100663319, 201326611, 402653189, \
+ 805306457, 1610612741 \
+ }; \
+ static unsigned name##_N_PRIMES = \
+ (unsigned)(sizeof(name##_PRIMES)/sizeof(name##_PRIMES[0])); \
+ /* Expand the internal table of 'head' until it is large enough to \
+ * hold 'size' elements. Return 0 on success, -1 on allocation \
+ * failure. */ \
+ int \
+ name##_HT_GROW(struct name *head, unsigned size) \
+ { \
+ unsigned new_len, new_load_limit; \
+ int prime_idx; \
+ struct type **new_table; \
+ if (head->hth_prime_idx == (int)name##_N_PRIMES - 1) \
+ return 0; \
+ if (head->hth_load_limit > size) \
+ return 0; \
+ prime_idx = head->hth_prime_idx; \
+ do { \
+ new_len = name##_PRIMES[++prime_idx]; \
+ new_load_limit = (unsigned)(load*new_len); \
+ } while (new_load_limit <= size && \
+ prime_idx < (int)name##_N_PRIMES); \
+ if ((new_table = mallocfn(new_len*sizeof(struct type*)))) { \
+ unsigned b; \
+ memset(new_table, 0, new_len*sizeof(struct type*)); \
+ for (b = 0; b < head->hth_table_length; ++b) { \
+ struct type *elm, *next; \
+ unsigned b2; \
+ elm = head->hth_table[b]; \
+ while (elm) { \
+ next = elm->field.hte_next; \
+ b2 = HT_ELT_HASH_(elm, field, hashfn) % new_len; \
+ elm->field.hte_next = new_table[b2]; \
+ new_table[b2] = elm; \
+ elm = next; \
+ } \
+ } \
+ if (head->hth_table) \
+ freefn(head->hth_table); \
+ head->hth_table = new_table; \
+ } else { \
+ unsigned b, b2; \
+ new_table = reallocfn(head->hth_table, new_len*sizeof(struct type*)); \
+ if (!new_table) return -1; \
+ memset(new_table + head->hth_table_length, 0, \
+ (new_len - head->hth_table_length)*sizeof(struct type*)); \
+ for (b=0; b < head->hth_table_length; ++b) { \
+ struct type *e, **pE; \
+ for (pE = &new_table[b], e = *pE; e != NULL; e = *pE) { \
+ b2 = HT_ELT_HASH_(e, field, hashfn) % new_len; \
+ if (b2 == b) { \
+ pE = &e->field.hte_next; \
+ } else { \
+ *pE = e->field.hte_next; \
+ e->field.hte_next = new_table[b2]; \
+ new_table[b2] = e; \
+ } \
+ } \
+ } \
+ head->hth_table = new_table; \
+ } \
+ head->hth_table_length = new_len; \
+ head->hth_prime_idx = prime_idx; \
+ head->hth_load_limit = new_load_limit; \
+ return 0; \
+ } \
+ /* Free all storage held by 'head'. Does not free 'head' itself, or \
+ * individual elements. */ \
+ void \
+ name##_HT_CLEAR(struct name *head) \
+ { \
+ if (head->hth_table) \
+ freefn(head->hth_table); \
+ name##_HT_INIT(head); \
+ } \
+ /* Debugging helper: return false iff the representation of 'head' is \
+ * internally consistent. */ \
+ int \
+ name##_HT_REP_IS_BAD_(const struct name *head) \
+ { \
+ unsigned n, i; \
+ struct type *elm; \
+ if (!head->hth_table_length) { \
+ if (!head->hth_table && !head->hth_n_entries && \
+ !head->hth_load_limit && head->hth_prime_idx == -1) \
+ return 0; \
+ else \
+ return 1; \
+ } \
+ if (!head->hth_table || head->hth_prime_idx < 0 || \
+ !head->hth_load_limit) \
+ return 2; \
+ if (head->hth_n_entries > head->hth_load_limit) \
+ return 3; \
+ if (head->hth_table_length != name##_PRIMES[head->hth_prime_idx]) \
+ return 4; \
+ if (head->hth_load_limit != (unsigned)(load*head->hth_table_length)) \
+ return 5; \
+ for (n = i = 0; i < head->hth_table_length; ++i) { \
+ for (elm = head->hth_table[i]; elm; elm = elm->field.hte_next) { \
+ if (HT_ELT_HASH_(elm, field, hashfn) != hashfn(elm)) \
+ return 1000 + i; \
+ if ((HT_ELT_HASH_(elm, field, hashfn) % head->hth_table_length) != i) \
+ return 10000 + i; \
+ ++n; \
+ } \
+ } \
+ if (n != head->hth_n_entries) \
+ return 6; \
+ return 0; \
+ }
+
+/** Implements an over-optimized "find and insert if absent" block;
+ * not meant for direct usage by typical code, or usage outside the critical
+ * path.*/
+#define HT_FIND_OR_INSERT_(name, field, hashfn, head, eltype, elm, var, y, n) \
+ { \
+ struct name *var##_head_ = head; \
+ struct eltype **var; \
+ if (!var##_head_->hth_table || \
+ var##_head_->hth_n_entries >= var##_head_->hth_load_limit) \
+ name##_HT_GROW(var##_head_, var##_head_->hth_n_entries+1); \
+ HT_SET_HASH_((elm), field, hashfn); \
+ var = name##_HT_FIND_P_(var##_head_, (elm)); \
+ if (*var) { \
+ y; \
+ } else { \
+ n; \
+ } \
+ }
+#define HT_FOI_INSERT_(field, head, elm, newent, var) \
+ { \
+ HT_SET_HASHVAL_(newent, field, (elm)->field.hte_hash); \
+ newent->field.hte_next = NULL; \
+ *var = newent; \
+ ++((head)->hth_n_entries); \
+ }
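+
+/* Illustrative usage sketch (compiled out; all names below are hypothetical
+ * and <stdlib.h> is assumed for malloc/realloc/free): declaring a table of
+ * 'struct widget' keyed by an integer id. HT_PROTOTYPE emits the inline
+ * find/insert/remove functions; HT_GENERATE emits the grow/clear helpers
+ * using the supplied allocator. */
+#if 0
+struct widget {
+	HT_ENTRY(widget) node;
+	int id;
+};
+static unsigned
+widget_hash(struct widget *w)
+{
+	return ht_improve_hash_((unsigned)w->id);
+}
+static int
+widget_eq(struct widget *a, struct widget *b)
+{
+	return a->id == b->id;
+}
+HT_HEAD(widget_map, widget);
+HT_PROTOTYPE(widget_map, widget, node, widget_hash, widget_eq)
+HT_GENERATE(widget_map, widget, node, widget_hash, widget_eq, 0.5,
+    malloc, realloc, free)
+
+static struct widget *
+example_find_widget(struct widget_map *map, int id)
+{
+	struct widget key;
+	key.id = id;
+	return HT_FIND(widget_map, map, &key);
+}
+#endif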
+
+/*
+ * Copyright 2005, Nick Mathewson. Implementation logic is adapted from code
+ * by Christopher Clark, retrofit to allow drop-in memory management, and to
+ * use the same interface as Niels Provos's tree.h. This is probably still
+ * a derived work, so the original license below still applies.
+ *
+ * Copyright (c) 2002, Christopher Clark
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * * Neither the name of the original author; nor the names of any contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
+ * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#endif
+
diff --git a/libs/libevent/src/http-internal.h b/libs/libevent/src/http-internal.h
new file mode 100644
index 0000000000..ba6e49ef9b
--- /dev/null
+++ b/libs/libevent/src/http-internal.h
@@ -0,0 +1,200 @@
+/*
+ * Copyright 2001-2007 Niels Provos <provos@citi.umich.edu>
+ * Copyright 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * This header file contains definitions for dealing with HTTP requests
+ * that are internal to libevent. As a user of the library, you should not
+ * need to know about these.
+ */
+
+#ifndef HTTP_INTERNAL_H_INCLUDED_
+#define HTTP_INTERNAL_H_INCLUDED_
+
+#include "event2/event_struct.h"
+#include "util-internal.h"
+#include "defer-internal.h"
+
+#define HTTP_CONNECT_TIMEOUT 45
+#define HTTP_WRITE_TIMEOUT 50
+#define HTTP_READ_TIMEOUT 50
+
+#define HTTP_PREFIX "http://"
+#define HTTP_DEFAULTPORT 80
+
+enum message_read_status {
+ ALL_DATA_READ = 1,
+ MORE_DATA_EXPECTED = 0,
+ DATA_CORRUPTED = -1,
+ REQUEST_CANCELED = -2,
+ DATA_TOO_LONG = -3
+};
+
+struct evbuffer;
+struct addrinfo;
+struct evhttp_request;
+
+/* Indicates an unknown request method. */
+#define EVHTTP_REQ_UNKNOWN_ (1<<15)
+
+enum evhttp_connection_state {
+	EVCON_DISCONNECTED,	/**< not currently connected, and not trying to connect */
+	EVCON_CONNECTING,	/**< currently trying to connect */
+ EVCON_IDLE, /**< connection is established */
+ EVCON_READING_FIRSTLINE,/**< reading Request-Line (incoming conn) or
+ **< Status-Line (outgoing conn) */
+ EVCON_READING_HEADERS, /**< reading request/response headers */
+ EVCON_READING_BODY, /**< reading request/response body */
+ EVCON_READING_TRAILER, /**< reading request/response chunked trailer */
+ EVCON_WRITING /**< writing request/response headers/body */
+};
+
+struct event_base;
+
+/* A client or server connection. */
+struct evhttp_connection {
+ /* we use this tailq only if this connection was created for an http
+ * server */
+ TAILQ_ENTRY(evhttp_connection) next;
+
+ evutil_socket_t fd;
+ struct bufferevent *bufev;
+
+ struct event retry_ev; /* for retrying connects */
+
+ char *bind_address; /* address to use for binding the src */
+ unsigned short bind_port; /* local port for binding the src */
+
+ char *address; /* address to connect to */
+ unsigned short port;
+
+ size_t max_headers_size;
+ ev_uint64_t max_body_size;
+
+ int flags;
+#define EVHTTP_CON_INCOMING 0x0001 /* only one request on it ever */
+#define EVHTTP_CON_OUTGOING 0x0002 /* multiple requests possible */
+#define EVHTTP_CON_CLOSEDETECT 0x0004 /* detecting if persistent close */
+/* set when we want to auto free the connection */
+#define EVHTTP_CON_AUTOFREE EVHTTP_CON_PUBLIC_FLAGS_END
+
+ struct timeval timeout; /* timeout for events */
+ int retry_cnt; /* retry count */
+ int retry_max; /* maximum number of retries */
+	struct timeval initial_retry_timeout; /* How long to wait after the
+					       * first failed attempt before
+					       * retrying */
+
+ enum evhttp_connection_state state;
+
+ /* for server connections, the http server they are connected with */
+ struct evhttp *http_server;
+
+ TAILQ_HEAD(evcon_requestq, evhttp_request) requests;
+
+ void (*cb)(struct evhttp_connection *, void *);
+ void *cb_arg;
+
+ void (*closecb)(struct evhttp_connection *, void *);
+ void *closecb_arg;
+
+ struct event_callback read_more_deferred_cb;
+
+ struct event_base *base;
+ struct evdns_base *dns_base;
+ int ai_family;
+};
+
+/* A callback for an http server */
+struct evhttp_cb {
+ TAILQ_ENTRY(evhttp_cb) next;
+
+ char *what;
+
+ void (*cb)(struct evhttp_request *req, void *);
+ void *cbarg;
+};
+
+/* both the http server and the rpc system need to queue connections */
+TAILQ_HEAD(evconq, evhttp_connection);
+
+/* each bound socket is stored in one of these */
+struct evhttp_bound_socket {
+ TAILQ_ENTRY(evhttp_bound_socket) next;
+
+ struct evconnlistener *listener;
+};
+
+/* server alias list item. */
+struct evhttp_server_alias {
+ TAILQ_ENTRY(evhttp_server_alias) next;
+
+ char *alias; /* the server alias. */
+};
+
+struct evhttp {
+ /* Next vhost, if this is a vhost. */
+ TAILQ_ENTRY(evhttp) next_vhost;
+
+ /* All listeners for this host */
+ TAILQ_HEAD(boundq, evhttp_bound_socket) sockets;
+
+ TAILQ_HEAD(httpcbq, evhttp_cb) callbacks;
+
+ /* All live connections on this host. */
+ struct evconq connections;
+
+ TAILQ_HEAD(vhostsq, evhttp) virtualhosts;
+
+ TAILQ_HEAD(aliasq, evhttp_server_alias) aliases;
+
+ /* NULL if this server is not a vhost */
+ char *vhost_pattern;
+
+ struct timeval timeout;
+
+ size_t default_max_headers_size;
+ ev_uint64_t default_max_body_size;
+ const char *default_content_type;
+
+ /* Bitmask of all HTTP methods that we accept and pass to user
+ * callbacks. */
+ ev_uint16_t allowed_methods;
+
+	/* Fallback callback used if none of the other callbacks for this
+	   connection match. */
+ void (*gencb)(struct evhttp_request *req, void *);
+ void *gencbarg;
+ struct bufferevent* (*bevcb)(struct event_base *, void *);
+ void *bevcbarg;
+
+ struct event_base *base;
+};
+
+/* XXX most of these functions could be static. */
+
+/* resets the connection; can be reused for more requests */
+void evhttp_connection_reset_(struct evhttp_connection *);
+
+/* connects if necessary */
+int evhttp_connection_connect_(struct evhttp_connection *);
+
+enum evhttp_request_error;
+/* notifies the current request that it failed; resets connection */
+void evhttp_connection_fail_(struct evhttp_connection *,
+ enum evhttp_request_error error);
+
+enum message_read_status;
+
+enum message_read_status evhttp_parse_firstline_(struct evhttp_request *, struct evbuffer*);
+enum message_read_status evhttp_parse_headers_(struct evhttp_request *, struct evbuffer*);
+
+void evhttp_start_read_(struct evhttp_connection *);
+
+/* send an HTML response with the data in the buffer */
+void evhttp_response_code_(struct evhttp_request *, int, const char *);
+void evhttp_send_page_(struct evhttp_request *, struct evbuffer *);
+
+int evhttp_decode_uri_internal(const char *uri, size_t length,
+ char *ret, int decode_plus);
+
+#endif /* HTTP_INTERNAL_H_INCLUDED_ */
diff --git a/libs/libevent/src/http.c b/libs/libevent/src/http.c
new file mode 100644
index 0000000000..fd7ce3cbf2
--- /dev/null
+++ b/libs/libevent/src/http.c
@@ -0,0 +1,4892 @@
+/*
+ * Copyright (c) 2002-2007 Niels Provos <provos@citi.umich.edu>
+ * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "event2/event-config.h"
+#include "evconfig-private.h"
+
+#ifdef EVENT__HAVE_SYS_PARAM_H
+#include <sys/param.h>
+#endif
+#ifdef EVENT__HAVE_SYS_TYPES_H
+#include <sys/types.h>
+#endif
+
+#ifdef EVENT__HAVE_SYS_IOCCOM_H
+#include <sys/ioccom.h>
+#endif
+#ifdef EVENT__HAVE_SYS_RESOURCE_H
+#include <sys/resource.h>
+#endif
+#ifdef EVENT__HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+#ifdef EVENT__HAVE_SYS_WAIT_H
+#include <sys/wait.h>
+#endif
+
+#ifndef _WIN32
+#include <sys/socket.h>
+#include <sys/stat.h>
+#else
+#include <winsock2.h>
+#include <ws2tcpip.h>
+#endif
+
+#include <sys/queue.h>
+
+#ifdef EVENT__HAVE_NETINET_IN_H
+#include <netinet/in.h>
+#endif
+#ifdef EVENT__HAVE_ARPA_INET_H
+#include <arpa/inet.h>
+#endif
+#ifdef EVENT__HAVE_NETDB_H
+#include <netdb.h>
+#endif
+
+#ifdef _WIN32
+#include <winsock2.h>
+#endif
+
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#ifndef _WIN32
+#include <syslog.h>
+#endif
+#include <signal.h>
+#include <time.h>
+#ifdef EVENT__HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+#ifdef EVENT__HAVE_FCNTL_H
+#include <fcntl.h>
+#endif
+
+#undef timeout_pending
+#undef timeout_initialized
+
+#include "strlcpy-internal.h"
+#include "event2/http.h"
+#include "event2/event.h"
+#include "event2/buffer.h"
+#include "event2/bufferevent.h"
+#include "event2/http_struct.h"
+#include "event2/http_compat.h"
+#include "event2/util.h"
+#include "event2/listener.h"
+#include "log-internal.h"
+#include "util-internal.h"
+#include "http-internal.h"
+#include "mm-internal.h"
+#include "bufferevent-internal.h"
+
+#ifndef EVENT__HAVE_GETNAMEINFO
+#define NI_MAXSERV 32
+#define NI_MAXHOST 1025
+
+#ifndef NI_NUMERICHOST
+#define NI_NUMERICHOST 1
+#endif
+
+#ifndef NI_NUMERICSERV
+#define NI_NUMERICSERV 2
+#endif
+
+static int
+fake_getnameinfo(const struct sockaddr *sa, size_t salen, char *host,
+ size_t hostlen, char *serv, size_t servlen, int flags)
+{
+ struct sockaddr_in *sin = (struct sockaddr_in *)sa;
+
+ if (serv != NULL) {
+ char tmpserv[16];
+ evutil_snprintf(tmpserv, sizeof(tmpserv),
+ "%d", ntohs(sin->sin_port));
+ if (strlcpy(serv, tmpserv, servlen) >= servlen)
+ return (-1);
+ }
+
+ if (host != NULL) {
+ if (flags & NI_NUMERICHOST) {
+ if (strlcpy(host, inet_ntoa(sin->sin_addr),
+ hostlen) >= hostlen)
+ return (-1);
+ else
+ return (0);
+ } else {
+ struct hostent *hp;
+ hp = gethostbyaddr((char *)&sin->sin_addr,
+ sizeof(struct in_addr), AF_INET);
+ if (hp == NULL)
+ return (-2);
+
+ if (strlcpy(host, hp->h_name, hostlen) >= hostlen)
+ return (-1);
+ else
+ return (0);
+ }
+ }
+ return (0);
+}
+
+#endif
+
+#define REQ_VERSION_BEFORE(req, major_v, minor_v) \
+ ((req)->major < (major_v) || \
+ ((req)->major == (major_v) && (req)->minor < (minor_v)))
+
+#define REQ_VERSION_ATLEAST(req, major_v, minor_v) \
+ ((req)->major > (major_v) || \
+ ((req)->major == (major_v) && (req)->minor >= (minor_v)))
+
+#ifndef MIN
+#define MIN(a,b) (((a)<(b))?(a):(b))
+#endif
+
+extern int debug;
+
+static evutil_socket_t bind_socket_ai(struct evutil_addrinfo *, int reuse);
+static evutil_socket_t bind_socket(const char *, ev_uint16_t, int reuse);
+static void name_from_addr(struct sockaddr *, ev_socklen_t, char **, char **);
+static int evhttp_associate_new_request_with_connection(
+ struct evhttp_connection *evcon);
+static void evhttp_connection_start_detectclose(
+ struct evhttp_connection *evcon);
+static void evhttp_connection_stop_detectclose(
+ struct evhttp_connection *evcon);
+static void evhttp_request_dispatch(struct evhttp_connection* evcon);
+static void evhttp_read_firstline(struct evhttp_connection *evcon,
+ struct evhttp_request *req);
+static void evhttp_read_header(struct evhttp_connection *evcon,
+ struct evhttp_request *req);
+static int evhttp_add_header_internal(struct evkeyvalq *headers,
+ const char *key, const char *value);
+static const char *evhttp_response_phrase_internal(int code);
+static void evhttp_get_request(struct evhttp *, evutil_socket_t, struct sockaddr *, ev_socklen_t);
+static void evhttp_write_buffer(struct evhttp_connection *,
+ void (*)(struct evhttp_connection *, void *), void *);
+static void evhttp_make_header(struct evhttp_connection *, struct evhttp_request *);
+
+/* callbacks for bufferevent */
+static void evhttp_read_cb(struct bufferevent *, void *);
+static void evhttp_write_cb(struct bufferevent *, void *);
+static void evhttp_error_cb(struct bufferevent *bufev, short what, void *arg);
+static int evhttp_find_vhost(struct evhttp *http, struct evhttp **outhttp,
+ const char *hostname);
+
+#ifndef EVENT__HAVE_STRSEP
+/* strsep replacement for platforms that lack it. Only works if
+ * del is one character long. */
+static char *
+strsep(char **s, const char *del)
+{
+ char *d, *tok;
+ EVUTIL_ASSERT(strlen(del) == 1);
+ if (!s || !*s)
+ return NULL;
+ tok = *s;
+ d = strstr(tok, del);
+ if (d) {
+ *d = '\0';
+ *s = d + 1;
+ } else
+ *s = NULL;
+ return tok;
+}
+#endif
+
+static size_t
+html_replace(const char ch, const char **escaped)
+{
+ switch (ch) {
+ case '<':
+ *escaped = "&lt;";
+ return 4;
+ case '>':
+ *escaped = "&gt;";
+ return 4;
+ case '"':
+ *escaped = "&quot;";
+ return 6;
+ case '\'':
+ *escaped = "&#039;";
+ return 6;
+ case '&':
+ *escaped = "&amp;";
+ return 5;
+ default:
+ break;
+ }
+
+ return 1;
+}
+
+/*
+ * Replaces <, >, ", ' and & with &lt;, &gt;, &quot;,
+ * &#039; and &amp; correspondingly.
+ *
+ * The returned string needs to be freed by the caller.
+ */
+
+char *
+evhttp_htmlescape(const char *html)
+{
+ size_t i;
+ size_t new_size = 0, old_size = 0;
+ char *escaped_html, *p;
+
+ if (html == NULL)
+ return (NULL);
+
+ old_size = strlen(html);
+ for (i = 0; i < old_size; ++i) {
+ const char *replaced = NULL;
+ const size_t replace_size = html_replace(html[i], &replaced);
+ if (replace_size > EV_SIZE_MAX - new_size) {
+ event_warn("%s: html_replace overflow", __func__);
+ return (NULL);
+ }
+ new_size += replace_size;
+ }
+
+ if (new_size == EV_SIZE_MAX)
+ return (NULL);
+ p = escaped_html = mm_malloc(new_size + 1);
+ if (escaped_html == NULL) {
+ event_warn("%s: malloc(%lu)", __func__,
+ (unsigned long)(new_size + 1));
+ return (NULL);
+ }
+ for (i = 0; i < old_size; ++i) {
+ const char *replaced = &html[i];
+ const size_t len = html_replace(html[i], &replaced);
+ memcpy(p, replaced, len);
+ p += len;
+ }
+
+ *p = '\0';
+
+ return (escaped_html);
+}
+
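+/* Editor's illustrative sketch (not part of libevent): typical use of
+ * evhttp_htmlescape() when echoing untrusted text back inside an HTML body.
+ * 'reply' and 'user_supplied' are hypothetical names used only for
+ * illustration; the escaped string is heap-allocated and must be released by
+ * the caller (with free() under the default allocators, or the replacement
+ * installed via event_set_mem_functions()):
+ *
+ *   char *safe = evhttp_htmlescape(user_supplied);
+ *   if (safe != NULL) {
+ *       evbuffer_add_printf(reply, "<p>You searched for: %s</p>", safe);
+ *       free(safe);
+ *   }
+ */
+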
+/** Given an evhttp_cmd_type, returns a constant string containing the
+ * equivalent HTTP command, or NULL if the evhttp_command_type is
+ * unrecognized. */
+static const char *
+evhttp_method(enum evhttp_cmd_type type)
+{
+ const char *method;
+
+ switch (type) {
+ case EVHTTP_REQ_GET:
+ method = "GET";
+ break;
+ case EVHTTP_REQ_POST:
+ method = "POST";
+ break;
+ case EVHTTP_REQ_HEAD:
+ method = "HEAD";
+ break;
+ case EVHTTP_REQ_PUT:
+ method = "PUT";
+ break;
+ case EVHTTP_REQ_DELETE:
+ method = "DELETE";
+ break;
+ case EVHTTP_REQ_OPTIONS:
+ method = "OPTIONS";
+ break;
+ case EVHTTP_REQ_TRACE:
+ method = "TRACE";
+ break;
+ case EVHTTP_REQ_CONNECT:
+ method = "CONNECT";
+ break;
+ case EVHTTP_REQ_PATCH:
+ method = "PATCH";
+ break;
+ default:
+ method = NULL;
+ break;
+ }
+
+ return (method);
+}
+
+/**
+ * Determines if a response should have a body.
+ * Follows the rules in RFC 2616 section 4.3.
+ * @return 1 if the response MUST have a body; 0 if the response MUST NOT have
+ * a body.
+ */
+static int
+evhttp_response_needs_body(struct evhttp_request *req)
+{
+ return (req->response_code != HTTP_NOCONTENT &&
+ req->response_code != HTTP_NOTMODIFIED &&
+ (req->response_code < 100 || req->response_code >= 200) &&
+ req->type != EVHTTP_REQ_HEAD);
+}
+
+/** Helper: called after we've added some data to an evcon's bufferevent's
+ * output buffer. Sets the evconn's writing-is-done callback, and puts
+ * the bufferevent into writing mode.
+ */
+static void
+evhttp_write_buffer(struct evhttp_connection *evcon,
+ void (*cb)(struct evhttp_connection *, void *), void *arg)
+{
+ event_debug(("%s: preparing to write buffer\n", __func__));
+
+	/* Set callback */
+ evcon->cb = cb;
+ evcon->cb_arg = arg;
+
+ /* Disable the read callback: we don't actually care about data;
+ * we only care about close detection. (We don't disable reading,
+ * since we *do* want to learn about any close events.) */
+ bufferevent_setcb(evcon->bufev,
+ NULL, /*read*/
+ evhttp_write_cb,
+ evhttp_error_cb,
+ evcon);
+
+ bufferevent_enable(evcon->bufev, EV_WRITE);
+}
+
+static void
+evhttp_send_continue_done(struct evhttp_connection *evcon, void *arg)
+{
+ bufferevent_disable(evcon->bufev, EV_WRITE);
+}
+
+static void
+evhttp_send_continue(struct evhttp_connection *evcon,
+ struct evhttp_request *req)
+{
+ bufferevent_enable(evcon->bufev, EV_WRITE);
+ evbuffer_add_printf(bufferevent_get_output(evcon->bufev),
+ "HTTP/%d.%d 100 Continue\r\n\r\n",
+ req->major, req->minor);
+ evcon->cb = evhttp_send_continue_done;
+ evcon->cb_arg = NULL;
+ bufferevent_setcb(evcon->bufev,
+ evhttp_read_cb,
+ evhttp_write_cb,
+ evhttp_error_cb,
+ evcon);
+}
+
+/** Helper: returns true iff evconn is in any connected state. */
+static int
+evhttp_connected(struct evhttp_connection *evcon)
+{
+ switch (evcon->state) {
+ case EVCON_DISCONNECTED:
+ case EVCON_CONNECTING:
+ return (0);
+ case EVCON_IDLE:
+ case EVCON_READING_FIRSTLINE:
+ case EVCON_READING_HEADERS:
+ case EVCON_READING_BODY:
+ case EVCON_READING_TRAILER:
+ case EVCON_WRITING:
+ default:
+ return (1);
+ }
+}
+
+/* Creates the headers needed for an outgoing HTTP request, adds them to
+ * the request's header list, and writes the request line to the
+ * connection's output buffer.
+ */
+static void
+evhttp_make_header_request(struct evhttp_connection *evcon,
+ struct evhttp_request *req)
+{
+ const char *method;
+
+ evhttp_remove_header(req->output_headers, "Proxy-Connection");
+
+ /* Generate request line */
+ method = evhttp_method(req->type);
+ evbuffer_add_printf(bufferevent_get_output(evcon->bufev),
+ "%s %s HTTP/%d.%d\r\n",
+ method, req->uri, req->major, req->minor);
+
+ /* Add the content length on a post or put request if missing */
+ if ((req->type == EVHTTP_REQ_POST || req->type == EVHTTP_REQ_PUT) &&
+ evhttp_find_header(req->output_headers, "Content-Length") == NULL){
+ char size[22];
+ evutil_snprintf(size, sizeof(size), EV_SIZE_FMT,
+ EV_SIZE_ARG(evbuffer_get_length(req->output_buffer)));
+ evhttp_add_header(req->output_headers, "Content-Length", size);
+ }
+}
+
+/** Return true if the list of headers in 'headers', interpreted with respect
+ * to flags, means that we should send a "connection: close" when the request
+ * is done. */
+static int
+evhttp_is_connection_close(int flags, struct evkeyvalq* headers)
+{
+ if (flags & EVHTTP_PROXY_REQUEST) {
+ /* proxy connection */
+ const char *connection = evhttp_find_header(headers, "Proxy-Connection");
+ return (connection == NULL || evutil_ascii_strcasecmp(connection, "keep-alive") != 0);
+ } else {
+ const char *connection = evhttp_find_header(headers, "Connection");
+ return (connection != NULL && evutil_ascii_strcasecmp(connection, "close") == 0);
+ }
+}
+static int
+evhttp_is_request_connection_close(struct evhttp_request *req)
+{
+ return
+ evhttp_is_connection_close(req->flags, req->input_headers) ||
+ evhttp_is_connection_close(req->flags, req->output_headers);
+}
+
+/* Return true iff 'headers' contains 'Connection: keep-alive' */
+static int
+evhttp_is_connection_keepalive(struct evkeyvalq* headers)
+{
+ const char *connection = evhttp_find_header(headers, "Connection");
+ return (connection != NULL
+ && evutil_ascii_strncasecmp(connection, "keep-alive", 10) == 0);
+}
+
+/* Add a correct "Date" header to headers, unless it already has one. */
+static void
+evhttp_maybe_add_date_header(struct evkeyvalq *headers)
+{
+ if (evhttp_find_header(headers, "Date") == NULL) {
+ char date[50];
+#ifndef _WIN32
+ struct tm cur;
+#endif
+ struct tm *cur_p;
+ time_t t = time(NULL);
+#ifdef _WIN32
+ cur_p = gmtime(&t);
+#else
+ gmtime_r(&t, &cur);
+ cur_p = &cur;
+#endif
+ if (strftime(date, sizeof(date),
+ "%a, %d %b %Y %H:%M:%S GMT", cur_p) != 0) {
+ evhttp_add_header(headers, "Date", date);
+ }
+ }
+}
+
+/* Add a "Content-Length" header with value 'content_length' to headers,
+ * unless it already has a content-length or transfer-encoding header. */
+static void
+evhttp_maybe_add_content_length_header(struct evkeyvalq *headers,
+ size_t content_length)
+{
+ if (evhttp_find_header(headers, "Transfer-Encoding") == NULL &&
+ evhttp_find_header(headers, "Content-Length") == NULL) {
+ char len[22];
+ evutil_snprintf(len, sizeof(len), EV_SIZE_FMT,
+ EV_SIZE_ARG(content_length));
+ evhttp_add_header(headers, "Content-Length", len);
+ }
+}
+
+/*
+ * Create the headers needed for an HTTP reply in req->output_headers,
+ * and write the first line of the HTTP response for req to evcon.
+ */
+static void
+evhttp_make_header_response(struct evhttp_connection *evcon,
+ struct evhttp_request *req)
+{
+ int is_keepalive = evhttp_is_connection_keepalive(req->input_headers);
+ evbuffer_add_printf(bufferevent_get_output(evcon->bufev),
+ "HTTP/%d.%d %d %s\r\n",
+ req->major, req->minor, req->response_code,
+ req->response_code_line);
+
+ if (req->major == 1) {
+ if (req->minor >= 1)
+ evhttp_maybe_add_date_header(req->output_headers);
+
+ /*
+		 * if the protocol is 1.0, and the connection was keep-alive,
+ * we need to add a keep-alive header, too.
+ */
+ if (req->minor == 0 && is_keepalive)
+ evhttp_add_header(req->output_headers,
+ "Connection", "keep-alive");
+
+ if ((req->minor >= 1 || is_keepalive) &&
+ evhttp_response_needs_body(req)) {
+ /*
+ * we need to add the content length if the
+			 * user did not give it; this is required for
+ * persistent connections to work.
+ */
+ evhttp_maybe_add_content_length_header(
+ req->output_headers,
+ evbuffer_get_length(req->output_buffer));
+ }
+ }
+
+ /* Potentially add headers for unidentified content. */
+ if (evhttp_response_needs_body(req)) {
+ if (evhttp_find_header(req->output_headers,
+ "Content-Type") == NULL
+ && evcon->http_server->default_content_type) {
+ evhttp_add_header(req->output_headers,
+ "Content-Type",
+ evcon->http_server->default_content_type);
+ }
+ }
+
+ /* if the request asked for a close, we send a close, too */
+ if (evhttp_is_connection_close(req->flags, req->input_headers)) {
+ evhttp_remove_header(req->output_headers, "Connection");
+ if (!(req->flags & EVHTTP_PROXY_REQUEST))
+ evhttp_add_header(req->output_headers, "Connection", "close");
+ evhttp_remove_header(req->output_headers, "Proxy-Connection");
+ }
+}
+
+/** Generate all headers appropriate for sending the http request in req (or
+ * the response, if we're sending a response), and write them to evcon's
+ * bufferevent. Also writes all data from req->output_buffer */
+static void
+evhttp_make_header(struct evhttp_connection *evcon, struct evhttp_request *req)
+{
+ struct evkeyval *header;
+ struct evbuffer *output = bufferevent_get_output(evcon->bufev);
+
+ /*
+	 * Depending on whether this is an HTTP request or response, we might need to
+ * add some new headers or remove existing headers.
+ */
+ if (req->kind == EVHTTP_REQUEST) {
+ evhttp_make_header_request(evcon, req);
+ } else {
+ evhttp_make_header_response(evcon, req);
+ }
+
+ TAILQ_FOREACH(header, req->output_headers, next) {
+ evbuffer_add_printf(output, "%s: %s\r\n",
+ header->key, header->value);
+ }
+ evbuffer_add(output, "\r\n", 2);
+
+ if (evbuffer_get_length(req->output_buffer) > 0) {
+ /*
+		 * For a request, we add the POST data; for a reply, this
+ * is the regular data.
+ */
+ /* XXX We might want to support waiting (a limited amount of
+ time) for a continue status line from the server before
+ sending POST/PUT message bodies. */
+ evbuffer_add_buffer(output, req->output_buffer);
+ }
+}
+
+void
+evhttp_connection_set_max_headers_size(struct evhttp_connection *evcon,
+ ev_ssize_t new_max_headers_size)
+{
+ if (new_max_headers_size<0)
+ evcon->max_headers_size = EV_SIZE_MAX;
+ else
+ evcon->max_headers_size = new_max_headers_size;
+}
+void
+evhttp_connection_set_max_body_size(struct evhttp_connection* evcon,
+ ev_ssize_t new_max_body_size)
+{
+ if (new_max_body_size<0)
+ evcon->max_body_size = EV_UINT64_MAX;
+ else
+ evcon->max_body_size = new_max_body_size;
+}
+
+static int
+evhttp_connection_incoming_fail(struct evhttp_request *req,
+ enum evhttp_request_error error)
+{
+ switch (error) {
+ case EVREQ_HTTP_TIMEOUT:
+ case EVREQ_HTTP_EOF:
+ /*
+ * these are cases in which we probably should just
+ * close the connection and not send a reply. this
+ * case may happen when a browser keeps a persistent
+		 * connection open and we time out on the read. when
+		 * the request is still being used for sending, we
+		 * need to disassociate it from the connection here.
+ */
+ if (!req->userdone) {
+ /* remove it so that it will not be freed */
+ TAILQ_REMOVE(&req->evcon->requests, req, next);
+ /* indicate that this request no longer has a
+ * connection object
+ */
+ req->evcon = NULL;
+ }
+ return (-1);
+ case EVREQ_HTTP_INVALID_HEADER:
+ case EVREQ_HTTP_BUFFER_ERROR:
+ case EVREQ_HTTP_REQUEST_CANCEL:
+ case EVREQ_HTTP_DATA_TOO_LONG:
+ default: /* xxx: probably should just error on default */
+ /* the callback looks at the uri to determine errors */
+ if (req->uri) {
+ mm_free(req->uri);
+ req->uri = NULL;
+ }
+ if (req->uri_elems) {
+ evhttp_uri_free(req->uri_elems);
+ req->uri_elems = NULL;
+ }
+
+ /*
+		 * the callback needs to send a reply; once the reply has
+		 * been sent, the connection should get freed.
+ */
+ (*req->cb)(req, req->cb_arg);
+ }
+
+ return (0);
+}
+
+/* Free the request, unless the user has acquired ownership of it with
+ * evhttp_request_own(). */
+static inline void
+evhttp_request_free_auto(struct evhttp_request *req)
+{
+ if (!(req->flags & EVHTTP_USER_OWNED))
+ evhttp_request_free(req);
+}
+
+static void
+evhttp_request_free_(struct evhttp_connection *evcon, struct evhttp_request *req)
+{
+ TAILQ_REMOVE(&evcon->requests, req, next);
+ evhttp_request_free_auto(req);
+}
+
+/* Called when evcon has experienced a (non-recoverable? -NM) error, as
+ * given in error. If it's an outgoing connection, reset the connection,
+ * retry any pending requests, and inform the user. If it's incoming,
+ * delegates to evhttp_connection_incoming_fail(). */
+void
+evhttp_connection_fail_(struct evhttp_connection *evcon,
+ enum evhttp_request_error error)
+{
+ const int errsave = EVUTIL_SOCKET_ERROR();
+ struct evhttp_request* req = TAILQ_FIRST(&evcon->requests);
+ void (*cb)(struct evhttp_request *, void *);
+ void *cb_arg;
+ void (*error_cb)(enum evhttp_request_error, void *);
+ void *error_cb_arg;
+ EVUTIL_ASSERT(req != NULL);
+
+ bufferevent_disable(evcon->bufev, EV_READ|EV_WRITE);
+
+ if (evcon->flags & EVHTTP_CON_INCOMING) {
+ /*
+ * for incoming requests, there are two different
+ * failure cases. it's either a network level error
+ * or an http layer error. for problems on the network
+ * layer like timeouts we just drop the connections.
+ * For HTTP problems, we might have to send back a
+ * reply before the connection can be freed.
+ */
+ if (evhttp_connection_incoming_fail(req, error) == -1)
+ evhttp_connection_free(evcon);
+ return;
+ }
+
+ error_cb = req->error_cb;
+ error_cb_arg = req->cb_arg;
+ /* when the request was canceled, the callback is not executed */
+ if (error != EVREQ_HTTP_REQUEST_CANCEL) {
+ /* save the callback for later; the cb might free our object */
+ cb = req->cb;
+ cb_arg = req->cb_arg;
+ } else {
+ cb = NULL;
+ cb_arg = NULL;
+ }
+
+ /* do not fail all requests; the next request is going to get
+	 * sent over a new connection. when a user cancels a request,
+ * all other pending requests should be processed as normal
+ */
+ evhttp_request_free_(evcon, req);
+
+ /* reset the connection */
+ evhttp_connection_reset_(evcon);
+
+ /* We are trying the next request that was queued on us */
+ if (TAILQ_FIRST(&evcon->requests) != NULL)
+ evhttp_connection_connect_(evcon);
+
+ /* The call to evhttp_connection_reset_ overwrote errno.
+ * Let's restore the original errno, so that the user's
+ * callback can have a better idea of what the error was.
+ */
+ EVUTIL_SET_SOCKET_ERROR(errsave);
+
+ /* inform the user */
+ if (error_cb != NULL)
+ error_cb(error, error_cb_arg);
+ if (cb != NULL)
+ (*cb)(NULL, cb_arg);
+}
+
+/* Bufferevent callback: invoked when any data has been written from an
+ * http connection's bufferevent */
+static void
+evhttp_write_cb(struct bufferevent *bufev, void *arg)
+{
+ struct evhttp_connection *evcon = arg;
+
+ /* Activate our call back */
+ if (evcon->cb != NULL)
+ (*evcon->cb)(evcon, evcon->cb_arg);
+}
+
+/**
+ * Advance the connection state.
+ * - If this is an outgoing connection, we've just processed the response;
+ * idle or close the connection.
+ * - If this is an incoming connection, we've just processed the request;
+ * respond.
+ */
+static void
+evhttp_connection_done(struct evhttp_connection *evcon)
+{
+ struct evhttp_request *req = TAILQ_FIRST(&evcon->requests);
+ int con_outgoing = evcon->flags & EVHTTP_CON_OUTGOING;
+ int free_evcon = 0;
+
+ if (con_outgoing) {
+ /* idle or close the connection */
+ int need_close = evhttp_is_request_connection_close(req);
+ TAILQ_REMOVE(&evcon->requests, req, next);
+ req->evcon = NULL;
+
+ evcon->state = EVCON_IDLE;
+
+ /* check if we got asked to close the connection */
+ if (need_close)
+ evhttp_connection_reset_(evcon);
+
+ if (TAILQ_FIRST(&evcon->requests) != NULL) {
+ /*
+ * We have more requests; reset the connection
+ * and deal with the next request.
+ */
+ if (!evhttp_connected(evcon))
+ evhttp_connection_connect_(evcon);
+ else
+ evhttp_request_dispatch(evcon);
+ } else if (!need_close) {
+ /*
+ * The connection is going to be persistent, but we
+ * need to detect if the other side closes it.
+ */
+ evhttp_connection_start_detectclose(evcon);
+ } else if ((evcon->flags & EVHTTP_CON_AUTOFREE)) {
+ /*
+			 * If we have no more requests that need completion
+			 * and we're not waiting for the connection to close,
+			 * mark the connection to be freed below.
+ */
+ free_evcon = 1;
+ }
+ } else {
+ /*
+ * incoming connection - we need to leave the request on the
+ * connection so that we can reply to it.
+ */
+ evcon->state = EVCON_WRITING;
+ }
+
+ /* notify the user of the request */
+ (*req->cb)(req, req->cb_arg);
+
+	/* if this was an outgoing request, we own it and it's done, so free it. */
+ if (con_outgoing) {
+ evhttp_request_free_auto(req);
+ }
+
+	/* If this was the last request of an outgoing connection, we're not
+	 * waiting to receive a connection-close event, and the connection is
+	 * set to auto-free, free it now. We check to ensure our request
+ * list is empty one last time just in case our callback added a
+ * new request.
+ */
+ if (free_evcon && TAILQ_FIRST(&evcon->requests) == NULL) {
+ evhttp_connection_free(evcon);
+ }
+}
+
+/*
+ * Handles reading from a chunked request.
+ * return ALL_DATA_READ:
+ * all data has been read
+ * return MORE_DATA_EXPECTED:
+ * more data is expected
+ * return DATA_CORRUPTED:
+ * data is corrupted
+ * return REQUEST_CANCELED:
+ * request was canceled by the user calling evhttp_cancel_request
+ * return DATA_TOO_LONG:
+ * ran over the maximum limit
+ */
+
+static enum message_read_status
+evhttp_handle_chunked_read(struct evhttp_request *req, struct evbuffer *buf)
+{
+ if (req == NULL || buf == NULL) {
+ return DATA_CORRUPTED;
+ }
+
+ while (1) {
+ size_t buflen;
+
+ if ((buflen = evbuffer_get_length(buf)) == 0) {
+ break;
+ }
+
+ /* evbuffer_get_length returns size_t, but len variable is ssize_t,
+ * check for overflow conditions */
+ if (buflen > EV_SSIZE_MAX) {
+ return DATA_CORRUPTED;
+ }
+
+ if (req->ntoread < 0) {
+ /* Read chunk size */
+ ev_int64_t ntoread;
+ char *p = evbuffer_readln(buf, NULL, EVBUFFER_EOL_CRLF);
+ char *endp;
+ int error;
+ if (p == NULL)
+ break;
+			/* empty line: the CRLF that ends the previous chunk's data */
+ if (strlen(p) == 0) {
+ mm_free(p);
+ continue;
+ }
+ ntoread = evutil_strtoll(p, &endp, 16);
+ error = (*p == '\0' ||
+ (*endp != '\0' && *endp != ' ') ||
+ ntoread < 0);
+ mm_free(p);
+ if (error) {
+ /* could not get chunk size */
+ return (DATA_CORRUPTED);
+ }
+
+ /* ntoread is signed int64, body_size is unsigned size_t, check for under/overflow conditions */
+ if ((ev_uint64_t)ntoread > EV_SIZE_MAX - req->body_size) {
+ return DATA_CORRUPTED;
+ }
+
+ if (req->body_size + (size_t)ntoread > req->evcon->max_body_size) {
+ /* failed body length test */
+ event_debug(("Request body is too long"));
+ return (DATA_TOO_LONG);
+ }
+
+ req->body_size += (size_t)ntoread;
+ req->ntoread = ntoread;
+ if (req->ntoread == 0) {
+ /* Last chunk */
+ return (ALL_DATA_READ);
+ }
+ continue;
+ }
+
+ /* req->ntoread is signed int64, len is ssize_t, based on arch,
+ * ssize_t could only be 32b, check for these conditions */
+ if (req->ntoread > EV_SSIZE_MAX) {
+ return DATA_CORRUPTED;
+ }
+
+ /* don't have enough to complete a chunk; wait for more */
+ if (req->ntoread > 0 && buflen < (ev_uint64_t)req->ntoread)
+ return (MORE_DATA_EXPECTED);
+
+ /* Completed chunk */
+ evbuffer_remove_buffer(buf, req->input_buffer, (size_t)req->ntoread);
+ req->ntoread = -1;
+ if (req->chunk_cb != NULL) {
+ req->flags |= EVHTTP_REQ_DEFER_FREE;
+ (*req->chunk_cb)(req, req->cb_arg);
+ evbuffer_drain(req->input_buffer,
+ evbuffer_get_length(req->input_buffer));
+ req->flags &= ~EVHTTP_REQ_DEFER_FREE;
+ if ((req->flags & EVHTTP_REQ_NEEDS_FREE) != 0) {
+ return (REQUEST_CANCELED);
+ }
+ }
+ }
+
+ return (MORE_DATA_EXPECTED);
+}
+
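+/* Editor's illustrative sketch (not part of libevent): the wire format that
+ * evhttp_handle_chunked_read() consumes.  Each chunk is a hexadecimal length
+ * line followed by that many bytes and a CRLF; a "0" length line ends the
+ * body and is followed by the (possibly empty) trailer that
+ * evhttp_read_trailer() below parses:
+ *
+ *   4\r\n
+ *   Wiki\r\n
+ *   7\r\n
+ *   pedia i\r\n
+ *   0\r\n
+ *   \r\n
+ *
+ * Reading the "4" line sets req->ntoread = 4; once at least 4 bytes are
+ * buffered they are moved into req->input_buffer and ntoread is reset to -1
+ * so the next length line can be read.  The "0" line makes the function
+ * return ALL_DATA_READ.
+ */
+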
+static void
+evhttp_read_trailer(struct evhttp_connection *evcon, struct evhttp_request *req)
+{
+ struct evbuffer *buf = bufferevent_get_input(evcon->bufev);
+
+ switch (evhttp_parse_headers_(req, buf)) {
+ case DATA_CORRUPTED:
+ case DATA_TOO_LONG:
+ evhttp_connection_fail_(evcon, EVREQ_HTTP_DATA_TOO_LONG);
+ break;
+ case ALL_DATA_READ:
+ bufferevent_disable(evcon->bufev, EV_READ);
+ evhttp_connection_done(evcon);
+ break;
+ case MORE_DATA_EXPECTED:
+ case REQUEST_CANCELED: /* ??? */
+ default:
+ break;
+ }
+}
+
+static void
+evhttp_read_body(struct evhttp_connection *evcon, struct evhttp_request *req)
+{
+ struct evbuffer *buf = bufferevent_get_input(evcon->bufev);
+
+ if (req->chunked) {
+ switch (evhttp_handle_chunked_read(req, buf)) {
+ case ALL_DATA_READ:
+ /* finished last chunk */
+ evcon->state = EVCON_READING_TRAILER;
+ evhttp_read_trailer(evcon, req);
+ return;
+ case DATA_CORRUPTED:
+ case DATA_TOO_LONG:
+ /* corrupted data */
+ evhttp_connection_fail_(evcon,
+ EVREQ_HTTP_DATA_TOO_LONG);
+ return;
+ case REQUEST_CANCELED:
+ /* request canceled */
+ evhttp_request_free_auto(req);
+ return;
+ case MORE_DATA_EXPECTED:
+ default:
+ break;
+ }
+ } else if (req->ntoread < 0) {
+ /* Read until connection close. */
+ if ((size_t)(req->body_size + evbuffer_get_length(buf)) < req->body_size) {
+ evhttp_connection_fail_(evcon, EVREQ_HTTP_INVALID_HEADER);
+ return;
+ }
+
+ req->body_size += evbuffer_get_length(buf);
+ evbuffer_add_buffer(req->input_buffer, buf);
+ } else if (req->chunk_cb != NULL || evbuffer_get_length(buf) >= (size_t)req->ntoread) {
+ /* XXX: the above get_length comparison has to be fixed for overflow conditions! */
+ /* We've postponed moving the data until now, but we're
+ * about to use it. */
+ size_t n = evbuffer_get_length(buf);
+
+ if (n > (size_t) req->ntoread)
+ n = (size_t) req->ntoread;
+ req->ntoread -= n;
+ req->body_size += n;
+ evbuffer_remove_buffer(buf, req->input_buffer, n);
+ }
+
+ if (req->body_size > req->evcon->max_body_size ||
+ (!req->chunked && req->ntoread >= 0 &&
+ (size_t)req->ntoread > req->evcon->max_body_size)) {
+		/* XXX: The above casted comparison must be checked for overflow */
+ /* failed body length test */
+ event_debug(("Request body is too long"));
+ evhttp_connection_fail_(evcon,
+ EVREQ_HTTP_DATA_TOO_LONG);
+ return;
+ }
+
+ if (evbuffer_get_length(req->input_buffer) > 0 && req->chunk_cb != NULL) {
+ req->flags |= EVHTTP_REQ_DEFER_FREE;
+ (*req->chunk_cb)(req, req->cb_arg);
+ req->flags &= ~EVHTTP_REQ_DEFER_FREE;
+ evbuffer_drain(req->input_buffer,
+ evbuffer_get_length(req->input_buffer));
+ if ((req->flags & EVHTTP_REQ_NEEDS_FREE) != 0) {
+ evhttp_request_free_auto(req);
+ return;
+ }
+ }
+
+ if (req->ntoread == 0) {
+ bufferevent_disable(evcon->bufev, EV_READ);
+ /* Completed content length */
+ evhttp_connection_done(evcon);
+ return;
+ }
+}
+
+#define get_deferred_queue(evcon) \
+ ((evcon)->base)
+
+/*
+ * Gets called when more data becomes available
+ */
+
+static void
+evhttp_read_cb(struct bufferevent *bufev, void *arg)
+{
+ struct evhttp_connection *evcon = arg;
+ struct evhttp_request *req = TAILQ_FIRST(&evcon->requests);
+
+ /* Cancel if it's pending. */
+ event_deferred_cb_cancel_(get_deferred_queue(evcon),
+ &evcon->read_more_deferred_cb);
+
+ switch (evcon->state) {
+ case EVCON_READING_FIRSTLINE:
+ evhttp_read_firstline(evcon, req);
+ /* note the request may have been freed in
+ * evhttp_read_body */
+ break;
+ case EVCON_READING_HEADERS:
+ evhttp_read_header(evcon, req);
+ /* note the request may have been freed in
+ * evhttp_read_body */
+ break;
+ case EVCON_READING_BODY:
+ evhttp_read_body(evcon, req);
+ /* note the request may have been freed in
+ * evhttp_read_body */
+ break;
+ case EVCON_READING_TRAILER:
+ evhttp_read_trailer(evcon, req);
+ break;
+ case EVCON_IDLE:
+ {
+#ifdef USE_DEBUG
+ struct evbuffer *input;
+ size_t total_len;
+
+ input = bufferevent_get_input(evcon->bufev);
+ total_len = evbuffer_get_length(input);
+ event_debug(("%s: read "EV_SIZE_FMT
+ " bytes in EVCON_IDLE state,"
+ " resetting connection",
+ __func__, EV_SIZE_ARG(total_len)));
+#endif
+
+ evhttp_connection_reset_(evcon);
+ }
+ break;
+ case EVCON_DISCONNECTED:
+ case EVCON_CONNECTING:
+ case EVCON_WRITING:
+ default:
+ event_errx(1, "%s: illegal connection state %d",
+ __func__, evcon->state);
+ }
+}
+
+static void
+evhttp_deferred_read_cb(struct event_callback *cb, void *data)
+{
+ struct evhttp_connection *evcon = data;
+ evhttp_read_cb(evcon->bufev, evcon);
+}
+
+static void
+evhttp_write_connectioncb(struct evhttp_connection *evcon, void *arg)
+{
+ /* This is after writing the request to the server */
+ struct evhttp_request *req = TAILQ_FIRST(&evcon->requests);
+ EVUTIL_ASSERT(req != NULL);
+
+ EVUTIL_ASSERT(evcon->state == EVCON_WRITING);
+
+ /* We need to wait until we've written all of our output data before we can continue */
+ if (evbuffer_get_length(bufferevent_get_output(evcon->bufev)) > 0) { return; }
+
+ /* We are done writing our header and are now expecting the response */
+ req->kind = EVHTTP_RESPONSE;
+
+ evhttp_start_read_(evcon);
+}
+
+/*
+ * Clean up a connection object
+ */
+
+void
+evhttp_connection_free(struct evhttp_connection *evcon)
+{
+ struct evhttp_request *req;
+
+ /* notify interested parties that this connection is going down */
+ if (evcon->fd != -1) {
+ if (evhttp_connected(evcon) && evcon->closecb != NULL)
+ (*evcon->closecb)(evcon, evcon->closecb_arg);
+ }
+
+ /* remove all requests that might be queued on this
+	 * connection. for server connections, this should be empty,
+ * because it gets dequeued either in evhttp_connection_done or
+ * evhttp_connection_fail_.
+ */
+ while ((req = TAILQ_FIRST(&evcon->requests)) != NULL) {
+ evhttp_request_free_(evcon, req);
+ }
+
+ if (evcon->http_server != NULL) {
+ struct evhttp *http = evcon->http_server;
+ TAILQ_REMOVE(&http->connections, evcon, next);
+ }
+
+ if (event_initialized(&evcon->retry_ev)) {
+ event_del(&evcon->retry_ev);
+ event_debug_unassign(&evcon->retry_ev);
+ }
+
+ if (evcon->bufev != NULL)
+ bufferevent_free(evcon->bufev);
+
+ event_deferred_cb_cancel_(get_deferred_queue(evcon),
+ &evcon->read_more_deferred_cb);
+
+ if (evcon->fd != -1) {
+ bufferevent_disable(evcon->bufev, EV_READ|EV_WRITE);
+ shutdown(evcon->fd, EVUTIL_SHUT_WR);
+ if (!(bufferevent_get_options_(evcon->bufev) & BEV_OPT_CLOSE_ON_FREE)) {
+ evutil_closesocket(evcon->fd);
+ }
+ }
+
+ if (evcon->bind_address != NULL)
+ mm_free(evcon->bind_address);
+
+ if (evcon->address != NULL)
+ mm_free(evcon->address);
+
+ mm_free(evcon);
+}
+
+void
+evhttp_connection_free_on_completion(struct evhttp_connection *evcon) {
+ evcon->flags |= EVHTTP_CON_AUTOFREE;
+}
+
+void
+evhttp_connection_set_local_address(struct evhttp_connection *evcon,
+ const char *address)
+{
+ EVUTIL_ASSERT(evcon->state == EVCON_DISCONNECTED);
+ if (evcon->bind_address)
+ mm_free(evcon->bind_address);
+ if ((evcon->bind_address = mm_strdup(address)) == NULL)
+ event_warn("%s: strdup", __func__);
+}
+
+void
+evhttp_connection_set_local_port(struct evhttp_connection *evcon,
+ ev_uint16_t port)
+{
+ EVUTIL_ASSERT(evcon->state == EVCON_DISCONNECTED);
+ evcon->bind_port = port;
+}
+
+static void
+evhttp_request_dispatch(struct evhttp_connection* evcon)
+{
+ struct evhttp_request *req = TAILQ_FIRST(&evcon->requests);
+
+	/* this should not usually happen but it's possible */
+ if (req == NULL)
+ return;
+
+ /* delete possible close detection events */
+ evhttp_connection_stop_detectclose(evcon);
+
+ /* we assume that the connection is connected already */
+ EVUTIL_ASSERT(evcon->state == EVCON_IDLE);
+
+ evcon->state = EVCON_WRITING;
+
+	/* Create the header from the stored arguments */
+ evhttp_make_header(evcon, req);
+
+ evhttp_write_buffer(evcon, evhttp_write_connectioncb, NULL);
+}
+
+/* Reset our connection state: disables reading/writing, closes our fd (if
+* any), clears out buffers, and puts us in state DISCONNECTED. */
+void
+evhttp_connection_reset_(struct evhttp_connection *evcon)
+{
+ struct evbuffer *tmp;
+
+ /* XXXX This is not actually an optimal fix. Instead we ought to have
+ an API for "stop connecting", or use bufferevent_setfd to turn off
+ connecting. But for Libevent 2.0, this seems like a minimal change
+ least likely to disrupt the rest of the bufferevent and http code.
+
+ Why is this here? If the fd is set in the bufferevent, and the
+ bufferevent is connecting, then you can't actually stop the
+ bufferevent from trying to connect with bufferevent_disable(). The
+ connect will never trigger, since we close the fd, but the timeout
+ might. That caused an assertion failure in evhttp_connection_fail_.
+ */
+ bufferevent_disable_hard_(evcon->bufev, EV_READ|EV_WRITE);
+
+ if (evcon->fd != -1) {
+ /* inform interested parties about connection close */
+ if (evhttp_connected(evcon) && evcon->closecb != NULL)
+ (*evcon->closecb)(evcon, evcon->closecb_arg);
+
+ shutdown(evcon->fd, EVUTIL_SHUT_WR);
+ evutil_closesocket(evcon->fd);
+ bufferevent_setfd(evcon->bufev, -1);
+ evcon->fd = -1;
+ }
+
+ /* we need to clean up any buffered data */
+ tmp = bufferevent_get_output(evcon->bufev);
+ evbuffer_drain(tmp, evbuffer_get_length(tmp));
+ tmp = bufferevent_get_input(evcon->bufev);
+ evbuffer_drain(tmp, evbuffer_get_length(tmp));
+
+ evcon->state = EVCON_DISCONNECTED;
+}
+
+static void
+evhttp_connection_start_detectclose(struct evhttp_connection *evcon)
+{
+ evcon->flags |= EVHTTP_CON_CLOSEDETECT;
+
+ bufferevent_enable(evcon->bufev, EV_READ);
+}
+
+static void
+evhttp_connection_stop_detectclose(struct evhttp_connection *evcon)
+{
+ evcon->flags &= ~EVHTTP_CON_CLOSEDETECT;
+
+ bufferevent_disable(evcon->bufev, EV_READ);
+}
+
+static void
+evhttp_connection_retry(evutil_socket_t fd, short what, void *arg)
+{
+ struct evhttp_connection *evcon = arg;
+
+ evcon->state = EVCON_DISCONNECTED;
+ evhttp_connection_connect_(evcon);
+}
+
+static void
+evhttp_connection_cb_cleanup(struct evhttp_connection *evcon)
+{
+ struct evcon_requestq requests;
+
+ evhttp_connection_reset_(evcon);
+ if (evcon->retry_max < 0 || evcon->retry_cnt < evcon->retry_max) {
+ struct timeval tv_retry = evcon->initial_retry_timeout;
+ int i;
+ evtimer_assign(&evcon->retry_ev, evcon->base, evhttp_connection_retry, evcon);
+ /* XXXX handle failure from evhttp_add_event */
+ for (i=0; i < evcon->retry_cnt; ++i) {
+ tv_retry.tv_usec *= 2;
+ if (tv_retry.tv_usec > 1000000) {
+ tv_retry.tv_usec -= 1000000;
+ tv_retry.tv_sec += 1;
+ }
+ tv_retry.tv_sec *= 2;
+ if (tv_retry.tv_sec > 3600) {
+ tv_retry.tv_sec = 3600;
+ tv_retry.tv_usec = 0;
+ }
+ }
+ event_add(&evcon->retry_ev, &tv_retry);
+ evcon->retry_cnt++;
+ return;
+ }
+
+ /*
+ * User callback can do evhttp_make_request() on the same
+	 * evcon, so a new request will be added to evcon->requests. To
+ * avoid freeing it prematurely we iterate over the copy of
+ * the queue.
+ */
+ TAILQ_INIT(&requests);
+ while (TAILQ_FIRST(&evcon->requests) != NULL) {
+ struct evhttp_request *request = TAILQ_FIRST(&evcon->requests);
+ TAILQ_REMOVE(&evcon->requests, request, next);
+ TAILQ_INSERT_TAIL(&requests, request, next);
+ }
+
+ /* for now, we just signal all requests by executing their callbacks */
+ while (TAILQ_FIRST(&requests) != NULL) {
+ struct evhttp_request *request = TAILQ_FIRST(&requests);
+ TAILQ_REMOVE(&requests, request, next);
+ request->evcon = NULL;
+
+ /* we might want to set an error here */
+ request->cb(request, request->cb_arg);
+ evhttp_request_free_auto(request);
+ }
+}
+
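+/* Editor's illustrative sketch (not part of libevent): the retry delay
+ * computed above doubles on every failed attempt, starting from
+ * initial_retry_timeout and capped at one hour.  Assuming, purely for
+ * illustration, an initial_retry_timeout of 2 seconds and an unlimited
+ * retry_max:
+ *
+ *   retry_cnt 0 -> wait  2s before reconnecting
+ *   retry_cnt 1 -> wait  4s
+ *   retry_cnt 2 -> wait  8s
+ *   ...
+ *   retry_cnt n -> wait min(2s * 2^n, 3600s)
+ */
+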
+static void
+evhttp_error_cb(struct bufferevent *bufev, short what, void *arg)
+{
+ struct evhttp_connection *evcon = arg;
+ struct evhttp_request *req = TAILQ_FIRST(&evcon->requests);
+
+ if (evcon->fd == -1)
+ evcon->fd = bufferevent_getfd(bufev);
+
+ switch (evcon->state) {
+ case EVCON_CONNECTING:
+ if (what & BEV_EVENT_TIMEOUT) {
+ event_debug(("%s: connection timeout for \"%s:%d\" on "
+ EV_SOCK_FMT,
+ __func__, evcon->address, evcon->port,
+ EV_SOCK_ARG(evcon->fd)));
+ evhttp_connection_cb_cleanup(evcon);
+ return;
+ }
+ break;
+
+ case EVCON_READING_BODY:
+ if (!req->chunked && req->ntoread < 0
+ && what == (BEV_EVENT_READING|BEV_EVENT_EOF)) {
+ /* EOF on read can be benign */
+ evhttp_connection_done(evcon);
+ return;
+ }
+ break;
+
+ case EVCON_DISCONNECTED:
+ case EVCON_IDLE:
+ case EVCON_READING_FIRSTLINE:
+ case EVCON_READING_HEADERS:
+ case EVCON_READING_TRAILER:
+ case EVCON_WRITING:
+ default:
+ break;
+ }
+
+ /* when we are in close detect mode, a read error means that
+ * the other side closed their connection.
+ */
+ if (evcon->flags & EVHTTP_CON_CLOSEDETECT) {
+ evcon->flags &= ~EVHTTP_CON_CLOSEDETECT;
+ EVUTIL_ASSERT(evcon->http_server == NULL);
+ /* For connections from the client, we just
+ * reset the connection so that it becomes
+ * disconnected.
+ */
+ EVUTIL_ASSERT(evcon->state == EVCON_IDLE);
+ evhttp_connection_reset_(evcon);
+
+ /*
+		 * If there are no more requests pending and the connection
+		 * is set to auto-free once all requests have completed,
+		 * free it now.
+ */
+ if (TAILQ_FIRST(&evcon->requests) == NULL
+ && (evcon->flags & EVHTTP_CON_OUTGOING)
+ && (evcon->flags & EVHTTP_CON_AUTOFREE)) {
+ evhttp_connection_free(evcon);
+ }
+ return;
+ }
+
+ if (what & BEV_EVENT_TIMEOUT) {
+ evhttp_connection_fail_(evcon, EVREQ_HTTP_TIMEOUT);
+ } else if (what & (BEV_EVENT_EOF|BEV_EVENT_ERROR)) {
+ evhttp_connection_fail_(evcon, EVREQ_HTTP_EOF);
+ } else if (what == BEV_EVENT_CONNECTED) {
+ } else {
+ evhttp_connection_fail_(evcon, EVREQ_HTTP_BUFFER_ERROR);
+ }
+}
+
+/*
+ * Event callback for asynchronous connection attempt.
+ */
+static void
+evhttp_connection_cb(struct bufferevent *bufev, short what, void *arg)
+{
+ struct evhttp_connection *evcon = arg;
+ int error;
+ ev_socklen_t errsz = sizeof(error);
+
+ if (evcon->fd == -1)
+ evcon->fd = bufferevent_getfd(bufev);
+
+ if (!(what & BEV_EVENT_CONNECTED)) {
+ /* some operating systems return ECONNREFUSED immediately
+ * when connecting to a local address. the cleanup is going
+ * to reschedule this function call.
+ */
+#ifndef _WIN32
+ if (errno == ECONNREFUSED)
+ goto cleanup;
+#endif
+ evhttp_error_cb(bufev, what, arg);
+ return;
+ }
+
+ if (evcon->fd == -1) {
+ event_debug(("%s: bufferevent_getfd returned -1",
+ __func__));
+ goto cleanup;
+ }
+
+ /* Check if the connection completed */
+ if (getsockopt(evcon->fd, SOL_SOCKET, SO_ERROR, (void*)&error,
+ &errsz) == -1) {
+ event_debug(("%s: getsockopt for \"%s:%d\" on "EV_SOCK_FMT,
+ __func__, evcon->address, evcon->port,
+ EV_SOCK_ARG(evcon->fd)));
+ goto cleanup;
+ }
+
+ if (error) {
+ event_debug(("%s: connect failed for \"%s:%d\" on "
+ EV_SOCK_FMT": %s",
+ __func__, evcon->address, evcon->port,
+ EV_SOCK_ARG(evcon->fd),
+ evutil_socket_error_to_string(error)));
+ goto cleanup;
+ }
+
+ /* We are connected to the server now */
+ event_debug(("%s: connected to \"%s:%d\" on "EV_SOCK_FMT"\n",
+ __func__, evcon->address, evcon->port,
+ EV_SOCK_ARG(evcon->fd)));
+
+ /* Reset the retry count as we were successful in connecting */
+ evcon->retry_cnt = 0;
+ evcon->state = EVCON_IDLE;
+
+ /* reset the bufferevent cbs */
+ bufferevent_setcb(evcon->bufev,
+ evhttp_read_cb,
+ evhttp_write_cb,
+ evhttp_error_cb,
+ evcon);
+
+ if (!evutil_timerisset(&evcon->timeout)) {
+ const struct timeval read_tv = { HTTP_READ_TIMEOUT, 0 };
+ const struct timeval write_tv = { HTTP_WRITE_TIMEOUT, 0 };
+ bufferevent_set_timeouts(evcon->bufev, &read_tv, &write_tv);
+ } else {
+ bufferevent_set_timeouts(evcon->bufev, &evcon->timeout, &evcon->timeout);
+ }
+
+ /* try to start requests that have queued up on this connection */
+ evhttp_request_dispatch(evcon);
+ return;
+
+ cleanup:
+ evhttp_connection_cb_cleanup(evcon);
+}
+
+/*
+ * Check if we got a valid response code.
+ */
+
+static int
+evhttp_valid_response_code(int code)
+{
+ if (code == 0)
+ return (0);
+
+ return (1);
+}
+
+static int
+evhttp_parse_http_version(const char *version, struct evhttp_request *req)
+{
+ int major, minor;
+ char ch;
+ int n = sscanf(version, "HTTP/%d.%d%c", &major, &minor, &ch);
+ if (n != 2 || major > 1) {
+ event_debug(("%s: bad version %s on message %p from %s",
+ __func__, version, req, req->remote_host));
+ return (-1);
+ }
+ req->major = major;
+ req->minor = minor;
+ return (0);
+}
+
+/* Parses the status line of a web server */
+
+static int
+evhttp_parse_response_line(struct evhttp_request *req, char *line)
+{
+ char *protocol;
+ char *number;
+ const char *readable = "";
+
+ protocol = strsep(&line, " ");
+ if (line == NULL)
+ return (-1);
+ number = strsep(&line, " ");
+ if (line != NULL)
+ readable = line;
+
+ if (evhttp_parse_http_version(protocol, req) < 0)
+ return (-1);
+
+ req->response_code = atoi(number);
+ if (!evhttp_valid_response_code(req->response_code)) {
+ event_debug(("%s: bad response code \"%s\"",
+ __func__, number));
+ return (-1);
+ }
+
+ if ((req->response_code_line = mm_strdup(readable)) == NULL) {
+ event_warn("%s: strdup", __func__);
+ return (-1);
+ }
+
+ return (0);
+}
+
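+/* Editor's illustrative sketch (not part of libevent): given the status line
+ *
+ *   HTTP/1.1 404 Not Found
+ *
+ * the parser above yields protocol "HTTP/1.1" (req->major = 1,
+ * req->minor = 1), response_code 404, and response_code_line "Not Found".
+ * A line without spaces, an unparsable version, or a response code of 0 is
+ * rejected with -1.
+ */
+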
+/* Parse the first line of a HTTP request */
+
+static int
+evhttp_parse_request_line(struct evhttp_request *req, char *line)
+{
+ char *method;
+ char *uri;
+ char *version;
+ const char *hostname;
+ const char *scheme;
+ size_t method_len;
+ enum evhttp_cmd_type type;
+
+ /* Parse the request line */
+ method = strsep(&line, " ");
+ if (line == NULL)
+ return (-1);
+ uri = strsep(&line, " ");
+ if (line == NULL)
+ return (-1);
+ version = strsep(&line, " ");
+ if (line != NULL)
+ return (-1);
+
+ method_len = (uri - method) - 1;
+ type = EVHTTP_REQ_UNKNOWN_;
+
+ /* First line */
+ switch (method_len) {
+ case 3:
+ /* The length of the method string is 3, meaning it can only be one of two methods: GET or PUT */
+
+ /* Since both GET and PUT share the same character 'T' at the end,
+ * if the string doesn't have 'T', we can immediately determine this
+ * is an invalid HTTP method */
+
+ if (method[2] != 'T') {
+ break;
+ }
+
+ switch (*method) {
+ case 'G':
+ /* This first byte is 'G', so make sure the next byte is
+ * 'E', if it isn't then this isn't a valid method */
+
+ if (method[1] == 'E') {
+ type = EVHTTP_REQ_GET;
+ }
+
+ break;
+ case 'P':
+ /* First byte is P, check second byte for 'U', if not,
+ * we know it's an invalid method */
+ if (method[1] == 'U') {
+ type = EVHTTP_REQ_PUT;
+ }
+ break;
+ default:
+ break;
+ }
+ break;
+ case 4:
+ /* The method length is 4 bytes, leaving only the methods "POST" and "HEAD" */
+ switch (*method) {
+ case 'P':
+ if (method[3] == 'T' && method[2] == 'S' && method[1] == 'O') {
+ type = EVHTTP_REQ_POST;
+ }
+ break;
+ case 'H':
+ if (method[3] == 'D' && method[2] == 'A' && method[1] == 'E') {
+ type = EVHTTP_REQ_HEAD;
+ }
+ break;
+ default:
+ break;
+ }
+ break;
+ case 5:
+ /* Method length is 5 bytes, which can only encompass PATCH and TRACE */
+ switch (*method) {
+ case 'P':
+ if (method[4] == 'H' && method[3] == 'C' && method[2] == 'T' && method[1] == 'A') {
+ type = EVHTTP_REQ_PATCH;
+ }
+ break;
+ case 'T':
+ if (method[4] == 'E' && method[3] == 'C' && method[2] == 'A' && method[1] == 'R') {
+ type = EVHTTP_REQ_TRACE;
+ }
+
+ break;
+ default:
+ break;
+ }
+ break;
+ case 6:
+		/* Method length is 6; the only valid 6-byte method is DELETE */
+
+ /* If the first byte isn't 'D' then it's invalid */
+ if (*method != 'D') {
+ break;
+ }
+
+ if (method[5] == 'E' && method[4] == 'T' && method[3] == 'E' && method[2] == 'L' && method[1] == 'E') {
+ type = EVHTTP_REQ_DELETE;
+ }
+
+ break;
+ case 7:
+ /* Method length is 7, only valid methods are "OPTIONS" and "CONNECT" */
+ switch (*method) {
+ case 'O':
+ if (method[6] == 'S' && method[5] == 'N' && method[4] == 'O' &&
+ method[3] == 'I' && method[2] == 'T' && method[1] == 'P') {
+ type = EVHTTP_REQ_OPTIONS;
+ }
+
+ break;
+ case 'C':
+ if (method[6] == 'T' && method[5] == 'C' && method[4] == 'E' &&
+ method[3] == 'N' && method[2] == 'N' && method[1] == 'O') {
+ type = EVHTTP_REQ_CONNECT;
+ }
+
+ break;
+ default:
+ break;
+ }
+ break;
+ } /* switch */
+
+ if ((int)type == EVHTTP_REQ_UNKNOWN_) {
+ event_debug(("%s: bad method %s on request %p from %s",
+ __func__, method, req, req->remote_host));
+ /* No error yet; we'll give a better error later when
+ * we see that req->type is unsupported. */
+ }
+
+ req->type = type;
+
+ if (evhttp_parse_http_version(version, req) < 0)
+ return (-1);
+
+ if ((req->uri = mm_strdup(uri)) == NULL) {
+ event_debug(("%s: mm_strdup", __func__));
+ return (-1);
+ }
+
+ if ((req->uri_elems = evhttp_uri_parse_with_flags(req->uri,
+ EVHTTP_URI_NONCONFORMANT)) == NULL) {
+ return -1;
+ }
+
+ /* If we have an absolute-URI, check to see if it is an http request
+ for a known vhost or server alias. If we don't know about this
+ host, we consider it a proxy request. */
+ scheme = evhttp_uri_get_scheme(req->uri_elems);
+ hostname = evhttp_uri_get_host(req->uri_elems);
+ if (scheme && (!evutil_ascii_strcasecmp(scheme, "http") ||
+ !evutil_ascii_strcasecmp(scheme, "https")) &&
+ hostname &&
+ !evhttp_find_vhost(req->evcon->http_server, NULL, hostname))
+ req->flags |= EVHTTP_PROXY_REQUEST;
+
+ return (0);
+}
+
+const char *
+evhttp_find_header(const struct evkeyvalq *headers, const char *key)
+{
+ struct evkeyval *header;
+
+ TAILQ_FOREACH(header, headers, next) {
+ if (evutil_ascii_strcasecmp(header->key, key) == 0)
+ return (header->value);
+ }
+
+ return (NULL);
+}
+
+void
+evhttp_clear_headers(struct evkeyvalq *headers)
+{
+ struct evkeyval *header;
+
+ for (header = TAILQ_FIRST(headers);
+ header != NULL;
+ header = TAILQ_FIRST(headers)) {
+ TAILQ_REMOVE(headers, header, next);
+ mm_free(header->key);
+ mm_free(header->value);
+ mm_free(header);
+ }
+}
+
+/*
+ * Returns 0, if the header was successfully removed.
+ * Returns -1, if the header could not be found.
+ */
+
+int
+evhttp_remove_header(struct evkeyvalq *headers, const char *key)
+{
+ struct evkeyval *header;
+
+ TAILQ_FOREACH(header, headers, next) {
+ if (evutil_ascii_strcasecmp(header->key, key) == 0)
+ break;
+ }
+
+ if (header == NULL)
+ return (-1);
+
+ /* Free and remove the header that we found */
+ TAILQ_REMOVE(headers, header, next);
+ mm_free(header->key);
+ mm_free(header->value);
+ mm_free(header);
+
+ return (0);
+}
+
+static int
+evhttp_header_is_valid_value(const char *value)
+{
+ const char *p = value;
+
+ while ((p = strpbrk(p, "\r\n")) != NULL) {
+ /* we really expect only one new line */
+ p += strspn(p, "\r\n");
+ /* we expect a space or tab for continuation */
+ if (*p != ' ' && *p != '\t')
+ return (0);
+ }
+ return (1);
+}
+
+int
+evhttp_add_header(struct evkeyvalq *headers,
+ const char *key, const char *value)
+{
+ event_debug(("%s: key: %s val: %s\n", __func__, key, value));
+
+ if (strchr(key, '\r') != NULL || strchr(key, '\n') != NULL) {
+ /* drop illegal headers */
+ event_debug(("%s: dropping illegal header key\n", __func__));
+ return (-1);
+ }
+
+ if (!evhttp_header_is_valid_value(value)) {
+ event_debug(("%s: dropping illegal header value\n", __func__));
+ return (-1);
+ }
+
+ return (evhttp_add_header_internal(headers, key, value));
+}
+
+static int
+evhttp_add_header_internal(struct evkeyvalq *headers,
+ const char *key, const char *value)
+{
+ struct evkeyval *header = mm_calloc(1, sizeof(struct evkeyval));
+ if (header == NULL) {
+ event_warn("%s: calloc", __func__);
+ return (-1);
+ }
+ if ((header->key = mm_strdup(key)) == NULL) {
+ mm_free(header);
+ event_warn("%s: strdup", __func__);
+ return (-1);
+ }
+ if ((header->value = mm_strdup(value)) == NULL) {
+ mm_free(header->key);
+ mm_free(header);
+ event_warn("%s: strdup", __func__);
+ return (-1);
+ }
+
+ TAILQ_INSERT_TAIL(headers, header, next);
+
+ return (0);
+}
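+
+/*
+ * Illustrative sketch (not part of the library): how a caller would
+ * typically use the evkeyvalq helpers above on a request's output
+ * headers.  The request pointer "req" is assumed to come from a server
+ * callback or from evhttp_request_new().
+ */
+#if 0
+static void
+example_header_usage(struct evhttp_request *req)
+{
+ struct evkeyvalq *out = evhttp_request_get_output_headers(req);
+
+ /* evhttp_add_header() rejects keys or values with stray CR or LF */
+ evhttp_add_header(out, "Content-Type", "text/plain");
+ evhttp_add_header(out, "X-Example", "value");
+
+ /* lookup is case-insensitive and returns NULL when missing */
+ if (evhttp_find_header(out, "x-example") != NULL)
+ evhttp_remove_header(out, "X-Example");
+}
+#endif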
+
+/*
+ * Parses the first line or the header lines of a request or a response
+ * into the specified request object given an event buffer.
+ *
+ * Returns
+ *   DATA_CORRUPTED      on error
+ *   DATA_TOO_LONG       when the headers exceed the configured maximum size
+ *   MORE_DATA_EXPECTED  when we need to read more headers
+ *   ALL_DATA_READ       when all headers have been read.
+ */
+
+enum message_read_status
+evhttp_parse_firstline_(struct evhttp_request *req, struct evbuffer *buffer)
+{
+ char *line;
+ enum message_read_status status = ALL_DATA_READ;
+
+ size_t line_length;
+ /* XXX try */
+ line = evbuffer_readln(buffer, &line_length, EVBUFFER_EOL_CRLF);
+ if (line == NULL) {
+ if (req->evcon != NULL &&
+ evbuffer_get_length(buffer) > req->evcon->max_headers_size)
+ return (DATA_TOO_LONG);
+ else
+ return (MORE_DATA_EXPECTED);
+ }
+
+ if (req->evcon != NULL &&
+ line_length > req->evcon->max_headers_size) {
+ mm_free(line);
+ return (DATA_TOO_LONG);
+ }
+
+ req->headers_size = line_length;
+
+ switch (req->kind) {
+ case EVHTTP_REQUEST:
+ if (evhttp_parse_request_line(req, line) == -1)
+ status = DATA_CORRUPTED;
+ break;
+ case EVHTTP_RESPONSE:
+ if (evhttp_parse_response_line(req, line) == -1)
+ status = DATA_CORRUPTED;
+ break;
+ default:
+ status = DATA_CORRUPTED;
+ }
+
+ mm_free(line);
+ return (status);
+}
+
+static int
+evhttp_append_to_last_header(struct evkeyvalq *headers, char *line)
+{
+ struct evkeyval *header = TAILQ_LAST(headers, evkeyvalq);
+ char *newval;
+ size_t old_len, line_len;
+
+ if (header == NULL)
+ return (-1);
+
+ old_len = strlen(header->value);
+
+ /* Strip space from start and end of line. */
+ while (*line == ' ' || *line == '\t')
+ ++line;
+ evutil_rtrim_lws_(line);
+
+ line_len = strlen(line);
+
+ newval = mm_realloc(header->value, old_len + line_len + 2);
+ if (newval == NULL)
+ return (-1);
+
+ newval[old_len] = ' ';
+ memcpy(newval + old_len + 1, line, line_len + 1);
+ header->value = newval;
+
+ return (0);
+}
+
+enum message_read_status
+evhttp_parse_headers_(struct evhttp_request *req, struct evbuffer* buffer)
+{
+ enum message_read_status errcode = DATA_CORRUPTED;
+ char *line;
+ enum message_read_status status = MORE_DATA_EXPECTED;
+
+ struct evkeyvalq* headers = req->input_headers;
+ size_t line_length;
+ while ((line = evbuffer_readln(buffer, &line_length, EVBUFFER_EOL_CRLF))
+ != NULL) {
+ char *skey, *svalue;
+
+ req->headers_size += line_length;
+
+ if (req->evcon != NULL &&
+ req->headers_size > req->evcon->max_headers_size) {
+ errcode = DATA_TOO_LONG;
+ goto error;
+ }
+
+ if (*line == '\0') { /* Last header - Done */
+ status = ALL_DATA_READ;
+ mm_free(line);
+ break;
+ }
+
+ /* Check if this is a continuation line */
+ if (*line == ' ' || *line == '\t') {
+ if (evhttp_append_to_last_header(headers, line) == -1)
+ goto error;
+ mm_free(line);
+ continue;
+ }
+
+ /* Processing of header lines */
+ svalue = line;
+ skey = strsep(&svalue, ":");
+ if (svalue == NULL)
+ goto error;
+
+ svalue += strspn(svalue, " ");
+ evutil_rtrim_lws_(svalue);
+
+ if (evhttp_add_header(headers, skey, svalue) == -1)
+ goto error;
+
+ mm_free(line);
+ }
+
+ if (status == MORE_DATA_EXPECTED) {
+ if (req->evcon != NULL &&
+ req->headers_size + evbuffer_get_length(buffer) > req->evcon->max_headers_size)
+ return (DATA_TOO_LONG);
+ }
+
+ return (status);
+
+ error:
+ mm_free(line);
+ return (errcode);
+}
+
+static int
+evhttp_get_body_length(struct evhttp_request *req)
+{
+ struct evkeyvalq *headers = req->input_headers;
+ const char *content_length;
+ const char *connection;
+
+ content_length = evhttp_find_header(headers, "Content-Length");
+ connection = evhttp_find_header(headers, "Connection");
+
+ if (content_length == NULL && connection == NULL)
+ req->ntoread = -1;
+ else if (content_length == NULL &&
+ evutil_ascii_strcasecmp(connection, "Close") != 0) {
+ /* Bad combination, we don't know when it will end */
+ event_warnx("%s: we got no content length, but the "
+ "server wants to keep the connection open: %s.",
+ __func__, connection);
+ return (-1);
+ } else if (content_length == NULL) {
+ req->ntoread = -1;
+ } else {
+ char *endp;
+ ev_int64_t ntoread = evutil_strtoll(content_length, &endp, 10);
+ if (*content_length == '\0' || *endp != '\0' || ntoread < 0) {
+ event_debug(("%s: illegal content length: %s",
+ __func__, content_length));
+ return (-1);
+ }
+ req->ntoread = ntoread;
+ }
+
+ event_debug(("%s: bytes to read: "EV_I64_FMT" (in buffer "EV_SIZE_FMT")\n",
+ __func__, EV_I64_ARG(req->ntoread),
+ EV_SIZE_ARG(evbuffer_get_length(bufferevent_get_input(req->evcon->bufev)))));
+
+ return (0);
+}
+
+static int
+evhttp_method_may_have_body(enum evhttp_cmd_type type)
+{
+ switch (type) {
+ case EVHTTP_REQ_POST:
+ case EVHTTP_REQ_PUT:
+ case EVHTTP_REQ_PATCH:
+ return 1;
+ case EVHTTP_REQ_TRACE:
+ return 0;
+ /* XXX May any of the below methods have a body? */
+ case EVHTTP_REQ_GET:
+ case EVHTTP_REQ_HEAD:
+ case EVHTTP_REQ_DELETE:
+ case EVHTTP_REQ_OPTIONS:
+ case EVHTTP_REQ_CONNECT:
+ return 0;
+ default:
+ return 0;
+ }
+}
+
+static void
+evhttp_get_body(struct evhttp_connection *evcon, struct evhttp_request *req)
+{
+ const char *xfer_enc;
+
+ /* If this is a request without a body, then we are done */
+ if (req->kind == EVHTTP_REQUEST &&
+ !evhttp_method_may_have_body(req->type)) {
+ evhttp_connection_done(evcon);
+ return;
+ }
+ evcon->state = EVCON_READING_BODY;
+ xfer_enc = evhttp_find_header(req->input_headers, "Transfer-Encoding");
+ if (xfer_enc != NULL && evutil_ascii_strcasecmp(xfer_enc, "chunked") == 0) {
+ req->chunked = 1;
+ req->ntoread = -1;
+ } else {
+ if (evhttp_get_body_length(req) == -1) {
+ evhttp_connection_fail_(evcon,
+ EVREQ_HTTP_INVALID_HEADER);
+ return;
+ }
+ if (req->kind == EVHTTP_REQUEST && req->ntoread < 1) {
+ /* An incoming request with no content-length and no
+ * transfer-encoding has no body. */
+ evhttp_connection_done(evcon);
+ return;
+ }
+ }
+
+ /* Should we send a 100 Continue status line? */
+ if (req->kind == EVHTTP_REQUEST && REQ_VERSION_ATLEAST(req, 1, 1)) {
+ const char *expect;
+
+ expect = evhttp_find_header(req->input_headers, "Expect");
+ if (expect) {
+ if (!evutil_ascii_strcasecmp(expect, "100-continue")) {
+ /* XXX It would be nice to do some sanity
+ checking here. Does the resource exist?
+ Should the resource accept post requests? If
+ not, we should respond with an error. For
+ now, just optimistically tell the client to
+ send their message body. */
+ if (req->ntoread > 0) {
+ /* ntoread is ev_int64_t, max_body_size is ev_uint64_t */
+ if ((req->evcon->max_body_size <= EV_INT64_MAX) && (ev_uint64_t)req->ntoread > req->evcon->max_body_size) {
+ evhttp_send_error(req, HTTP_ENTITYTOOLARGE, NULL);
+ return;
+ }
+ }
+ if (!evbuffer_get_length(bufferevent_get_input(evcon->bufev)))
+ evhttp_send_continue(evcon, req);
+ } else {
+ evhttp_send_error(req, HTTP_EXPECTATIONFAILED,
+ NULL);
+ return;
+ }
+ }
+ }
+
+ evhttp_read_body(evcon, req);
+ /* note the request may have been freed in evhttp_read_body */
+}
+
+static void
+evhttp_read_firstline(struct evhttp_connection *evcon,
+ struct evhttp_request *req)
+{
+ enum message_read_status res;
+
+ res = evhttp_parse_firstline_(req, bufferevent_get_input(evcon->bufev));
+ if (res == DATA_CORRUPTED || res == DATA_TOO_LONG) {
+ /* Error while reading, terminate */
+ event_debug(("%s: bad header lines on "EV_SOCK_FMT"\n",
+ __func__, EV_SOCK_ARG(evcon->fd)));
+ evhttp_connection_fail_(evcon, EVREQ_HTTP_INVALID_HEADER);
+ return;
+ } else if (res == MORE_DATA_EXPECTED) {
+ /* Need more header lines */
+ return;
+ }
+
+ evcon->state = EVCON_READING_HEADERS;
+ evhttp_read_header(evcon, req);
+}
+
+static void
+evhttp_read_header(struct evhttp_connection *evcon,
+ struct evhttp_request *req)
+{
+ enum message_read_status res;
+ evutil_socket_t fd = evcon->fd;
+
+ res = evhttp_parse_headers_(req, bufferevent_get_input(evcon->bufev));
+ if (res == DATA_CORRUPTED || res == DATA_TOO_LONG) {
+ /* Error while reading, terminate */
+ event_debug(("%s: bad header lines on "EV_SOCK_FMT"\n",
+ __func__, EV_SOCK_ARG(fd)));
+ evhttp_connection_fail_(evcon, EVREQ_HTTP_INVALID_HEADER);
+ return;
+ } else if (res == MORE_DATA_EXPECTED) {
+ /* Need more header lines */
+ return;
+ }
+
+ /* Callback can shut down connection with negative return value */
+ if (req->header_cb != NULL) {
+ if ((*req->header_cb)(req, req->cb_arg) < 0) {
+ evhttp_connection_fail_(evcon, EVREQ_HTTP_EOF);
+ return;
+ }
+ }
+
+ /* Done reading headers, do the real work */
+ switch (req->kind) {
+ case EVHTTP_REQUEST:
+ event_debug(("%s: checking for post data on "EV_SOCK_FMT"\n",
+ __func__, EV_SOCK_ARG(fd)));
+ evhttp_get_body(evcon, req);
+ /* note the request may have been freed in evhttp_get_body */
+ break;
+
+ case EVHTTP_RESPONSE:
+ /* Start over if we got a 100 Continue response. */
+ if (req->response_code == 100) {
+ evhttp_start_read_(evcon);
+ return;
+ }
+ if (!evhttp_response_needs_body(req)) {
+ event_debug(("%s: skipping body for code %d\n",
+ __func__, req->response_code));
+ evhttp_connection_done(evcon);
+ } else {
+ event_debug(("%s: start of read body for %s on "
+ EV_SOCK_FMT"\n",
+ __func__, req->remote_host, EV_SOCK_ARG(fd)));
+ evhttp_get_body(evcon, req);
+ /* note the request may have been freed in
+ * evhttp_get_body */
+ }
+ break;
+
+ default:
+ event_warnx("%s: bad header on "EV_SOCK_FMT, __func__,
+ EV_SOCK_ARG(fd));
+ evhttp_connection_fail_(evcon, EVREQ_HTTP_INVALID_HEADER);
+ break;
+ }
+ /* request may have been freed above */
+}
+
+/*
+ * Creates a TCP connection to the specified port and executes a callback
+ * when finished. Failure or success is indicated by the passed connection
+ * object.
+ *
+ * Although this interface accepts a hostname, it is intended to take
+ * only numeric hostnames so that non-blocking DNS resolution can
+ * happen elsewhere.
+ */
+
+struct evhttp_connection *
+evhttp_connection_new(const char *address, unsigned short port)
+{
+ return (evhttp_connection_base_new(NULL, NULL, address, port));
+}
+
+struct evhttp_connection *
+evhttp_connection_base_bufferevent_new(struct event_base *base, struct evdns_base *dnsbase, struct bufferevent* bev,
+ const char *address, unsigned short port)
+{
+ struct evhttp_connection *evcon = NULL;
+
+ event_debug(("Attempting connection to %s:%d\n", address, port));
+
+ if ((evcon = mm_calloc(1, sizeof(struct evhttp_connection))) == NULL) {
+ event_warn("%s: calloc failed", __func__);
+ goto error;
+ }
+
+ evcon->fd = -1;
+ evcon->port = port;
+
+ evcon->max_headers_size = EV_SIZE_MAX;
+ evcon->max_body_size = EV_SIZE_MAX;
+
+ evutil_timerclear(&evcon->timeout);
+ evcon->retry_cnt = evcon->retry_max = 0;
+
+ if ((evcon->address = mm_strdup(address)) == NULL) {
+ event_warn("%s: strdup failed", __func__);
+ goto error;
+ }
+
+ if (bev == NULL) {
+ if (!(bev = bufferevent_socket_new(base, -1, 0))) {
+ event_warn("%s: bufferevent_socket_new failed", __func__);
+ goto error;
+ }
+ }
+
+ bufferevent_setcb(bev, evhttp_read_cb, evhttp_write_cb, evhttp_error_cb, evcon);
+ evcon->bufev = bev;
+
+ evcon->state = EVCON_DISCONNECTED;
+ TAILQ_INIT(&evcon->requests);
+
+ evcon->initial_retry_timeout.tv_sec = 2;
+ evcon->initial_retry_timeout.tv_usec = 0;
+
+ if (base != NULL) {
+ evcon->base = base;
+ if (bufferevent_get_base(bev) != base)
+ bufferevent_base_set(base, evcon->bufev);
+ }
+
+ event_deferred_cb_init_(
+ &evcon->read_more_deferred_cb,
+ bufferevent_get_priority(bev),
+ evhttp_deferred_read_cb, evcon);
+
+ evcon->dns_base = dnsbase;
+ evcon->ai_family = AF_UNSPEC;
+
+ return (evcon);
+
+ error:
+ if (evcon != NULL)
+ evhttp_connection_free(evcon);
+ return (NULL);
+}
+
+struct bufferevent* evhttp_connection_get_bufferevent(struct evhttp_connection *evcon)
+{
+ return evcon->bufev;
+}
+
+struct evhttp *
+evhttp_connection_get_server(struct evhttp_connection *evcon)
+{
+ return evcon->http_server;
+}
+
+struct evhttp_connection *
+evhttp_connection_base_new(struct event_base *base, struct evdns_base *dnsbase,
+ const char *address, unsigned short port)
+{
+ return evhttp_connection_base_bufferevent_new(base, dnsbase, NULL, address, port);
+}
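+
+/*
+ * Illustrative sketch (not part of the library): creating an outgoing
+ * connection object.  Per the comment above evhttp_connection_new(),
+ * passing a numeric address avoids a blocking DNS lookup; supplying a
+ * non-NULL evdns_base enables asynchronous resolution instead.  "base"
+ * is an event_base owned by the caller; the address and port are
+ * placeholders.
+ */
+#if 0
+static struct evhttp_connection *
+example_make_connection(struct event_base *base)
+{
+ struct evhttp_connection *evcon =
+ evhttp_connection_base_new(base, NULL, "127.0.0.1", 8080);
+ if (evcon == NULL)
+ return NULL;
+ /* optional tuning; library defaults apply when these are not called */
+ evhttp_connection_set_timeout(evcon, 30);
+ evhttp_connection_set_retries(evcon, 2);
+ return evcon;
+}
+#endif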
+
+void evhttp_connection_set_family(struct evhttp_connection *evcon,
+ int family)
+{
+ evcon->ai_family = family;
+}
+
+int evhttp_connection_set_flags(struct evhttp_connection *evcon,
+ int flags)
+{
+ int avail_flags = 0;
+ avail_flags |= EVHTTP_CON_REUSE_CONNECTED_ADDR;
+
+ if (flags & ~avail_flags || flags > EVHTTP_CON_PUBLIC_FLAGS_END)
+ return 1;
+ evcon->flags &= ~avail_flags;
+
+ evcon->flags |= flags;
+
+ return 0;
+}
+
+void
+evhttp_connection_set_base(struct evhttp_connection *evcon,
+ struct event_base *base)
+{
+ EVUTIL_ASSERT(evcon->base == NULL);
+ EVUTIL_ASSERT(evcon->state == EVCON_DISCONNECTED);
+ evcon->base = base;
+ bufferevent_base_set(base, evcon->bufev);
+}
+
+void
+evhttp_connection_set_timeout(struct evhttp_connection *evcon,
+ int timeout_in_secs)
+{
+ if (timeout_in_secs == -1)
+ evhttp_connection_set_timeout_tv(evcon, NULL);
+ else {
+ struct timeval tv;
+ tv.tv_sec = timeout_in_secs;
+ tv.tv_usec = 0;
+ evhttp_connection_set_timeout_tv(evcon, &tv);
+ }
+}
+
+void
+evhttp_connection_set_timeout_tv(struct evhttp_connection *evcon,
+ const struct timeval* tv)
+{
+ if (tv) {
+ evcon->timeout = *tv;
+ bufferevent_set_timeouts(evcon->bufev, &evcon->timeout, &evcon->timeout);
+ } else {
+ const struct timeval read_tv = { HTTP_READ_TIMEOUT, 0 };
+ const struct timeval write_tv = { HTTP_WRITE_TIMEOUT, 0 };
+ evutil_timerclear(&evcon->timeout);
+ bufferevent_set_timeouts(evcon->bufev, &read_tv, &write_tv);
+ }
+}
+
+void
+evhttp_connection_set_initial_retry_tv(struct evhttp_connection *evcon,
+ const struct timeval *tv)
+{
+ if (tv) {
+ evcon->initial_retry_timeout = *tv;
+ } else {
+ evutil_timerclear(&evcon->initial_retry_timeout);
+ evcon->initial_retry_timeout.tv_sec = 2;
+ }
+}
+
+void
+evhttp_connection_set_retries(struct evhttp_connection *evcon,
+ int retry_max)
+{
+ evcon->retry_max = retry_max;
+}
+
+void
+evhttp_connection_set_closecb(struct evhttp_connection *evcon,
+ void (*cb)(struct evhttp_connection *, void *), void *cbarg)
+{
+ evcon->closecb = cb;
+ evcon->closecb_arg = cbarg;
+}
+
+void
+evhttp_connection_get_peer(struct evhttp_connection *evcon,
+ char **address, ev_uint16_t *port)
+{
+ *address = evcon->address;
+ *port = evcon->port;
+}
+
+const struct sockaddr*
+evhttp_connection_get_addr(struct evhttp_connection *evcon)
+{
+ return bufferevent_socket_get_conn_address_(evcon->bufev);
+}
+
+int
+evhttp_connection_connect_(struct evhttp_connection *evcon)
+{
+ int old_state = evcon->state;
+ const char *address = evcon->address;
+ const struct sockaddr *sa = evhttp_connection_get_addr(evcon);
+ int ret;
+
+ if (evcon->state == EVCON_CONNECTING)
+ return (0);
+
+ evhttp_connection_reset_(evcon);
+
+ EVUTIL_ASSERT(!(evcon->flags & EVHTTP_CON_INCOMING));
+ evcon->flags |= EVHTTP_CON_OUTGOING;
+
+ if (evcon->bind_address || evcon->bind_port) {
+ evcon->fd = bind_socket(
+ evcon->bind_address, evcon->bind_port, 0 /*reuse*/);
+ if (evcon->fd == -1) {
+ event_debug(("%s: failed to bind to \"%s\"",
+ __func__, evcon->bind_address));
+ return (-1);
+ }
+
+ bufferevent_setfd(evcon->bufev, evcon->fd);
+ } else {
+ bufferevent_setfd(evcon->bufev, -1);
+ }
+
+ /* Set up a callback for successful connection setup */
+ bufferevent_setcb(evcon->bufev,
+ NULL /* evhttp_read_cb */,
+ NULL /* evhttp_write_cb */,
+ evhttp_connection_cb,
+ evcon);
+ if (!evutil_timerisset(&evcon->timeout)) {
+ const struct timeval conn_tv = { HTTP_CONNECT_TIMEOUT, 0 };
+ bufferevent_set_timeouts(evcon->bufev, &conn_tv, &conn_tv);
+ } else {
+ bufferevent_set_timeouts(evcon->bufev, &evcon->timeout, &evcon->timeout);
+ }
+ /* make sure that we get a write callback */
+ bufferevent_enable(evcon->bufev, EV_WRITE);
+
+ evcon->state = EVCON_CONNECTING;
+
+ if (evcon->flags & EVHTTP_CON_REUSE_CONNECTED_ADDR &&
+ sa &&
+ (sa->sa_family == AF_INET || sa->sa_family == AF_INET6)) {
+ int socklen = sizeof(struct sockaddr_in);
+ if (sa->sa_family == AF_INET6) {
+ socklen = sizeof(struct sockaddr_in6);
+ }
+ ret = bufferevent_socket_connect(evcon->bufev, sa, socklen);
+ } else {
+ ret = bufferevent_socket_connect_hostname(evcon->bufev,
+ evcon->dns_base, evcon->ai_family, address, evcon->port);
+ }
+
+ if (ret < 0) {
+ evcon->state = old_state;
+ event_sock_warn(evcon->fd, "%s: connection to \"%s\" failed",
+ __func__, evcon->address);
+ /* Some operating systems return ECONNREFUSED immediately
+ * when connecting to a local address. The cleanup is going
+ * to reschedule this function call.
+ */
+ evhttp_connection_cb_cleanup(evcon);
+ return (0);
+ }
+
+ return (0);
+}
+
+/*
+ * Starts an HTTP request on the provided evhttp_connection object.
+ * If the connection object is not connected to the web server already,
+ * this will start the connection.
+ */
+
+int
+evhttp_make_request(struct evhttp_connection *evcon,
+ struct evhttp_request *req,
+ enum evhttp_cmd_type type, const char *uri)
+{
+ /* We are making a request */
+ req->kind = EVHTTP_REQUEST;
+ req->type = type;
+ if (req->uri != NULL)
+ mm_free(req->uri);
+ if ((req->uri = mm_strdup(uri)) == NULL) {
+ event_warn("%s: strdup", __func__);
+ evhttp_request_free_auto(req);
+ return (-1);
+ }
+
+ /* Set the protocol version if it is not supplied */
+ if (!req->major && !req->minor) {
+ req->major = 1;
+ req->minor = 1;
+ }
+
+ EVUTIL_ASSERT(req->evcon == NULL);
+ req->evcon = evcon;
+ EVUTIL_ASSERT(!(req->flags & EVHTTP_REQ_OWN_CONNECTION));
+
+ TAILQ_INSERT_TAIL(&evcon->requests, req, next);
+
+ /* If the connection object is not connected; make it so */
+ if (!evhttp_connected(evcon)) {
+ int res = evhttp_connection_connect_(evcon);
+ /* evhttp_connection_fail_(), which is called through
+ * evhttp_connection_connect_(), assumes that req lies in
+ * evcon->requests. Thus, enqueue the request in advance and
+ * remove it in the error case. */
+ if (res != 0)
+ TAILQ_REMOVE(&evcon->requests, req, next);
+
+ return res;
+ }
+
+ /*
+ * If it's connected already and we are the first in the queue,
+ * then we can dispatch this request immediately. Otherwise, it
+ * will be dispatched once the pending requests are completed.
+ */
+ if (TAILQ_FIRST(&evcon->requests) == req)
+ evhttp_request_dispatch(evcon);
+
+ return (0);
+}
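+
+/*
+ * Illustrative sketch (not part of the library): issuing a GET request
+ * on a connection created as in the sketch above.  The callback name and
+ * the Host value are placeholders; req may be NULL in the callback if the
+ * request failed at the connection level.
+ */
+#if 0
+static void
+example_request_done(struct evhttp_request *req, void *arg)
+{
+ if (req != NULL)
+ printf("HTTP %d\n", evhttp_request_get_response_code(req));
+ event_base_loopexit((struct event_base *)arg, NULL);
+}
+
+static int
+example_send_get(struct event_base *base, struct evhttp_connection *evcon)
+{
+ struct evhttp_request *req =
+ evhttp_request_new(example_request_done, base);
+ if (req == NULL)
+ return -1;
+ /* HTTP/1.1 requests need a Host header */
+ evhttp_add_header(evhttp_request_get_output_headers(req),
+ "Host", "127.0.0.1");
+ /* on success the connection takes ownership of req */
+ return evhttp_make_request(evcon, req, EVHTTP_REQ_GET, "/");
+}
+#endif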
+
+void
+evhttp_cancel_request(struct evhttp_request *req)
+{
+ struct evhttp_connection *evcon = req->evcon;
+ if (evcon != NULL) {
+ /* We need to remove it from the connection */
+ if (TAILQ_FIRST(&evcon->requests) == req) {
+ /* it's currently being worked on, so reset
+ * the connection.
+ */
+ evhttp_connection_fail_(evcon,
+ EVREQ_HTTP_REQUEST_CANCEL);
+
+ /* connection fail freed the request */
+ return;
+ } else {
+ /* otherwise, we can just remove it from the
+ * queue
+ */
+ TAILQ_REMOVE(&evcon->requests, req, next);
+ }
+ }
+
+ evhttp_request_free_auto(req);
+}
+
+/*
+ * Reads data from the file descriptor into the request structure.
+ * The request structure needs to be set up correctly.
+ */
+
+void
+evhttp_start_read_(struct evhttp_connection *evcon)
+{
+ bufferevent_disable(evcon->bufev, EV_WRITE);
+ bufferevent_enable(evcon->bufev, EV_READ);
+
+ evcon->state = EVCON_READING_FIRSTLINE;
+ /* Reset the bufferevent callbacks */
+ bufferevent_setcb(evcon->bufev,
+ evhttp_read_cb,
+ evhttp_write_cb,
+ evhttp_error_cb,
+ evcon);
+
+ /* If there's still data pending, process it next time through the
+ * loop. Don't do it now; that could get recursive. */
+ if (evbuffer_get_length(bufferevent_get_input(evcon->bufev))) {
+ event_deferred_cb_schedule_(get_deferred_queue(evcon),
+ &evcon->read_more_deferred_cb);
+ }
+}
+
+static void
+evhttp_send_done(struct evhttp_connection *evcon, void *arg)
+{
+ int need_close;
+ struct evhttp_request *req = TAILQ_FIRST(&evcon->requests);
+ TAILQ_REMOVE(&evcon->requests, req, next);
+
+ if (req->on_complete_cb != NULL) {
+ req->on_complete_cb(req, req->on_complete_cb_arg);
+ }
+
+ need_close =
+ (REQ_VERSION_BEFORE(req, 1, 1) &&
+ !evhttp_is_connection_keepalive(req->input_headers)) ||
+ evhttp_is_request_connection_close(req);
+
+ EVUTIL_ASSERT(req->flags & EVHTTP_REQ_OWN_CONNECTION);
+ evhttp_request_free(req);
+
+ if (need_close) {
+ evhttp_connection_free(evcon);
+ return;
+ }
+
+ /* we have a persistent connection; try to accept another request. */
+ if (evhttp_associate_new_request_with_connection(evcon) == -1) {
+ evhttp_connection_free(evcon);
+ }
+}
+
+/*
+ * Sends an error page to the client.
+ */
+
+void
+evhttp_send_error(struct evhttp_request *req, int error, const char *reason)
+{
+
+#define ERR_FORMAT "<HTML><HEAD>\n" \
+ "<TITLE>%d %s</TITLE>\n" \
+ "</HEAD><BODY>\n" \
+ "<H1>%s</H1>\n" \
+ "</BODY></HTML>\n"
+
+ struct evbuffer *buf = evbuffer_new();
+ if (buf == NULL) {
+ /* if we cannot allocate memory, we just drop the connection */
+ evhttp_connection_free(req->evcon);
+ return;
+ }
+ if (reason == NULL) {
+ reason = evhttp_response_phrase_internal(error);
+ }
+
+ evhttp_response_code_(req, error, reason);
+
+ evbuffer_add_printf(buf, ERR_FORMAT, error, reason, reason);
+
+ evhttp_send_page_(req, buf);
+
+ evbuffer_free(buf);
+#undef ERR_FORMAT
+}
+
+/* Requires that headers and response code are already set up */
+
+static inline void
+evhttp_send(struct evhttp_request *req, struct evbuffer *databuf)
+{
+ struct evhttp_connection *evcon = req->evcon;
+
+ if (evcon == NULL) {
+ evhttp_request_free(req);
+ return;
+ }
+
+ EVUTIL_ASSERT(TAILQ_FIRST(&evcon->requests) == req);
+
+ /* we expect no more calls from the user on this request */
+ req->userdone = 1;
+
+ /* xxx: not sure if we really should expose the data buffer this way */
+ if (databuf != NULL)
+ evbuffer_add_buffer(req->output_buffer, databuf);
+
+ /* Adds headers to the response */
+ evhttp_make_header(evcon, req);
+
+ evhttp_write_buffer(evcon, evhttp_send_done, NULL);
+}
+
+void
+evhttp_send_reply(struct evhttp_request *req, int code, const char *reason,
+ struct evbuffer *databuf)
+{
+ evhttp_response_code_(req, code, reason);
+
+ evhttp_send(req, databuf);
+}
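+
+/*
+ * Illustrative sketch (not part of the library): a server-side callback
+ * answering a request with a complete body via evhttp_send_reply().
+ * The callback name is a placeholder.
+ */
+#if 0
+static void
+example_hello_cb(struct evhttp_request *req, void *arg)
+{
+ struct evbuffer *body = evbuffer_new();
+ if (body == NULL) {
+ evhttp_send_error(req, HTTP_INTERNAL, NULL);
+ return;
+ }
+ evbuffer_add_printf(body, "hello from %s\n",
+ evhttp_request_get_uri(req));
+ /* the body buffer is drained into the reply; the caller still frees it */
+ evhttp_send_reply(req, HTTP_OK, "OK", body);
+ evbuffer_free(body);
+}
+#endif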
+
+void
+evhttp_send_reply_start(struct evhttp_request *req, int code,
+ const char *reason)
+{
+ evhttp_response_code_(req, code, reason);
+ if (evhttp_find_header(req->output_headers, "Content-Length") == NULL &&
+ REQ_VERSION_ATLEAST(req, 1, 1) &&
+ evhttp_response_needs_body(req)) {
+ /*
+ * prefer HTTP/1.1 chunked encoding to closing the connection;
+ * note RFC 2616 section 4.4 forbids it with Content-Length:
+ * and it's not necessary then anyway.
+ */
+ evhttp_add_header(req->output_headers, "Transfer-Encoding",
+ "chunked");
+ req->chunked = 1;
+ } else {
+ req->chunked = 0;
+ }
+ evhttp_make_header(req->evcon, req);
+ evhttp_write_buffer(req->evcon, NULL, NULL);
+}
+
+void
+evhttp_send_reply_chunk_with_cb(struct evhttp_request *req, struct evbuffer *databuf,
+ void (*cb)(struct evhttp_connection *, void *), void *arg)
+{
+ struct evhttp_connection *evcon = req->evcon;
+ struct evbuffer *output;
+
+ if (evcon == NULL)
+ return;
+
+ output = bufferevent_get_output(evcon->bufev);
+
+ if (evbuffer_get_length(databuf) == 0)
+ return;
+ if (!evhttp_response_needs_body(req))
+ return;
+ if (req->chunked) {
+ evbuffer_add_printf(output, "%x\r\n",
+ (unsigned)evbuffer_get_length(databuf));
+ }
+ evbuffer_add_buffer(output, databuf);
+ if (req->chunked) {
+ evbuffer_add(output, "\r\n", 2);
+ }
+ evhttp_write_buffer(evcon, cb, arg);
+}
+
+void
+evhttp_send_reply_chunk(struct evhttp_request *req, struct evbuffer *databuf)
+{
+ evhttp_send_reply_chunk_with_cb(req, databuf, NULL, NULL);
+}
+void
+evhttp_send_reply_end(struct evhttp_request *req)
+{
+ struct evhttp_connection *evcon = req->evcon;
+ struct evbuffer *output;
+
+ if (evcon == NULL) {
+ evhttp_request_free(req);
+ return;
+ }
+
+ output = bufferevent_get_output(evcon->bufev);
+
+ /* we expect no more calls from the user on this request */
+ req->userdone = 1;
+
+ if (req->chunked) {
+ evbuffer_add(output, "0\r\n\r\n", 5);
+ evhttp_write_buffer(req->evcon, evhttp_send_done, NULL);
+ req->chunked = 0;
+ } else if (evbuffer_get_length(output) == 0) {
+ /* let the connection know that we are done with the request */
+ evhttp_send_done(evcon, NULL);
+ } else {
+ /* make the callback execute after all data has been written */
+ evcon->cb = evhttp_send_done;
+ evcon->cb_arg = NULL;
+ }
+}
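+
+/*
+ * Illustrative sketch (not part of the library): streaming a response in
+ * pieces with the start/chunk/end API above.  On HTTP/1.1 without a
+ * Content-Length header this uses chunked transfer-encoding, as described
+ * in evhttp_send_reply_start().  The callback name is a placeholder.
+ */
+#if 0
+static void
+example_streaming_cb(struct evhttp_request *req, void *arg)
+{
+ struct evbuffer *chunk = evbuffer_new();
+ int i;
+
+ if (chunk == NULL) {
+ evhttp_send_error(req, HTTP_INTERNAL, NULL);
+ return;
+ }
+ evhttp_send_reply_start(req, HTTP_OK, "OK");
+ for (i = 0; i < 3; i++) {
+ evbuffer_add_printf(chunk, "chunk %d\n", i);
+ evhttp_send_reply_chunk(req, chunk); /* drains "chunk" */
+ }
+ evhttp_send_reply_end(req);
+ evbuffer_free(chunk);
+}
+#endif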
+
+static const char *informational_phrases[] = {
+ /* 100 */ "Continue",
+ /* 101 */ "Switching Protocols"
+};
+
+static const char *success_phrases[] = {
+ /* 200 */ "OK",
+ /* 201 */ "Created",
+ /* 202 */ "Accepted",
+ /* 203 */ "Non-Authoritative Information",
+ /* 204 */ "No Content",
+ /* 205 */ "Reset Content",
+ /* 206 */ "Partial Content"
+};
+
+static const char *redirection_phrases[] = {
+ /* 300 */ "Multiple Choices",
+ /* 301 */ "Moved Permanently",
+ /* 302 */ "Found",
+ /* 303 */ "See Other",
+ /* 304 */ "Not Modified",
+ /* 305 */ "Use Proxy",
+ /* 307 */ "Temporary Redirect"
+};
+
+static const char *client_error_phrases[] = {
+ /* 400 */ "Bad Request",
+ /* 401 */ "Unauthorized",
+ /* 402 */ "Payment Required",
+ /* 403 */ "Forbidden",
+ /* 404 */ "Not Found",
+ /* 405 */ "Method Not Allowed",
+ /* 406 */ "Not Acceptable",
+ /* 407 */ "Proxy Authentication Required",
+ /* 408 */ "Request Time-out",
+ /* 409 */ "Conflict",
+ /* 410 */ "Gone",
+ /* 411 */ "Length Required",
+ /* 412 */ "Precondition Failed",
+ /* 413 */ "Request Entity Too Large",
+ /* 414 */ "Request-URI Too Large",
+ /* 415 */ "Unsupported Media Type",
+ /* 416 */ "Requested range not satisfiable",
+ /* 417 */ "Expectation Failed"
+};
+
+static const char *server_error_phrases[] = {
+ /* 500 */ "Internal Server Error",
+ /* 501 */ "Not Implemented",
+ /* 502 */ "Bad Gateway",
+ /* 503 */ "Service Unavailable",
+ /* 504 */ "Gateway Time-out",
+ /* 505 */ "HTTP Version not supported"
+};
+
+struct response_class {
+ const char *name;
+ size_t num_responses;
+ const char **responses;
+};
+
+#ifndef MEMBERSOF
+#define MEMBERSOF(x) (sizeof(x)/sizeof(x[0]))
+#endif
+
+static const struct response_class response_classes[] = {
+ /* 1xx */ { "Informational", MEMBERSOF(informational_phrases), informational_phrases },
+ /* 2xx */ { "Success", MEMBERSOF(success_phrases), success_phrases },
+ /* 3xx */ { "Redirection", MEMBERSOF(redirection_phrases), redirection_phrases },
+ /* 4xx */ { "Client Error", MEMBERSOF(client_error_phrases), client_error_phrases },
+ /* 5xx */ { "Server Error", MEMBERSOF(server_error_phrases), server_error_phrases }
+};
+
+static const char *
+evhttp_response_phrase_internal(int code)
+{
+ int klass = code / 100 - 1;
+ int subcode = code % 100;
+
+ /* Unknown class - can't do any better here */
+ if (klass < 0 || klass >= (int) MEMBERSOF(response_classes))
+ return "Unknown Status Class";
+
+ /* Unknown sub-code, return class name at least */
+ if (subcode >= (int) response_classes[klass].num_responses)
+ return response_classes[klass].name;
+
+ return response_classes[klass].responses[subcode];
+}
+
+void
+evhttp_response_code_(struct evhttp_request *req, int code, const char *reason)
+{
+ req->kind = EVHTTP_RESPONSE;
+ req->response_code = code;
+ if (req->response_code_line != NULL)
+ mm_free(req->response_code_line);
+ if (reason == NULL)
+ reason = evhttp_response_phrase_internal(code);
+ req->response_code_line = mm_strdup(reason);
+ if (req->response_code_line == NULL) {
+ event_warn("%s: strdup", __func__);
+ /* XXX what else can we do? */
+ }
+}
+
+void
+evhttp_send_page_(struct evhttp_request *req, struct evbuffer *databuf)
+{
+ if (!req->major || !req->minor) {
+ req->major = 1;
+ req->minor = 1;
+ }
+
+ if (req->kind != EVHTTP_RESPONSE)
+ evhttp_response_code_(req, 200, "OK");
+
+ evhttp_clear_headers(req->output_headers);
+ evhttp_add_header(req->output_headers, "Content-Type", "text/html");
+ evhttp_add_header(req->output_headers, "Connection", "close");
+
+ evhttp_send(req, databuf);
+}
+
+static const char uri_chars[256] = {
+ /* 0 */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0,
+ /* 64 */
+ 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1,
+ 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0,
+ /* 128 */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* 192 */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+};
+
+#define CHAR_IS_UNRESERVED(c) \
+ (uri_chars[(unsigned char)(c)])
+
+/*
+ * Helper functions to encode/decode a string for inclusion in a URI.
+ * The returned string must be freed by the caller.
+ */
+char *
+evhttp_uriencode(const char *uri, ev_ssize_t len, int space_as_plus)
+{
+ struct evbuffer *buf = evbuffer_new();
+ const char *p, *end;
+ char *result;
+
+ if (buf == NULL)
+ return (NULL);
+
+ if (len >= 0)
+ end = uri+len;
+ else
+ end = uri+strlen(uri);
+
+ for (p = uri; p < end; p++) {
+ if (CHAR_IS_UNRESERVED(*p)) {
+ evbuffer_add(buf, p, 1);
+ } else if (*p == ' ' && space_as_plus) {
+ evbuffer_add(buf, "+", 1);
+ } else {
+ evbuffer_add_printf(buf, "%%%02X", (unsigned char)(*p));
+ }
+ }
+ evbuffer_add(buf, "", 1); /* NUL-terminator. */
+ result = mm_malloc(evbuffer_get_length(buf));
+ if (result)
+ evbuffer_remove(buf, result, evbuffer_get_length(buf));
+ evbuffer_free(buf);
+
+ return (result);
+}
+
+char *
+evhttp_encode_uri(const char *str)
+{
+ return evhttp_uriencode(str, -1, 0);
+}
+
+/*
+ * @param decode_plus_ctl: if 1, we decode plus into space. If 0, we don't.
+ * If -1, we transform plus to space only after we've seen a '?';
+ * -1 is deprecated.
+ * @return the number of bytes written to 'ret'.
+ */
+int
+evhttp_decode_uri_internal(
+ const char *uri, size_t length, char *ret, int decode_plus_ctl)
+{
+ char c;
+ int j;
+ int decode_plus = (decode_plus_ctl == 1) ? 1: 0;
+ unsigned i;
+
+ for (i = j = 0; i < length; i++) {
+ c = uri[i];
+ if (c == '?') {
+ if (decode_plus_ctl < 0)
+ decode_plus = 1;
+ } else if (c == '+' && decode_plus) {
+ c = ' ';
+ } else if ((i + 2) < length && c == '%' &&
+ EVUTIL_ISXDIGIT_(uri[i+1]) && EVUTIL_ISXDIGIT_(uri[i+2])) {
+ char tmp[3];
+ tmp[0] = uri[i+1];
+ tmp[1] = uri[i+2];
+ tmp[2] = '\0';
+ c = (char)strtol(tmp, NULL, 16);
+ i += 2;
+ }
+ ret[j++] = c;
+ }
+ ret[j] = '\0';
+
+ return (j);
+}
+
+/* deprecated */
+char *
+evhttp_decode_uri(const char *uri)
+{
+ char *ret;
+
+ if ((ret = mm_malloc(strlen(uri) + 1)) == NULL) {
+ event_warn("%s: malloc(%lu)", __func__,
+ (unsigned long)(strlen(uri) + 1));
+ return (NULL);
+ }
+
+ evhttp_decode_uri_internal(uri, strlen(uri),
+ ret, -1 /*always_decode_plus*/);
+
+ return (ret);
+}
+
+char *
+evhttp_uridecode(const char *uri, int decode_plus, size_t *size_out)
+{
+ char *ret;
+ int n;
+
+ if ((ret = mm_malloc(strlen(uri) + 1)) == NULL) {
+ event_warn("%s: malloc(%lu)", __func__,
+ (unsigned long)(strlen(uri) + 1));
+ return (NULL);
+ }
+
+ n = evhttp_decode_uri_internal(uri, strlen(uri),
+ ret, !!decode_plus/*always_decode_plus*/);
+
+ if (size_out) {
+ EVUTIL_ASSERT(n >= 0);
+ *size_out = (size_t)n;
+ }
+
+ return (ret);
+}
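+
+/*
+ * Illustrative sketch (not part of the library): round-tripping a string
+ * through the encode/decode helpers above.  Both returned strings are
+ * heap-allocated and must be released by the caller.
+ */
+#if 0
+static void
+example_uri_roundtrip(void)
+{
+ /* encode ' ' as '+' by passing space_as_plus = 1 */
+ char *enc = evhttp_uriencode("key=two words", -1, 1);
+ size_t len;
+ char *dec;
+
+ if (enc == NULL)
+ return;
+ /* decode '+' back to ' ' and report the decoded length */
+ dec = evhttp_uridecode(enc, 1, &len);
+ free(enc);
+ if (dec != NULL)
+ free(dec);
+}
+#endif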
+
+/*
+ * Helper function to parse out arguments in a query.
+ * The arguments are key=value pairs separated by '&'.
+ */
+
+static int
+evhttp_parse_query_impl(const char *str, struct evkeyvalq *headers,
+ int is_whole_uri)
+{
+ char *line=NULL;
+ char *argument;
+ char *p;
+ const char *query_part;
+ int result = -1;
+ struct evhttp_uri *uri=NULL;
+
+ TAILQ_INIT(headers);
+
+ if (is_whole_uri) {
+ uri = evhttp_uri_parse(str);
+ if (!uri)
+ goto error;
+ query_part = evhttp_uri_get_query(uri);
+ } else {
+ query_part = str;
+ }
+
+ /* No arguments - we are done */
+ if (!query_part || !strlen(query_part)) {
+ result = 0;
+ goto done;
+ }
+
+ if ((line = mm_strdup(query_part)) == NULL) {
+ event_warn("%s: strdup", __func__);
+ goto error;
+ }
+
+ p = argument = line;
+ while (p != NULL && *p != '\0') {
+ char *key, *value, *decoded_value;
+ argument = strsep(&p, "&");
+
+ value = argument;
+ key = strsep(&value, "=");
+ if (value == NULL || *key == '\0') {
+ goto error;
+ }
+
+ if ((decoded_value = mm_malloc(strlen(value) + 1)) == NULL) {
+ event_warn("%s: mm_malloc", __func__);
+ goto error;
+ }
+ evhttp_decode_uri_internal(value, strlen(value),
+ decoded_value, 1 /*always_decode_plus*/);
+ event_debug(("Query Param: %s -> %s\n", key, decoded_value));
+ evhttp_add_header_internal(headers, key, decoded_value);
+ mm_free(decoded_value);
+ }
+
+ result = 0;
+ goto done;
+error:
+ evhttp_clear_headers(headers);
+done:
+ if (line)
+ mm_free(line);
+ if (uri)
+ evhttp_uri_free(uri);
+ return result;
+}
+
+int
+evhttp_parse_query(const char *uri, struct evkeyvalq *headers)
+{
+ return evhttp_parse_query_impl(uri, headers, 1);
+}
+int
+evhttp_parse_query_str(const char *uri, struct evkeyvalq *headers)
+{
+ return evhttp_parse_query_impl(uri, headers, 0);
+}
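+
+/*
+ * Illustrative sketch (not part of the library): parsing a query string
+ * into an evkeyvalq with evhttp_parse_query_str() and releasing it again.
+ */
+#if 0
+static void
+example_parse_query(void)
+{
+ struct evkeyvalq params;
+ const char *name;
+
+ if (evhttp_parse_query_str("name=Jane+Doe&lang=en", &params) == 0) {
+ /* values are already percent- and plus-decoded */
+ name = evhttp_find_header(&params, "name");
+ if (name != NULL)
+ printf("name=%s\n", name);
+ evhttp_clear_headers(&params);
+ }
+}
+#endif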
+
+static struct evhttp_cb *
+evhttp_dispatch_callback(struct httpcbq *callbacks, struct evhttp_request *req)
+{
+ struct evhttp_cb *cb;
+ size_t offset = 0;
+ char *translated;
+ const char *path;
+
+ /* Test for different URLs */
+ path = evhttp_uri_get_path(req->uri_elems);
+ offset = strlen(path);
+ if ((translated = mm_malloc(offset + 1)) == NULL)
+ return (NULL);
+ evhttp_decode_uri_internal(path, offset, translated,
+ 0 /* decode_plus */);
+
+ TAILQ_FOREACH(cb, callbacks, next) {
+ if (!strcmp(cb->what, translated)) {
+ mm_free(translated);
+ return (cb);
+ }
+ }
+
+ mm_free(translated);
+ return (NULL);
+}
+
+
+static int
+prefix_suffix_match(const char *pattern, const char *name, int ignorecase)
+{
+ char c;
+
+ while (1) {
+ switch (c = *pattern++) {
+ case '\0':
+ return *name == '\0';
+
+ case '*':
+ while (*name != '\0') {
+ if (prefix_suffix_match(pattern, name,
+ ignorecase))
+ return (1);
+ ++name;
+ }
+ return (0);
+ default:
+ if (c != *name) {
+ if (!ignorecase ||
+ EVUTIL_TOLOWER_(c) != EVUTIL_TOLOWER_(*name))
+ return (0);
+ }
+ ++name;
+ }
+ }
+ /* NOTREACHED */
+}
+
+/*
+ Search the vhost hierarchy beginning with http for a server alias
+ matching hostname. If a match is found, and outhttp is non-null,
+ outhttp is set to the matching http object and 1 is returned.
+*/
+
+static int
+evhttp_find_alias(struct evhttp *http, struct evhttp **outhttp,
+ const char *hostname)
+{
+ struct evhttp_server_alias *alias;
+ struct evhttp *vhost;
+
+ TAILQ_FOREACH(alias, &http->aliases, next) {
+ /* XXX Do we need to handle IP addresses? */
+ if (!evutil_ascii_strcasecmp(alias->alias, hostname)) {
+ if (outhttp)
+ *outhttp = http;
+ return 1;
+ }
+ }
+
+ /* XXX It might be good to avoid recursion here, but I don't
+ see a way to do that w/o a list. */
+ TAILQ_FOREACH(vhost, &http->virtualhosts, next_vhost) {
+ if (evhttp_find_alias(vhost, outhttp, hostname))
+ return 1;
+ }
+
+ return 0;
+}
+
+/*
+ Attempts to find the best http object to handle a request for a hostname.
+ All aliases for the root http object and vhosts are searched for an exact
+ match. Then, the vhost hierarchy is traversed again for a matching
+ pattern.
+
+ If an alias or vhost is matched, 1 is returned, and outhttp, if non-null,
+ is set with the best matching http object. If there are no matches, the
+ root http object is stored in outhttp and 0 is returned.
+*/
+
+static int
+evhttp_find_vhost(struct evhttp *http, struct evhttp **outhttp,
+ const char *hostname)
+{
+ struct evhttp *vhost;
+ struct evhttp *oldhttp;
+ int match_found = 0;
+
+ if (evhttp_find_alias(http, outhttp, hostname))
+ return 1;
+
+ do {
+ oldhttp = http;
+ TAILQ_FOREACH(vhost, &http->virtualhosts, next_vhost) {
+ if (prefix_suffix_match(vhost->vhost_pattern,
+ hostname, 1 /* ignorecase */)) {
+ http = vhost;
+ match_found = 1;
+ break;
+ }
+ }
+ } while (oldhttp != http);
+
+ if (outhttp)
+ *outhttp = http;
+
+ return match_found;
+}
+
+static void
+evhttp_handle_request(struct evhttp_request *req, void *arg)
+{
+ struct evhttp *http = arg;
+ struct evhttp_cb *cb = NULL;
+ const char *hostname;
+
+ /* we have a new request on which the user needs to take action */
+ req->userdone = 0;
+
+ if (req->type == 0 || req->uri == NULL) {
+ evhttp_send_error(req, HTTP_BADREQUEST, NULL);
+ return;
+ }
+
+ if ((http->allowed_methods & req->type) == 0) {
+ event_debug(("Rejecting disallowed method %x (allowed: %x)\n",
+ (unsigned)req->type, (unsigned)http->allowed_methods));
+ evhttp_send_error(req, HTTP_NOTIMPLEMENTED, NULL);
+ return;
+ }
+
+ /* handle potential virtual hosts */
+ hostname = evhttp_request_get_host(req);
+ if (hostname != NULL) {
+ evhttp_find_vhost(http, &http, hostname);
+ }
+
+ if ((cb = evhttp_dispatch_callback(&http->callbacks, req)) != NULL) {
+ (*cb->cb)(req, cb->cbarg);
+ return;
+ }
+
+ /* Generic callback */
+ if (http->gencb) {
+ (*http->gencb)(req, http->gencbarg);
+ return;
+ } else {
+ /* We need to send a 404 here */
+#define ERR_FORMAT "<html><head>" \
+ "<title>404 Not Found</title>" \
+ "</head><body>" \
+ "<h1>Not Found</h1>" \
+ "<p>The requested URL %s was not found on this server.</p>"\
+ "</body></html>\n"
+
+ char *escaped_html;
+ struct evbuffer *buf;
+
+ if ((escaped_html = evhttp_htmlescape(req->uri)) == NULL) {
+ evhttp_connection_free(req->evcon);
+ return;
+ }
+
+ if ((buf = evbuffer_new()) == NULL) {
+ mm_free(escaped_html);
+ evhttp_connection_free(req->evcon);
+ return;
+ }
+
+ evhttp_response_code_(req, HTTP_NOTFOUND, "Not Found");
+
+ evbuffer_add_printf(buf, ERR_FORMAT, escaped_html);
+
+ mm_free(escaped_html);
+
+ evhttp_send_page_(req, buf);
+
+ evbuffer_free(buf);
+#undef ERR_FORMAT
+ }
+}
+
+/* Listener callback when a connection arrives at a server. */
+static void
+accept_socket_cb(struct evconnlistener *listener, evutil_socket_t nfd, struct sockaddr *peer_sa, int peer_socklen, void *arg)
+{
+ struct evhttp *http = arg;
+
+ evhttp_get_request(http, nfd, peer_sa, peer_socklen);
+}
+
+int
+evhttp_bind_socket(struct evhttp *http, const char *address, ev_uint16_t port)
+{
+ struct evhttp_bound_socket *bound =
+ evhttp_bind_socket_with_handle(http, address, port);
+ if (bound == NULL)
+ return (-1);
+ return (0);
+}
+
+struct evhttp_bound_socket *
+evhttp_bind_socket_with_handle(struct evhttp *http, const char *address, ev_uint16_t port)
+{
+ evutil_socket_t fd;
+ struct evhttp_bound_socket *bound;
+
+ if ((fd = bind_socket(address, port, 1 /*reuse*/)) == -1)
+ return (NULL);
+
+ if (listen(fd, 128) == -1) {
+ event_sock_warn(fd, "%s: listen", __func__);
+ evutil_closesocket(fd);
+ return (NULL);
+ }
+
+ bound = evhttp_accept_socket_with_handle(http, fd);
+
+ if (bound != NULL) {
+ event_debug(("Bound to port %d - Awaiting connections ... ",
+ port));
+ return (bound);
+ }
+
+ return (NULL);
+}
+
+int
+evhttp_accept_socket(struct evhttp *http, evutil_socket_t fd)
+{
+ struct evhttp_bound_socket *bound =
+ evhttp_accept_socket_with_handle(http, fd);
+ if (bound == NULL)
+ return (-1);
+ return (0);
+}
+
+void
+evhttp_foreach_bound_socket(struct evhttp *http,
+ evhttp_bound_socket_foreach_fn *function,
+ void *argument)
+{
+ struct evhttp_bound_socket *bound;
+
+ TAILQ_FOREACH(bound, &http->sockets, next)
+ function(bound, argument);
+}
+
+struct evhttp_bound_socket *
+evhttp_accept_socket_with_handle(struct evhttp *http, evutil_socket_t fd)
+{
+ struct evhttp_bound_socket *bound;
+ struct evconnlistener *listener;
+ const int flags =
+ LEV_OPT_REUSEABLE|LEV_OPT_CLOSE_ON_EXEC|LEV_OPT_CLOSE_ON_FREE;
+
+ listener = evconnlistener_new(http->base, NULL, NULL,
+ flags,
+ 0, /* Backlog is '0' because we already said 'listen' */
+ fd);
+ if (!listener)
+ return (NULL);
+
+ bound = evhttp_bind_listener(http, listener);
+ if (!bound) {
+ evconnlistener_free(listener);
+ return (NULL);
+ }
+ return (bound);
+}
+
+struct evhttp_bound_socket *
+evhttp_bind_listener(struct evhttp *http, struct evconnlistener *listener)
+{
+ struct evhttp_bound_socket *bound;
+
+ bound = mm_malloc(sizeof(struct evhttp_bound_socket));
+ if (bound == NULL)
+ return (NULL);
+
+ bound->listener = listener;
+ TAILQ_INSERT_TAIL(&http->sockets, bound, next);
+
+ evconnlistener_set_cb(listener, accept_socket_cb, http);
+ return bound;
+}
+
+evutil_socket_t
+evhttp_bound_socket_get_fd(struct evhttp_bound_socket *bound)
+{
+ return evconnlistener_get_fd(bound->listener);
+}
+
+struct evconnlistener *
+evhttp_bound_socket_get_listener(struct evhttp_bound_socket *bound)
+{
+ return bound->listener;
+}
+
+void
+evhttp_del_accept_socket(struct evhttp *http, struct evhttp_bound_socket *bound)
+{
+ TAILQ_REMOVE(&http->sockets, bound, next);
+ evconnlistener_free(bound->listener);
+ mm_free(bound);
+}
+
+static struct evhttp*
+evhttp_new_object(void)
+{
+ struct evhttp *http = NULL;
+
+ if ((http = mm_calloc(1, sizeof(struct evhttp))) == NULL) {
+ event_warn("%s: calloc", __func__);
+ return (NULL);
+ }
+
+ evutil_timerclear(&http->timeout);
+ evhttp_set_max_headers_size(http, EV_SIZE_MAX);
+ evhttp_set_max_body_size(http, EV_SIZE_MAX);
+ evhttp_set_default_content_type(http, "text/html; charset=ISO-8859-1");
+ evhttp_set_allowed_methods(http,
+ EVHTTP_REQ_GET |
+ EVHTTP_REQ_POST |
+ EVHTTP_REQ_HEAD |
+ EVHTTP_REQ_PUT |
+ EVHTTP_REQ_DELETE);
+
+ TAILQ_INIT(&http->sockets);
+ TAILQ_INIT(&http->callbacks);
+ TAILQ_INIT(&http->connections);
+ TAILQ_INIT(&http->virtualhosts);
+ TAILQ_INIT(&http->aliases);
+
+ return (http);
+}
+
+struct evhttp *
+evhttp_new(struct event_base *base)
+{
+ struct evhttp *http = NULL;
+
+ http = evhttp_new_object();
+ if (http == NULL)
+ return (NULL);
+ http->base = base;
+
+ return (http);
+}
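+
+/*
+ * Illustrative sketch (not part of the library): a minimal server built
+ * from the pieces above.  The port and handler name are placeholders; a
+ * real program would also check the results of event_base_new() and
+ * evhttp_new().
+ */
+#if 0
+static void
+example_gen_cb(struct evhttp_request *req, void *arg)
+{
+ evhttp_send_reply(req, HTTP_OK, "OK", NULL);
+}
+
+static int
+example_run_server(void)
+{
+ struct event_base *base = event_base_new();
+ struct evhttp *http = evhttp_new(base);
+
+ evhttp_set_gencb(http, example_gen_cb, NULL);
+ if (evhttp_bind_socket(http, "0.0.0.0", 8080) == -1)
+ return -1;
+ event_base_dispatch(base); /* runs until the loop is stopped */
+ evhttp_free(http);
+ event_base_free(base);
+ return 0;
+}
+#endif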
+
+/*
+ * Start a web server on the specified address and port.
+ */
+
+struct evhttp *
+evhttp_start(const char *address, unsigned short port)
+{
+ struct evhttp *http = NULL;
+
+ http = evhttp_new_object();
+ if (http == NULL)
+ return (NULL);
+ if (evhttp_bind_socket(http, address, port) == -1) {
+ mm_free(http);
+ return (NULL);
+ }
+
+ return (http);
+}
+
+void
+evhttp_free(struct evhttp* http)
+{
+ struct evhttp_cb *http_cb;
+ struct evhttp_connection *evcon;
+ struct evhttp_bound_socket *bound;
+ struct evhttp* vhost;
+ struct evhttp_server_alias *alias;
+
+ /* Remove the accepting part */
+ while ((bound = TAILQ_FIRST(&http->sockets)) != NULL) {
+ TAILQ_REMOVE(&http->sockets, bound, next);
+
+ evconnlistener_free(bound->listener);
+
+ mm_free(bound);
+ }
+
+ while ((evcon = TAILQ_FIRST(&http->connections)) != NULL) {
+ /* evhttp_connection_free removes the connection */
+ evhttp_connection_free(evcon);
+ }
+
+ while ((http_cb = TAILQ_FIRST(&http->callbacks)) != NULL) {
+ TAILQ_REMOVE(&http->callbacks, http_cb, next);
+ mm_free(http_cb->what);
+ mm_free(http_cb);
+ }
+
+ while ((vhost = TAILQ_FIRST(&http->virtualhosts)) != NULL) {
+ TAILQ_REMOVE(&http->virtualhosts, vhost, next_vhost);
+
+ evhttp_free(vhost);
+ }
+
+ if (http->vhost_pattern != NULL)
+ mm_free(http->vhost_pattern);
+
+ while ((alias = TAILQ_FIRST(&http->aliases)) != NULL) {
+ TAILQ_REMOVE(&http->aliases, alias, next);
+ mm_free(alias->alias);
+ mm_free(alias);
+ }
+
+ mm_free(http);
+}
+
+int
+evhttp_add_virtual_host(struct evhttp* http, const char *pattern,
+ struct evhttp* vhost)
+{
+ /* a vhost can only be a vhost once and should not have bound sockets */
+ if (vhost->vhost_pattern != NULL ||
+ TAILQ_FIRST(&vhost->sockets) != NULL)
+ return (-1);
+
+ vhost->vhost_pattern = mm_strdup(pattern);
+ if (vhost->vhost_pattern == NULL)
+ return (-1);
+
+ TAILQ_INSERT_TAIL(&http->virtualhosts, vhost, next_vhost);
+
+ return (0);
+}
+
+int
+evhttp_remove_virtual_host(struct evhttp* http, struct evhttp* vhost)
+{
+ if (vhost->vhost_pattern == NULL)
+ return (-1);
+
+ TAILQ_REMOVE(&http->virtualhosts, vhost, next_vhost);
+
+ mm_free(vhost->vhost_pattern);
+ vhost->vhost_pattern = NULL;
+
+ return (0);
+}
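+
+/*
+ * Illustrative sketch (not part of the library): attaching a virtual host
+ * and a server alias, which the matching logic in evhttp_find_alias() and
+ * evhttp_find_vhost() above consults for each request.  The host names are
+ * placeholders.
+ */
+#if 0
+static int
+example_setup_vhosts(struct event_base *base, struct evhttp *http)
+{
+ /* a vhost is its own evhttp object; it must not have bound sockets */
+ struct evhttp *vhost = evhttp_new(base);
+ if (vhost == NULL)
+ return -1;
+ /* register callbacks on the vhost with evhttp_set_cb()/evhttp_set_gencb() */
+ /* "*" patterns are matched case-insensitively against the Host: name */
+ if (evhttp_add_virtual_host(http, "*.example.com", vhost) != 0)
+ return -1;
+ /* exact-match aliases for the root server */
+ return evhttp_add_server_alias(http, "example.org");
+}
+#endif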
+
+int
+evhttp_add_server_alias(struct evhttp *http, const char *alias)
+{
+ struct evhttp_server_alias *evalias;
+
+ evalias = mm_calloc(1, sizeof(*evalias));
+ if (!evalias)
+ return -1;
+
+ evalias->alias = mm_strdup(alias);
+ if (!evalias->alias) {
+ mm_free(evalias);
+ return -1;
+ }
+
+ TAILQ_INSERT_TAIL(&http->aliases, evalias, next);
+
+ return 0;
+}
+
+int
+evhttp_remove_server_alias(struct evhttp *http, const char *alias)
+{
+ struct evhttp_server_alias *evalias;
+
+ TAILQ_FOREACH(evalias, &http->aliases, next) {
+ if (evutil_ascii_strcasecmp(evalias->alias, alias) == 0) {
+ TAILQ_REMOVE(&http->aliases, evalias, next);
+ mm_free(evalias->alias);
+ mm_free(evalias);
+ return 0;
+ }
+ }
+
+ return -1;
+}
+
+void
+evhttp_set_timeout(struct evhttp* http, int timeout_in_secs)
+{
+ if (timeout_in_secs == -1) {
+ evhttp_set_timeout_tv(http, NULL);
+ } else {
+ struct timeval tv;
+ tv.tv_sec = timeout_in_secs;
+ tv.tv_usec = 0;
+ evhttp_set_timeout_tv(http, &tv);
+ }
+}
+
+void
+evhttp_set_timeout_tv(struct evhttp* http, const struct timeval* tv)
+{
+ if (tv) {
+ http->timeout = *tv;
+ } else {
+ evutil_timerclear(&http->timeout);
+ }
+}
+
+void
+evhttp_set_max_headers_size(struct evhttp* http, ev_ssize_t max_headers_size)
+{
+ if (max_headers_size < 0)
+ http->default_max_headers_size = EV_SIZE_MAX;
+ else
+ http->default_max_headers_size = max_headers_size;
+}
+
+void
+evhttp_set_max_body_size(struct evhttp* http, ev_ssize_t max_body_size)
+{
+ if (max_body_size < 0)
+ http->default_max_body_size = EV_UINT64_MAX;
+ else
+ http->default_max_body_size = max_body_size;
+}
+
+void
+evhttp_set_default_content_type(struct evhttp *http,
+ const char *content_type) {
+ http->default_content_type = content_type;
+}
+
+void
+evhttp_set_allowed_methods(struct evhttp* http, ev_uint16_t methods)
+{
+ http->allowed_methods = methods;
+}
+
+int
+evhttp_set_cb(struct evhttp *http, const char *uri,
+ void (*cb)(struct evhttp_request *, void *), void *cbarg)
+{
+ struct evhttp_cb *http_cb;
+
+ TAILQ_FOREACH(http_cb, &http->callbacks, next) {
+ if (strcmp(http_cb->what, uri) == 0)
+ return (-1);
+ }
+
+ if ((http_cb = mm_calloc(1, sizeof(struct evhttp_cb))) == NULL) {
+ event_warn("%s: calloc", __func__);
+ return (-2);
+ }
+
+ http_cb->what = mm_strdup(uri);
+ if (http_cb->what == NULL) {
+ event_warn("%s: strdup", __func__);
+ mm_free(http_cb);
+ return (-3);
+ }
+ http_cb->cb = cb;
+ http_cb->cbarg = cbarg;
+
+ TAILQ_INSERT_TAIL(&http->callbacks, http_cb, next);
+
+ return (0);
+}
+
+int
+evhttp_del_cb(struct evhttp *http, const char *uri)
+{
+ struct evhttp_cb *http_cb;
+
+ TAILQ_FOREACH(http_cb, &http->callbacks, next) {
+ if (strcmp(http_cb->what, uri) == 0)
+ break;
+ }
+ if (http_cb == NULL)
+ return (-1);
+
+ TAILQ_REMOVE(&http->callbacks, http_cb, next);
+ mm_free(http_cb->what);
+ mm_free(http_cb);
+
+ return (0);
+}
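+
+/*
+ * Illustrative sketch (not part of the library): registering and removing
+ * per-path callbacks.  evhttp_set_cb() matches the decoded request path
+ * exactly and returns -1 if the path was already registered.  The path and
+ * callback name are placeholders.
+ */
+#if 0
+static void
+example_status_cb(struct evhttp_request *req, void *arg)
+{
+ evhttp_send_reply(req, HTTP_OK, "OK", NULL);
+}
+
+static void
+example_register_paths(struct evhttp *http)
+{
+ if (evhttp_set_cb(http, "/status", example_status_cb, NULL) != 0)
+ return; /* already registered or out of memory */
+ /* ... later, unregister it again */
+ evhttp_del_cb(http, "/status");
+}
+#endif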
+
+void
+evhttp_set_gencb(struct evhttp *http,
+ void (*cb)(struct evhttp_request *, void *), void *cbarg)
+{
+ http->gencb = cb;
+ http->gencbarg = cbarg;
+}
+
+void
+evhttp_set_bevcb(struct evhttp *http,
+ struct bufferevent* (*cb)(struct event_base *, void *), void *cbarg)
+{
+ http->bevcb = cb;
+ http->bevcbarg = cbarg;
+}
+
+/*
+ * Request related functions
+ */
+
+struct evhttp_request *
+evhttp_request_new(void (*cb)(struct evhttp_request *, void *), void *arg)
+{
+ struct evhttp_request *req = NULL;
+
+ /* Allocate request structure */
+ if ((req = mm_calloc(1, sizeof(struct evhttp_request))) == NULL) {
+ event_warn("%s: calloc", __func__);
+ goto error;
+ }
+
+ req->headers_size = 0;
+ req->body_size = 0;
+
+ req->kind = EVHTTP_RESPONSE;
+ req->input_headers = mm_calloc(1, sizeof(struct evkeyvalq));
+ if (req->input_headers == NULL) {
+ event_warn("%s: calloc", __func__);
+ goto error;
+ }
+ TAILQ_INIT(req->input_headers);
+
+ req->output_headers = mm_calloc(1, sizeof(struct evkeyvalq));
+ if (req->output_headers == NULL) {
+ event_warn("%s: calloc", __func__);
+ goto error;
+ }
+ TAILQ_INIT(req->output_headers);
+
+ if ((req->input_buffer = evbuffer_new()) == NULL) {
+ event_warn("%s: evbuffer_new", __func__);
+ goto error;
+ }
+
+ if ((req->output_buffer = evbuffer_new()) == NULL) {
+ event_warn("%s: evbuffer_new", __func__);
+ goto error;
+ }
+
+ req->cb = cb;
+ req->cb_arg = arg;
+
+ return (req);
+
+ error:
+ if (req != NULL)
+ evhttp_request_free(req);
+ return (NULL);
+}
+
+void
+evhttp_request_free(struct evhttp_request *req)
+{
+ if ((req->flags & EVHTTP_REQ_DEFER_FREE) != 0) {
+ req->flags |= EVHTTP_REQ_NEEDS_FREE;
+ return;
+ }
+
+ if (req->remote_host != NULL)
+ mm_free(req->remote_host);
+ if (req->uri != NULL)
+ mm_free(req->uri);
+ if (req->uri_elems != NULL)
+ evhttp_uri_free(req->uri_elems);
+ if (req->response_code_line != NULL)
+ mm_free(req->response_code_line);
+ if (req->host_cache != NULL)
+ mm_free(req->host_cache);
+
+ evhttp_clear_headers(req->input_headers);
+ mm_free(req->input_headers);
+
+ evhttp_clear_headers(req->output_headers);
+ mm_free(req->output_headers);
+
+ if (req->input_buffer != NULL)
+ evbuffer_free(req->input_buffer);
+
+ if (req->output_buffer != NULL)
+ evbuffer_free(req->output_buffer);
+
+ mm_free(req);
+}
+
+void
+evhttp_request_own(struct evhttp_request *req)
+{
+ req->flags |= EVHTTP_USER_OWNED;
+}
+
+int
+evhttp_request_is_owned(struct evhttp_request *req)
+{
+ return (req->flags & EVHTTP_USER_OWNED) != 0;
+}
+
+struct evhttp_connection *
+evhttp_request_get_connection(struct evhttp_request *req)
+{
+ return req->evcon;
+}
+
+struct event_base *
+evhttp_connection_get_base(struct evhttp_connection *conn)
+{
+ return conn->base;
+}
+
+void
+evhttp_request_set_chunked_cb(struct evhttp_request *req,
+ void (*cb)(struct evhttp_request *, void *))
+{
+ req->chunk_cb = cb;
+}
+
+void
+evhttp_request_set_header_cb(struct evhttp_request *req,
+ int (*cb)(struct evhttp_request *, void *))
+{
+ req->header_cb = cb;
+}
+
+void
+evhttp_request_set_error_cb(struct evhttp_request *req,
+ void (*cb)(enum evhttp_request_error, void *))
+{
+ req->error_cb = cb;
+}
+
+void
+evhttp_request_set_on_complete_cb(struct evhttp_request *req,
+ void (*cb)(struct evhttp_request *, void *), void *cb_arg)
+{
+ req->on_complete_cb = cb;
+ req->on_complete_cb_arg = cb_arg;
+}
+
+/*
+ * Allows for inspection of the request URI
+ */
+
+const char *
+evhttp_request_get_uri(const struct evhttp_request *req) {
+ if (req->uri == NULL)
+ event_debug(("%s: request %p has no uri\n", __func__, req));
+ return (req->uri);
+}
+
+const struct evhttp_uri *
+evhttp_request_get_evhttp_uri(const struct evhttp_request *req) {
+ if (req->uri_elems == NULL)
+ event_debug(("%s: request %p has no uri elems\n",
+ __func__, req));
+ return (req->uri_elems);
+}
+
+const char *
+evhttp_request_get_host(struct evhttp_request *req)
+{
+ const char *host = NULL;
+
+ if (req->host_cache)
+ return req->host_cache;
+
+ if (req->uri_elems)
+ host = evhttp_uri_get_host(req->uri_elems);
+ if (!host && req->input_headers) {
+ const char *p;
+ size_t len;
+
+ host = evhttp_find_header(req->input_headers, "Host");
+ /* The Host: header may include a port. Remove it here
+ to be consistent with uri_elems case above. */
+ if (host) {
+ p = host + strlen(host) - 1;
+ while (p > host && EVUTIL_ISDIGIT_(*p))
+ --p;
+ if (p > host && *p == ':') {
+ len = p - host;
+ req->host_cache = mm_malloc(len + 1);
+ if (!req->host_cache) {
+ event_warn("%s: malloc", __func__);
+ return NULL;
+ }
+ memcpy(req->host_cache, host, len);
+ req->host_cache[len] = '\0';
+ host = req->host_cache;
+ }
+ }
+ }
+
+ return host;
+}
+
+enum evhttp_cmd_type
+evhttp_request_get_command(const struct evhttp_request *req) {
+ return (req->type);
+}
+
+int
+evhttp_request_get_response_code(const struct evhttp_request *req)
+{
+ return req->response_code;
+}
+
+const char *
+evhttp_request_get_response_code_line(const struct evhttp_request *req)
+{
+ return req->response_code_line;
+}
+
+/** Returns the input headers */
+struct evkeyvalq *evhttp_request_get_input_headers(struct evhttp_request *req)
+{
+ return (req->input_headers);
+}
+
+/** Returns the output headers */
+struct evkeyvalq *evhttp_request_get_output_headers(struct evhttp_request *req)
+{
+ return (req->output_headers);
+}
+
+/** Returns the input buffer */
+struct evbuffer *evhttp_request_get_input_buffer(struct evhttp_request *req)
+{
+ return (req->input_buffer);
+}
+
+/** Returns the output buffer */
+struct evbuffer *evhttp_request_get_output_buffer(struct evhttp_request *req)
+{
+ return (req->output_buffer);
+}
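+
+/*
+ * Illustrative sketch (not part of the library): inspecting a request
+ * inside a server callback using the accessors above.  Only GET is
+ * accepted here; everything else gets a 405.  The callback name is a
+ * placeholder.
+ */
+#if 0
+static void
+example_inspect_cb(struct evhttp_request *req, void *arg)
+{
+ const char *host = evhttp_request_get_host(req);
+ struct evbuffer *in = evhttp_request_get_input_buffer(req);
+
+ if (evhttp_request_get_command(req) != EVHTTP_REQ_GET) {
+ evhttp_send_error(req, 405, "Method Not Allowed");
+ return;
+ }
+ printf("GET %s (Host: %s, %lu body bytes)\n",
+ evhttp_request_get_uri(req),
+ host ? host : "-",
+ (unsigned long)evbuffer_get_length(in));
+ evhttp_send_reply(req, HTTP_OK, "OK", NULL);
+}
+#endif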
+
+
+/*
+ * Takes a file descriptor to read a request from.
+ * The callback is executed once the whole request has been read.
+ */
+
+static struct evhttp_connection*
+evhttp_get_request_connection(
+ struct evhttp* http,
+ evutil_socket_t fd, struct sockaddr *sa, ev_socklen_t salen)
+{
+ struct evhttp_connection *evcon;
+ char *hostname = NULL, *portname = NULL;
+ struct bufferevent* bev = NULL;
+
+ name_from_addr(sa, salen, &hostname, &portname);
+ if (hostname == NULL || portname == NULL) {
+ if (hostname) mm_free(hostname);
+ if (portname) mm_free(portname);
+ return (NULL);
+ }
+
+ event_debug(("%s: new request from %s:%s on "EV_SOCK_FMT"\n",
+ __func__, hostname, portname, EV_SOCK_ARG(fd)));
+
+ /* we need a connection object to put the http request on */
+ if (http->bevcb != NULL) {
+ bev = (*http->bevcb)(http->base, http->bevcbarg);
+ }
+ evcon = evhttp_connection_base_bufferevent_new(
+ http->base, NULL, bev, hostname, atoi(portname));
+ mm_free(hostname);
+ mm_free(portname);
+ if (evcon == NULL)
+ return (NULL);
+
+ evcon->max_headers_size = http->default_max_headers_size;
+ evcon->max_body_size = http->default_max_body_size;
+
+ evcon->flags |= EVHTTP_CON_INCOMING;
+ evcon->state = EVCON_READING_FIRSTLINE;
+
+ evcon->fd = fd;
+
+ bufferevent_enable(evcon->bufev, EV_READ);
+ bufferevent_disable(evcon->bufev, EV_WRITE);
+ bufferevent_setfd(evcon->bufev, fd);
+
+ return (evcon);
+}
+
+static int
+evhttp_associate_new_request_with_connection(struct evhttp_connection *evcon)
+{
+ struct evhttp *http = evcon->http_server;
+ struct evhttp_request *req;
+ if ((req = evhttp_request_new(evhttp_handle_request, http)) == NULL)
+ return (-1);
+
+ if ((req->remote_host = mm_strdup(evcon->address)) == NULL) {
+ event_warn("%s: strdup", __func__);
+ evhttp_request_free(req);
+ return (-1);
+ }
+ req->remote_port = evcon->port;
+
+ req->evcon = evcon; /* the request ends up owning the connection */
+ req->flags |= EVHTTP_REQ_OWN_CONNECTION;
+
+ /* We did not present the request to the user yet, so treat it as
+ * if the user was done with the request. This allows us to free the
+ * request on a persistent connection if the client drops it without
+ * sending a request.
+ */
+ req->userdone = 1;
+
+ TAILQ_INSERT_TAIL(&evcon->requests, req, next);
+
+ req->kind = EVHTTP_REQUEST;
+
+
+ evhttp_start_read_(evcon);
+
+ return (0);
+}
+
+static void
+evhttp_get_request(struct evhttp *http, evutil_socket_t fd,
+ struct sockaddr *sa, ev_socklen_t salen)
+{
+ struct evhttp_connection *evcon;
+
+ evcon = evhttp_get_request_connection(http, fd, sa, salen);
+ if (evcon == NULL) {
+ event_sock_warn(fd, "%s: cannot get connection on "EV_SOCK_FMT,
+ __func__, EV_SOCK_ARG(fd));
+ evutil_closesocket(fd);
+ return;
+ }
+
+ /* the timeout can be used by the server to close idle connections */
+ if (evutil_timerisset(&http->timeout))
+ evhttp_connection_set_timeout_tv(evcon, &http->timeout);
+
+ /*
+ * if we want to accept more than one request on a connection,
+ * we need to know which http server it belongs to.
+ */
+ evcon->http_server = http;
+ TAILQ_INSERT_TAIL(&http->connections, evcon, next);
+
+ if (evhttp_associate_new_request_with_connection(evcon) == -1)
+ evhttp_connection_free(evcon);
+}
+
+
+/*
+ * Network helper functions that we do not want to export to the rest of
+ * the world.
+ */
+
+static void
+name_from_addr(struct sockaddr *sa, ev_socklen_t salen,
+ char **phost, char **pport)
+{
+ char ntop[NI_MAXHOST];
+ char strport[NI_MAXSERV];
+ int ni_result;
+
+#ifdef EVENT__HAVE_GETNAMEINFO
+ ni_result = getnameinfo(sa, salen,
+ ntop, sizeof(ntop), strport, sizeof(strport),
+ NI_NUMERICHOST|NI_NUMERICSERV);
+
+ if (ni_result != 0) {
+#ifdef EAI_SYSTEM
+ /* Windows doesn't have an EAI_SYSTEM. */
+ if (ni_result == EAI_SYSTEM)
+ event_err(1, "getnameinfo failed");
+ else
+#endif
+ event_errx(1, "getnameinfo failed: %s", gai_strerror(ni_result));
+ return;
+ }
+#else
+ ni_result = fake_getnameinfo(sa, salen,
+ ntop, sizeof(ntop), strport, sizeof(strport),
+ NI_NUMERICHOST|NI_NUMERICSERV);
+ if (ni_result != 0)
+ return;
+#endif
+
+ *phost = mm_strdup(ntop);
+ *pport = mm_strdup(strport);
+}
+
+/* Create a non-blocking socket and bind it */
+/* todo: rename this function */
+static evutil_socket_t
+bind_socket_ai(struct evutil_addrinfo *ai, int reuse)
+{
+ evutil_socket_t fd;
+
+ int on = 1, r;
+ int serrno;
+
+ /* Create listen socket */
+ fd = evutil_socket_(ai ? ai->ai_family : AF_INET,
+ SOCK_STREAM|EVUTIL_SOCK_NONBLOCK|EVUTIL_SOCK_CLOEXEC, 0);
+ if (fd == -1) {
+ event_sock_warn(-1, "socket");
+ return (-1);
+ }
+
+ if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, (void *)&on, sizeof(on))<0)
+ goto out;
+ if (reuse) {
+ if (evutil_make_listen_socket_reuseable(fd) < 0)
+ goto out;
+ }
+
+ if (ai != NULL) {
+ r = bind(fd, ai->ai_addr, (ev_socklen_t)ai->ai_addrlen);
+ if (r == -1)
+ goto out;
+ }
+
+ return (fd);
+
+ out:
+ serrno = EVUTIL_SOCKET_ERROR();
+ evutil_closesocket(fd);
+ EVUTIL_SET_SOCKET_ERROR(serrno);
+ return (-1);
+}
+
+static struct evutil_addrinfo *
+make_addrinfo(const char *address, ev_uint16_t port)
+{
+ struct evutil_addrinfo *ai = NULL;
+
+ struct evutil_addrinfo hints;
+ char strport[NI_MAXSERV];
+ int ai_result;
+
+ memset(&hints, 0, sizeof(hints));
+ hints.ai_family = AF_UNSPEC;
+ hints.ai_socktype = SOCK_STREAM;
+ /* turn NULL hostname into INADDR_ANY, and skip looking up any address
+ * types we don't have an interface to connect to. */
+ hints.ai_flags = EVUTIL_AI_PASSIVE|EVUTIL_AI_ADDRCONFIG;
+ evutil_snprintf(strport, sizeof(strport), "%d", port);
+ if ((ai_result = evutil_getaddrinfo(address, strport, &hints, &ai))
+ != 0) {
+ if (ai_result == EVUTIL_EAI_SYSTEM)
+ event_warn("getaddrinfo");
+ else
+ event_warnx("getaddrinfo: %s",
+ evutil_gai_strerror(ai_result));
+ return (NULL);
+ }
+
+ return (ai);
+}
+
+static evutil_socket_t
+bind_socket(const char *address, ev_uint16_t port, int reuse)
+{
+ evutil_socket_t fd;
+ struct evutil_addrinfo *aitop = NULL;
+
+ /* just create an unbound socket */
+ if (address == NULL && port == 0)
+ return bind_socket_ai(NULL, 0);
+
+ aitop = make_addrinfo(address, port);
+
+ if (aitop == NULL)
+ return (-1);
+
+ fd = bind_socket_ai(aitop, reuse);
+
+ evutil_freeaddrinfo(aitop);
+
+ return (fd);
+}
+
+struct evhttp_uri {
+ unsigned flags;
+ char *scheme; /* scheme; e.g. http, ftp, etc. */
+ char *userinfo; /* userinfo (typically username:pass), or NULL */
+ char *host; /* hostname, IP address, or NULL */
+ int port; /* port, or zero */
+ char *path; /* path, or "". */
+ char *query; /* query, or NULL */
+ char *fragment; /* fragment or NULL */
+};
+
+struct evhttp_uri *
+evhttp_uri_new(void)
+{
+ struct evhttp_uri *uri = mm_calloc(sizeof(struct evhttp_uri), 1);
+ if (uri)
+ uri->port = -1;
+ return uri;
+}
+
+void
+evhttp_uri_set_flags(struct evhttp_uri *uri, unsigned flags)
+{
+ uri->flags = flags;
+}
+
+/* Return true if the string starting at s and ending immediately before eos
+ * is a valid URI scheme according to RFC3986
+ */
+static int
+scheme_ok(const char *s, const char *eos)
+{
+ /* scheme = ALPHA *( ALPHA / DIGIT / "+" / "-" / "." ) */
+ EVUTIL_ASSERT(eos >= s);
+ if (s == eos)
+ return 0;
+ if (!EVUTIL_ISALPHA_(*s))
+ return 0;
+ while (++s < eos) {
+ if (! EVUTIL_ISALNUM_(*s) &&
+ *s != '+' && *s != '-' && *s != '.')
+ return 0;
+ }
+ return 1;
+}
+
+#define SUBDELIMS "!$&'()*+,;="
+
+/* Return true iff [s..eos) is a valid userinfo */
+static int
+userinfo_ok(const char *s, const char *eos)
+{
+ while (s < eos) {
+ if (CHAR_IS_UNRESERVED(*s) ||
+ strchr(SUBDELIMS, *s) ||
+ *s == ':')
+ ++s;
+ else if (*s == '%' && s+2 < eos &&
+ EVUTIL_ISXDIGIT_(s[1]) &&
+ EVUTIL_ISXDIGIT_(s[2]))
+ s += 3;
+ else
+ return 0;
+ }
+ return 1;
+}
+
+static int
+regname_ok(const char *s, const char *eos)
+{
+ while (s && s<eos) {
+ if (CHAR_IS_UNRESERVED(*s) ||
+ strchr(SUBDELIMS, *s))
+ ++s;
+ else if (*s == '%' &&
+ EVUTIL_ISXDIGIT_(s[1]) &&
+ EVUTIL_ISXDIGIT_(s[2]))
+ s += 3;
+ else
+ return 0;
+ }
+ return 1;
+}
+
+static int
+parse_port(const char *s, const char *eos)
+{
+ int portnum = 0;
+ while (s < eos) {
+ if (! EVUTIL_ISDIGIT_(*s))
+ return -1;
+ portnum = (portnum * 10) + (*s - '0');
+ if (portnum < 0)
+ return -1;
+ if (portnum > 65535)
+ return -1;
+ ++s;
+ }
+ return portnum;
+}
+
+/* returns 0 for bad, 1 for ipv6, 2 for IPvFuture */
+static int
+bracket_addr_ok(const char *s, const char *eos)
+{
+ if (s + 3 > eos || *s != '[' || *(eos-1) != ']')
+ return 0;
+ if (s[1] == 'v') {
+ /* IPvFuture, or junk.
+ "v" 1*HEXDIG "." 1*( unreserved / sub-delims / ":" )
+ */
+ s += 2; /* skip [v */
+ --eos;
+ if (!EVUTIL_ISXDIGIT_(*s)) /*require at least one*/
+ return 0;
+ while (s < eos && *s != '.') {
+ if (EVUTIL_ISXDIGIT_(*s))
+ ++s;
+ else
+ return 0;
+ }
+ if (*s != '.')
+ return 0;
+ ++s;
+ while (s < eos) {
+ if (CHAR_IS_UNRESERVED(*s) ||
+ strchr(SUBDELIMS, *s) ||
+ *s == ':')
+ ++s;
+ else
+ return 0;
+ }
+ return 2;
+ } else {
+ /* IPv6, or junk */
+ char buf[64];
+ ev_ssize_t n_chars = eos-s-2;
+ struct in6_addr in6;
+ if (n_chars >= 64) /* way too long */
+ return 0;
+ memcpy(buf, s+1, n_chars);
+ buf[n_chars]='\0';
+ return (evutil_inet_pton(AF_INET6,buf,&in6)==1) ? 1 : 0;
+ }
+}
+
+static int
+parse_authority(struct evhttp_uri *uri, char *s, char *eos)
+{
+ char *cp, *port;
+ EVUTIL_ASSERT(eos);
+ if (eos == s) {
+ uri->host = mm_strdup("");
+ if (uri->host == NULL) {
+ event_warn("%s: strdup", __func__);
+ return -1;
+ }
+ return 0;
+ }
+
+ /* Optionally, we start with "userinfo@" */
+
+ cp = strchr(s, '@');
+ if (cp && cp < eos) {
+ if (! userinfo_ok(s,cp))
+ return -1;
+ *cp++ = '\0';
+ uri->userinfo = mm_strdup(s);
+ if (uri->userinfo == NULL) {
+ event_warn("%s: strdup", __func__);
+ return -1;
+ }
+ } else {
+ cp = s;
+ }
+ /* Optionally, we end with ":port" */
+ for (port=eos-1; port >= cp && EVUTIL_ISDIGIT_(*port); --port)
+ ;
+ if (port >= cp && *port == ':') {
+ if (port+1 == eos) /* Leave port unspecified; the RFC allows a
+ * nil port */
+ uri->port = -1;
+ else if ((uri->port = parse_port(port+1, eos))<0)
+ return -1;
+ eos = port;
+ }
+ /* Now, cp..eos holds the "host" part, which can be an IPv4Address,
+ * an IP-Literal, or a reg-name */
+ EVUTIL_ASSERT(eos >= cp);
+ if (*cp == '[' && eos >= cp+2 && *(eos-1) == ']') {
+ /* IPv6address, IP-Literal, or junk. */
+ if (! bracket_addr_ok(cp, eos))
+ return -1;
+ } else {
+ /* Make sure the host part is ok. */
+ if (! regname_ok(cp,eos)) /* Match IPv4Address or reg-name */
+ return -1;
+ }
+ uri->host = mm_malloc(eos-cp+1);
+ if (uri->host == NULL) {
+ event_warn("%s: malloc", __func__);
+ return -1;
+ }
+ memcpy(uri->host, cp, eos-cp);
+ uri->host[eos-cp] = '\0';
+ return 0;
+
+}
+
+static char *
+end_of_authority(char *cp)
+{
+ while (*cp) {
+ if (*cp == '?' || *cp == '#' || *cp == '/')
+ return cp;
+ ++cp;
+ }
+ return cp;
+}
+
+enum uri_part {
+ PART_PATH,
+ PART_QUERY,
+ PART_FRAGMENT
+};
+
+/* Return the character after the longest prefix of 'cp' that matches...
+ * *pchar / "/" if part is PART_PATH, or
+ * *(pchar / "/" / "?") if part is PART_QUERY or PART_FRAGMENT.
+ */
+static char *
+end_of_path(char *cp, enum uri_part part, unsigned flags)
+{
+ if (flags & EVHTTP_URI_NONCONFORMANT) {
+ /* If NONCONFORMANT:
+ * Path is everything up to a # or ? or nul.
+ * Query is everything up to a # or nul.
+ * Fragment is everything up to a nul.
+ */
+ switch (part) {
+ case PART_PATH:
+ while (*cp && *cp != '#' && *cp != '?')
+ ++cp;
+ break;
+ case PART_QUERY:
+ while (*cp && *cp != '#')
+ ++cp;
+ break;
+ case PART_FRAGMENT:
+ cp += strlen(cp);
+ break;
+ };
+ return cp;
+ }
+
+ while (*cp) {
+ if (CHAR_IS_UNRESERVED(*cp) ||
+ strchr(SUBDELIMS, *cp) ||
+ *cp == ':' || *cp == '@' || *cp == '/')
+ ++cp;
+ else if (*cp == '%' && EVUTIL_ISXDIGIT_(cp[1]) &&
+ EVUTIL_ISXDIGIT_(cp[2]))
+ cp += 3;
+ else if (*cp == '?' && part != PART_PATH)
+ ++cp;
+ else
+ return cp;
+ }
+ return cp;
+}
+
+static int
+path_matches_noscheme(const char *cp)
+{
+ while (*cp) {
+ if (*cp == ':')
+ return 0;
+ else if (*cp == '/')
+ return 1;
+ ++cp;
+ }
+ return 1;
+}
+
+struct evhttp_uri *
+evhttp_uri_parse(const char *source_uri)
+{
+ return evhttp_uri_parse_with_flags(source_uri, 0);
+}
+
+struct evhttp_uri *
+evhttp_uri_parse_with_flags(const char *source_uri, unsigned flags)
+{
+ char *readbuf = NULL, *readp = NULL, *token = NULL, *query = NULL;
+ char *path = NULL, *fragment = NULL;
+ int got_authority = 0;
+
+ struct evhttp_uri *uri = mm_calloc(1, sizeof(struct evhttp_uri));
+ if (uri == NULL) {
+ event_warn("%s: calloc", __func__);
+ goto err;
+ }
+ uri->port = -1;
+ uri->flags = flags;
+
+ readbuf = mm_strdup(source_uri);
+ if (readbuf == NULL) {
+ event_warn("%s: strdup", __func__);
+ goto err;
+ }
+
+ readp = readbuf;
+ token = NULL;
+
+ /* We try to follow RFC3986 here as much as we can, and match
+ the productions
+
+ URI = scheme ":" hier-part [ "?" query ] [ "#" fragment ]
+
+ relative-ref = relative-part [ "?" query ] [ "#" fragment ]
+ */
+
+ /* 1. scheme: */
+ token = strchr(readp, ':');
+ if (token && scheme_ok(readp,token)) {
+ *token = '\0';
+ uri->scheme = mm_strdup(readp);
+ if (uri->scheme == NULL) {
+ event_warn("%s: strdup", __func__);
+ goto err;
+ }
+ readp = token+1; /* eat : */
+ }
+
+ /* 2. Optionally, "//" then an 'authority' part. */
+ if (readp[0]=='/' && readp[1] == '/') {
+ char *authority;
+ readp += 2;
+ authority = readp;
+ path = end_of_authority(readp);
+ if (parse_authority(uri, authority, path) < 0)
+ goto err;
+ readp = path;
+ got_authority = 1;
+ }
+
+ /* 3. Path: path-abempty, path-absolute, path-rootless, or path-empty
+ */
+ path = readp;
+ readp = end_of_path(path, PART_PATH, flags);
+
+ /* Query */
+ if (*readp == '?') {
+ *readp = '\0';
+ ++readp;
+ query = readp;
+ readp = end_of_path(readp, PART_QUERY, flags);
+ }
+ /* fragment */
+ if (*readp == '#') {
+ *readp = '\0';
+ ++readp;
+ fragment = readp;
+ readp = end_of_path(readp, PART_FRAGMENT, flags);
+ }
+ if (*readp != '\0') {
+ goto err;
+ }
+
+ /* These next two cases may be unreachable; I'm leaving them
+ * in to be defensive. */
+ /* If you didn't get an authority, the path can't begin with "//" */
+ if (!got_authority && path[0]=='/' && path[1]=='/')
+ goto err;
+ /* If you did get an authority, the path must begin with "/" or be
+ * empty. */
+ if (got_authority && path[0] != '/' && path[0] != '\0')
+ goto err;
+ /* (End of maybe-unreachable cases) */
+
+ /* If there was no scheme, the first part of the path (if any) must
+ * have no colon in it. */
+ if (! uri->scheme && !path_matches_noscheme(path))
+ goto err;
+
+ EVUTIL_ASSERT(path);
+ uri->path = mm_strdup(path);
+ if (uri->path == NULL) {
+ event_warn("%s: strdup", __func__);
+ goto err;
+ }
+
+ if (query) {
+ uri->query = mm_strdup(query);
+ if (uri->query == NULL) {
+ event_warn("%s: strdup", __func__);
+ goto err;
+ }
+ }
+ if (fragment) {
+ uri->fragment = mm_strdup(fragment);
+ if (uri->fragment == NULL) {
+ event_warn("%s: strdup", __func__);
+ goto err;
+ }
+ }
+
+ mm_free(readbuf);
+
+ return uri;
+err:
+ if (uri)
+ evhttp_uri_free(uri);
+ if (readbuf)
+ mm_free(readbuf);
+ return NULL;
+}
+
+void
+evhttp_uri_free(struct evhttp_uri *uri)
+{
+#define URI_FREE_STR_(f) \
+ if (uri->f) { \
+ mm_free(uri->f); \
+ }
+
+ URI_FREE_STR_(scheme);
+ URI_FREE_STR_(userinfo);
+ URI_FREE_STR_(host);
+ URI_FREE_STR_(path);
+ URI_FREE_STR_(query);
+ URI_FREE_STR_(fragment);
+
+ mm_free(uri);
+#undef URI_FREE_STR_
+}
+
+char *
+evhttp_uri_join(struct evhttp_uri *uri, char *buf, size_t limit)
+{
+ struct evbuffer *tmp = 0;
+ size_t joined_size = 0;
+ char *output = NULL;
+
+#define URI_ADD_(f) evbuffer_add(tmp, uri->f, strlen(uri->f))
+
+ if (!uri || !buf || !limit)
+ return NULL;
+
+ tmp = evbuffer_new();
+ if (!tmp)
+ return NULL;
+
+ if (uri->scheme) {
+ URI_ADD_(scheme);
+ evbuffer_add(tmp, ":", 1);
+ }
+ if (uri->host) {
+ evbuffer_add(tmp, "//", 2);
+ if (uri->userinfo)
+ evbuffer_add_printf(tmp,"%s@", uri->userinfo);
+ URI_ADD_(host);
+ if (uri->port >= 0)
+ evbuffer_add_printf(tmp,":%d", uri->port);
+
+ if (uri->path && uri->path[0] != '/' && uri->path[0] != '\0')
+ goto err;
+ }
+
+ if (uri->path)
+ URI_ADD_(path);
+
+ if (uri->query) {
+ evbuffer_add(tmp, "?", 1);
+ URI_ADD_(query);
+ }
+
+ if (uri->fragment) {
+ evbuffer_add(tmp, "#", 1);
+ URI_ADD_(fragment);
+ }
+
+ evbuffer_add(tmp, "\0", 1); /* NUL */
+
+ joined_size = evbuffer_get_length(tmp);
+
+ if (joined_size > limit) {
+ /* It doesn't fit. */
+ evbuffer_free(tmp);
+ return NULL;
+ }
+ evbuffer_remove(tmp, buf, joined_size);
+
+ output = buf;
+err:
+ evbuffer_free(tmp);
+
+ return output;
+#undef URI_ADD_
+}
+
+const char *
+evhttp_uri_get_scheme(const struct evhttp_uri *uri)
+{
+ return uri->scheme;
+}
+const char *
+evhttp_uri_get_userinfo(const struct evhttp_uri *uri)
+{
+ return uri->userinfo;
+}
+const char *
+evhttp_uri_get_host(const struct evhttp_uri *uri)
+{
+ return uri->host;
+}
+int
+evhttp_uri_get_port(const struct evhttp_uri *uri)
+{
+ return uri->port;
+}
+const char *
+evhttp_uri_get_path(const struct evhttp_uri *uri)
+{
+ return uri->path;
+}
+const char *
+evhttp_uri_get_query(const struct evhttp_uri *uri)
+{
+ return uri->query;
+}
+const char *
+evhttp_uri_get_fragment(const struct evhttp_uri *uri)
+{
+ return uri->fragment;
+}
+
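+/* A brief aside on how the accessors above are typically used: the sketch
+ * below is illustrative only and is not part of this file; it assumes a
+ * hypothetical caller and omits error handling beyond the NULL check.
+ *
+ *	struct evhttp_uri *uri =
+ *	    evhttp_uri_parse("http://user@example.com:8080/a/b?x=1#frag");
+ *	if (uri) {
+ *		printf("host=%s port=%d path=%s\n",
+ *		    evhttp_uri_get_host(uri),
+ *		    evhttp_uri_get_port(uri),
+ *		    evhttp_uri_get_path(uri));
+ *		evhttp_uri_free(uri);
+ *	}
+ *
+ * An unspecified port is reported as -1, query and fragment come back NULL
+ * when absent, and evhttp_uri_join() can rebuild a string from the parts.
+ */
+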
+#define URI_SET_STR_(f) do { \
+ if (uri->f) \
+ mm_free(uri->f); \
+ if (f) { \
+ if ((uri->f = mm_strdup(f)) == NULL) { \
+ event_warn("%s: strdup()", __func__); \
+ return -1; \
+ } \
+ } else { \
+ uri->f = NULL; \
+ } \
+ } while(0)
+
+int
+evhttp_uri_set_scheme(struct evhttp_uri *uri, const char *scheme)
+{
+ if (scheme && !scheme_ok(scheme, scheme+strlen(scheme)))
+ return -1;
+
+ URI_SET_STR_(scheme);
+ return 0;
+}
+int
+evhttp_uri_set_userinfo(struct evhttp_uri *uri, const char *userinfo)
+{
+ if (userinfo && !userinfo_ok(userinfo, userinfo+strlen(userinfo)))
+ return -1;
+ URI_SET_STR_(userinfo);
+ return 0;
+}
+int
+evhttp_uri_set_host(struct evhttp_uri *uri, const char *host)
+{
+ if (host) {
+ if (host[0] == '[') {
+ if (! bracket_addr_ok(host, host+strlen(host)))
+ return -1;
+ } else {
+ if (! regname_ok(host, host+strlen(host)))
+ return -1;
+ }
+ }
+
+ URI_SET_STR_(host);
+ return 0;
+}
+int
+evhttp_uri_set_port(struct evhttp_uri *uri, int port)
+{
+ if (port < -1)
+ return -1;
+ uri->port = port;
+ return 0;
+}
+#define end_of_cpath(cp,p,f) \
+ ((const char*)(end_of_path(((char*)(cp)), (p), (f))))
+
+int
+evhttp_uri_set_path(struct evhttp_uri *uri, const char *path)
+{
+ if (path && end_of_cpath(path, PART_PATH, uri->flags) != path+strlen(path))
+ return -1;
+
+ URI_SET_STR_(path);
+ return 0;
+}
+int
+evhttp_uri_set_query(struct evhttp_uri *uri, const char *query)
+{
+ if (query && end_of_cpath(query, PART_QUERY, uri->flags) != query+strlen(query))
+ return -1;
+ URI_SET_STR_(query);
+ return 0;
+}
+int
+evhttp_uri_set_fragment(struct evhttp_uri *uri, const char *fragment)
+{
+ if (fragment && end_of_cpath(fragment, PART_FRAGMENT, uri->flags) != fragment+strlen(fragment))
+ return -1;
+ URI_SET_STR_(fragment);
+ return 0;
+}
diff --git a/libs/libevent/src/iocp-internal.h b/libs/libevent/src/iocp-internal.h
new file mode 100644
index 0000000000..93dbe2b1a4
--- /dev/null
+++ b/libs/libevent/src/iocp-internal.h
@@ -0,0 +1,201 @@
+/*
+ * Copyright (c) 2009-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef IOCP_INTERNAL_H_INCLUDED_
+#define IOCP_INTERNAL_H_INCLUDED_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct event_overlapped;
+struct event_iocp_port;
+struct evbuffer;
+typedef void (*iocp_callback)(struct event_overlapped *, ev_uintptr_t, ev_ssize_t, int success);
+
+/* This whole file is actually win32 only. We wrap the structures in a win32
+ * ifdef so that we can test-compile code that uses these interfaces on
+ * non-win32 platforms. */
+#ifdef _WIN32
+
+/**
+ Internal use only. Wraps an OVERLAPPED that we're using for libevent
+ functionality. Whenever an event_iocp_port gets an event for a given
+ OVERLAPPED*, it upcasts the pointer to an event_overlapped, and calls the
+ iocp_callback function with the event_overlapped, the iocp key, and the
+ number of bytes transferred as arguments.
+ */
+struct event_overlapped {
+ OVERLAPPED overlapped;
+ iocp_callback cb;
+};
+
+/* Mingw's headers don't define LPFN_ACCEPTEX. */
+
+typedef BOOL (WINAPI *AcceptExPtr)(SOCKET, SOCKET, PVOID, DWORD, DWORD, DWORD, LPDWORD, LPOVERLAPPED);
+typedef BOOL (WINAPI *ConnectExPtr)(SOCKET, const struct sockaddr *, int, PVOID, DWORD, LPDWORD, LPOVERLAPPED);
+typedef void (WINAPI *GetAcceptExSockaddrsPtr)(PVOID, DWORD, DWORD, DWORD, LPSOCKADDR *, LPINT, LPSOCKADDR *, LPINT);
+
+/** Internal use only. Holds pointers to functions that only some versions of
+ Windows provide.
+ */
+struct win32_extension_fns {
+ AcceptExPtr AcceptEx;
+ ConnectExPtr ConnectEx;
+ GetAcceptExSockaddrsPtr GetAcceptExSockaddrs;
+};
+
+/**
+ Internal use only. Stores a Windows IO Completion port, along with
+ related data.
+ */
+struct event_iocp_port {
+ /** The port itself */
+ HANDLE port;
+ /* A lock to cover internal structures. */
+ CRITICAL_SECTION lock;
+ /** Number of threads ever open on the port. */
+ short n_threads;
+ /** True iff we're shutting down all the threads on this port */
+ short shutdown;
+ /** How often the threads on this port check for shutdown and other
+ * conditions */
+ long ms;
+ /* The threads that are waiting for events. */
+ HANDLE *threads;
+ /** Number of threads currently open on this port. */
+ short n_live_threads;
+ /** A semaphore to signal when we are done shutting down. */
+ HANDLE *shutdownSemaphore;
+};
+
+const struct win32_extension_fns *event_get_win32_extension_fns_(void);
+#else
+/* Dummy definition so we can test-compile more things on unix. */
+struct event_overlapped {
+ iocp_callback cb;
+};
+#endif
+
+/** Initialize the fields in an event_overlapped.
+
+ @param overlapped The struct event_overlapped to initialize
+ @param cb The callback that should be invoked once the IO operation has
+ finished.
+ */
+void event_overlapped_init_(struct event_overlapped *, iocp_callback cb);
+
+/** Allocate and return a new evbuffer that supports overlapped IO on a given
+ socket. The socket must be associated with an IO completion port using
+ event_iocp_port_associate_.
+*/
+struct evbuffer *evbuffer_overlapped_new_(evutil_socket_t fd);
+
+/** XXXX Document (nickm) */
+evutil_socket_t evbuffer_overlapped_get_fd_(struct evbuffer *buf);
+
+void evbuffer_overlapped_set_fd_(struct evbuffer *buf, evutil_socket_t fd);
+
+/** Start reading data onto the end of an overlapped evbuffer.
+
+ An evbuffer can only have one read pending at a time. While the read
+ is in progress, no other data may be added to the end of the buffer.
+ The buffer must be created with event_overlapped_init_().
+ evbuffer_commit_read_() must be called in the completion callback.
+
+ @param buf The buffer to read onto
+ @param n The number of bytes to try to read.
+ @param ol Overlapped object with associated completion callback.
+ @return 0 on success, -1 on error.
+ */
+int evbuffer_launch_read_(struct evbuffer *buf, size_t n, struct event_overlapped *ol);
+
+/** Start writing data from the start of an evbuffer.
+
+ An evbuffer can only have one write pending at a time. While the write is
+ in progress, no other data may be removed from the front of the buffer.
+ The buffer must be created with event_overlapped_init_().
+ evbuffer_commit_write_() must be called in the completion callback.
+
+ @param buf The buffer to write from
+ @param n The number of bytes to try to write.
+ @param ol Overlapped object with associated completion callback.
+ @return 0 on success, -1 on error.
+ */
+int evbuffer_launch_write_(struct evbuffer *buf, ev_ssize_t n, struct event_overlapped *ol);
+
+/** XXX document */
+void evbuffer_commit_read_(struct evbuffer *, ev_ssize_t);
+void evbuffer_commit_write_(struct evbuffer *, ev_ssize_t);
+
+/** Create an IOCP, and launch its worker threads. Internal use only.
+
+ This interface is unstable, and will change.
+ */
+struct event_iocp_port *event_iocp_port_launch_(int n_cpus);
+
+/** Associate a file descriptor with an iocp, such that overlapped IO on the
+ fd will happen on one of the iocp's worker threads.
+*/
+int event_iocp_port_associate_(struct event_iocp_port *port, evutil_socket_t fd,
+ ev_uintptr_t key);
+
+/** Tell all threads serving an iocp to stop. Wait for up to waitMsec for all
+ the threads to finish whatever they're doing. If waitMsec is -1, wait
+ as long as required. If all the threads are done, free the port and return
+ 0. Otherwise, return -1. If you get a -1 return value, it is safe to call
+ this function again.
+*/
+int event_iocp_shutdown_(struct event_iocp_port *port, long waitMsec);
+
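+/* Putting the pieces above together, a rough lifecycle sketch (illustrative
+ * only, not shipped code: the callback body, the 4096-byte read size, the
+ * thread count, and the key value 1 are arbitrary; "the_buffer" stands for
+ * a buffer visible to the callback, and in real code the evbuffer and the
+ * event_overlapped must stay alive until the completion callback runs):
+ *
+ *	static void read_done(struct event_overlapped *o, ev_uintptr_t key,
+ *	    ev_ssize_t nbytes, int ok)
+ *	{
+ *		// hand the completed read back to the buffer it was launched on
+ *		evbuffer_commit_read_(the_buffer, nbytes);
+ *	}
+ *
+ *	struct event_iocp_port *port = event_iocp_port_launch_(2);
+ *	event_iocp_port_associate_(port, fd, 1);
+ *	struct evbuffer *the_buffer = evbuffer_overlapped_new_(fd);
+ *	struct event_overlapped ol;
+ *	event_overlapped_init_(&ol, read_done);
+ *	evbuffer_launch_read_(the_buffer, 4096, &ol);
+ *	...
+ *	event_iocp_shutdown_(port, -1);
+ */
+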
+/* FIXME document. */
+int event_iocp_activate_overlapped_(struct event_iocp_port *port,
+ struct event_overlapped *o,
+ ev_uintptr_t key, ev_uint32_t n_bytes);
+
+struct event_base;
+/* FIXME document. */
+struct event_iocp_port *event_base_get_iocp_(struct event_base *base);
+
+/* FIXME document. */
+int event_base_start_iocp_(struct event_base *base, int n_cpus);
+void event_base_stop_iocp_(struct event_base *base);
+
+/* FIXME document. */
+struct bufferevent *bufferevent_async_new_(struct event_base *base,
+ evutil_socket_t fd, int options);
+
+/* FIXME document. */
+void bufferevent_async_set_connected_(struct bufferevent *bev);
+int bufferevent_async_can_connect_(struct bufferevent *bev);
+int bufferevent_async_connect_(struct bufferevent *bev, evutil_socket_t fd,
+ const struct sockaddr *sa, int socklen);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/libs/libevent/src/ipv6-internal.h b/libs/libevent/src/ipv6-internal.h
new file mode 100644
index 0000000000..0c207377b8
--- /dev/null
+++ b/libs/libevent/src/ipv6-internal.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2009-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* Internal use only: Fake IPv6 structures and values on platforms that
+ * do not have them */
+
+#ifndef IPV6_INTERNAL_H_INCLUDED_
+#define IPV6_INTERNAL_H_INCLUDED_
+
+#include "event2/event-config.h"
+#include "evconfig-private.h"
+
+#include <sys/types.h>
+#ifdef EVENT__HAVE_SYS_SOCKET_H
+#include <sys/socket.h>
+#endif
+#include "event2/util.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** @file ipv6-internal.h
+ *
+ * Replacement types and functions for platforms that don't support ipv6
+ * properly.
+ */
+
+#ifndef EVENT__HAVE_STRUCT_IN6_ADDR
+struct in6_addr {
+ ev_uint8_t s6_addr[16];
+};
+#endif
+
+#ifndef EVENT__HAVE_SA_FAMILY_T
+typedef int sa_family_t;
+#endif
+
+#ifndef EVENT__HAVE_STRUCT_SOCKADDR_IN6
+struct sockaddr_in6 {
+ /* This will fail if we find a struct sockaddr that doesn't have
+ * sa_family as the first element. */
+ sa_family_t sin6_family;
+ ev_uint16_t sin6_port;
+ struct in6_addr sin6_addr;
+};
+#endif
+
+#ifndef AF_INET6
+#define AF_INET6 3333
+#endif
+#ifndef PF_INET6
+#define PF_INET6 AF_INET6
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/libs/libevent/src/listener.c b/libs/libevent/src/listener.c
new file mode 100644
index 0000000000..2af14e3a7b
--- /dev/null
+++ b/libs/libevent/src/listener.c
@@ -0,0 +1,889 @@
+/*
+ * Copyright (c) 2009-2012 Niels Provos, Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "event2/event-config.h"
+#include "evconfig-private.h"
+
+#include <sys/types.h>
+
+#ifdef _WIN32
+#ifndef _WIN32_WINNT
+/* Minimum required for InitializeCriticalSectionAndSpinCount */
+#define _WIN32_WINNT 0x0403
+#endif
+#include <winsock2.h>
+#include <ws2tcpip.h>
+#include <mswsock.h>
+#endif
+#include <errno.h>
+#ifdef EVENT__HAVE_SYS_SOCKET_H
+#include <sys/socket.h>
+#endif
+#ifdef EVENT__HAVE_FCNTL_H
+#include <fcntl.h>
+#endif
+#ifdef EVENT__HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+
+#include "event2/listener.h"
+#include "event2/util.h"
+#include "event2/event.h"
+#include "event2/event_struct.h"
+#include "mm-internal.h"
+#include "util-internal.h"
+#include "log-internal.h"
+#include "evthread-internal.h"
+#ifdef _WIN32
+#include "iocp-internal.h"
+#include "defer-internal.h"
+#include "event-internal.h"
+#endif
+
+struct evconnlistener_ops {
+ int (*enable)(struct evconnlistener *);
+ int (*disable)(struct evconnlistener *);
+ void (*destroy)(struct evconnlistener *);
+ void (*shutdown)(struct evconnlistener *);
+ evutil_socket_t (*getfd)(struct evconnlistener *);
+ struct event_base *(*getbase)(struct evconnlistener *);
+};
+
+struct evconnlistener {
+ const struct evconnlistener_ops *ops;
+ void *lock;
+ evconnlistener_cb cb;
+ evconnlistener_errorcb errorcb;
+ void *user_data;
+ unsigned flags;
+ short refcnt;
+ int accept4_flags;
+ unsigned enabled : 1;
+};
+
+struct evconnlistener_event {
+ struct evconnlistener base;
+ struct event listener;
+};
+
+#ifdef _WIN32
+struct evconnlistener_iocp {
+ struct evconnlistener base;
+ evutil_socket_t fd;
+ struct event_base *event_base;
+ struct event_iocp_port *port;
+ short n_accepting;
+ unsigned shutting_down : 1;
+ unsigned event_added : 1;
+ struct accepting_socket **accepting;
+};
+#endif
+
+#define LOCK(listener) EVLOCK_LOCK((listener)->lock, 0)
+#define UNLOCK(listener) EVLOCK_UNLOCK((listener)->lock, 0)
+
+struct evconnlistener *
+evconnlistener_new_async(struct event_base *base,
+ evconnlistener_cb cb, void *ptr, unsigned flags, int backlog,
+ evutil_socket_t fd); /* XXXX export this? */
+
+static int event_listener_enable(struct evconnlistener *);
+static int event_listener_disable(struct evconnlistener *);
+static void event_listener_destroy(struct evconnlistener *);
+static evutil_socket_t event_listener_getfd(struct evconnlistener *);
+static struct event_base *event_listener_getbase(struct evconnlistener *);
+
+#if 0
+static void
+listener_incref_and_lock(struct evconnlistener *listener)
+{
+ LOCK(listener);
+ ++listener->refcnt;
+}
+#endif
+
+static int
+listener_decref_and_unlock(struct evconnlistener *listener)
+{
+ int refcnt = --listener->refcnt;
+ if (refcnt == 0) {
+ listener->ops->destroy(listener);
+ UNLOCK(listener);
+ EVTHREAD_FREE_LOCK(listener->lock, EVTHREAD_LOCKTYPE_RECURSIVE);
+ mm_free(listener);
+ return 1;
+ } else {
+ UNLOCK(listener);
+ return 0;
+ }
+}
+
+static const struct evconnlistener_ops evconnlistener_event_ops = {
+ event_listener_enable,
+ event_listener_disable,
+ event_listener_destroy,
+ NULL, /* shutdown */
+ event_listener_getfd,
+ event_listener_getbase
+};
+
+static void listener_read_cb(evutil_socket_t, short, void *);
+
+struct evconnlistener *
+evconnlistener_new(struct event_base *base,
+ evconnlistener_cb cb, void *ptr, unsigned flags, int backlog,
+ evutil_socket_t fd)
+{
+ struct evconnlistener_event *lev;
+
+#ifdef _WIN32
+ if (base && event_base_get_iocp_(base)) {
+ const struct win32_extension_fns *ext =
+ event_get_win32_extension_fns_();
+ if (ext->AcceptEx && ext->GetAcceptExSockaddrs)
+ return evconnlistener_new_async(base, cb, ptr, flags,
+ backlog, fd);
+ }
+#endif
+
+ if (backlog > 0) {
+ if (listen(fd, backlog) < 0)
+ return NULL;
+ } else if (backlog < 0) {
+ if (listen(fd, 128) < 0)
+ return NULL;
+ }
+
+ lev = mm_calloc(1, sizeof(struct evconnlistener_event));
+ if (!lev)
+ return NULL;
+
+ lev->base.ops = &evconnlistener_event_ops;
+ lev->base.cb = cb;
+ lev->base.user_data = ptr;
+ lev->base.flags = flags;
+ lev->base.refcnt = 1;
+
+ lev->base.accept4_flags = 0;
+ if (!(flags & LEV_OPT_LEAVE_SOCKETS_BLOCKING))
+ lev->base.accept4_flags |= EVUTIL_SOCK_NONBLOCK;
+ if (flags & LEV_OPT_CLOSE_ON_EXEC)
+ lev->base.accept4_flags |= EVUTIL_SOCK_CLOEXEC;
+
+ if (flags & LEV_OPT_THREADSAFE) {
+ EVTHREAD_ALLOC_LOCK(lev->base.lock, EVTHREAD_LOCKTYPE_RECURSIVE);
+ }
+
+ event_assign(&lev->listener, base, fd, EV_READ|EV_PERSIST,
+ listener_read_cb, lev);
+
+ if (!(flags & LEV_OPT_DISABLED))
+ evconnlistener_enable(&lev->base);
+
+ return &lev->base;
+}
+
+struct evconnlistener *
+evconnlistener_new_bind(struct event_base *base, evconnlistener_cb cb,
+ void *ptr, unsigned flags, int backlog, const struct sockaddr *sa,
+ int socklen)
+{
+ struct evconnlistener *listener;
+ evutil_socket_t fd;
+ int on = 1;
+ int family = sa ? sa->sa_family : AF_UNSPEC;
+ int socktype = SOCK_STREAM | EVUTIL_SOCK_NONBLOCK;
+
+ if (backlog == 0)
+ return NULL;
+
+ if (flags & LEV_OPT_CLOSE_ON_EXEC)
+ socktype |= EVUTIL_SOCK_CLOEXEC;
+
+ fd = evutil_socket_(family, socktype, 0);
+ if (fd == -1)
+ return NULL;
+
+ if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, (void*)&on, sizeof(on))<0)
+ goto err;
+
+ if (flags & LEV_OPT_REUSEABLE) {
+ if (evutil_make_listen_socket_reuseable(fd) < 0)
+ goto err;
+ }
+
+ if (flags & LEV_OPT_REUSEABLE_PORT) {
+ if (evutil_make_listen_socket_reuseable_port(fd) < 0)
+ goto err;
+ }
+
+ if (flags & LEV_OPT_DEFERRED_ACCEPT) {
+ if (evutil_make_tcp_listen_socket_deferred(fd) < 0)
+ goto err;
+ }
+
+ if (sa) {
+ if (bind(fd, sa, socklen)<0)
+ goto err;
+ }
+
+ listener = evconnlistener_new(base, cb, ptr, flags, backlog, fd);
+ if (!listener)
+ goto err;
+
+ return listener;
+err:
+ evutil_closesocket(fd);
+ return NULL;
+}
+
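+/* As a usage sketch for the constructor above (illustrative only, not part
+ * of this file: "base" stands for an existing struct event_base *, and the
+ * callback body and port number are placeholders):
+ *
+ *	static void on_accept(struct evconnlistener *l, evutil_socket_t fd,
+ *	    struct sockaddr *addr, int socklen, void *ctx)
+ *	{
+ *		// fd is the accepted connection; wrap it in a bufferevent, etc.
+ *	}
+ *
+ *	struct sockaddr_in sin;
+ *	memset(&sin, 0, sizeof(sin));
+ *	sin.sin_family = AF_INET;
+ *	sin.sin_addr.s_addr = htonl(INADDR_ANY);
+ *	sin.sin_port = htons(8080);
+ *	struct evconnlistener *l = evconnlistener_new_bind(base, on_accept,
+ *	    NULL, LEV_OPT_CLOSE_ON_FREE | LEV_OPT_REUSEABLE, -1,
+ *	    (struct sockaddr *)&sin, sizeof(sin));
+ *
+ * A negative backlog falls back to the listen(fd, 128) default chosen above,
+ * and LEV_OPT_CLOSE_ON_FREE makes evconnlistener_free() close the socket.
+ */
+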
+void
+evconnlistener_free(struct evconnlistener *lev)
+{
+ LOCK(lev);
+ lev->cb = NULL;
+ lev->errorcb = NULL;
+ if (lev->ops->shutdown)
+ lev->ops->shutdown(lev);
+ listener_decref_and_unlock(lev);
+}
+
+static void
+event_listener_destroy(struct evconnlistener *lev)
+{
+ struct evconnlistener_event *lev_e =
+ EVUTIL_UPCAST(lev, struct evconnlistener_event, base);
+
+ event_del(&lev_e->listener);
+ if (lev->flags & LEV_OPT_CLOSE_ON_FREE)
+ evutil_closesocket(event_get_fd(&lev_e->listener));
+ event_debug_unassign(&lev_e->listener);
+}
+
+int
+evconnlistener_enable(struct evconnlistener *lev)
+{
+ int r;
+ LOCK(lev);
+ lev->enabled = 1;
+ if (lev->cb)
+ r = lev->ops->enable(lev);
+ else
+ r = 0;
+ UNLOCK(lev);
+ return r;
+}
+
+int
+evconnlistener_disable(struct evconnlistener *lev)
+{
+ int r;
+ LOCK(lev);
+ lev->enabled = 0;
+ r = lev->ops->disable(lev);
+ UNLOCK(lev);
+ return r;
+}
+
+static int
+event_listener_enable(struct evconnlistener *lev)
+{
+ struct evconnlistener_event *lev_e =
+ EVUTIL_UPCAST(lev, struct evconnlistener_event, base);
+ return event_add(&lev_e->listener, NULL);
+}
+
+static int
+event_listener_disable(struct evconnlistener *lev)
+{
+ struct evconnlistener_event *lev_e =
+ EVUTIL_UPCAST(lev, struct evconnlistener_event, base);
+ return event_del(&lev_e->listener);
+}
+
+evutil_socket_t
+evconnlistener_get_fd(struct evconnlistener *lev)
+{
+ evutil_socket_t fd;
+ LOCK(lev);
+ fd = lev->ops->getfd(lev);
+ UNLOCK(lev);
+ return fd;
+}
+
+static evutil_socket_t
+event_listener_getfd(struct evconnlistener *lev)
+{
+ struct evconnlistener_event *lev_e =
+ EVUTIL_UPCAST(lev, struct evconnlistener_event, base);
+ return event_get_fd(&lev_e->listener);
+}
+
+struct event_base *
+evconnlistener_get_base(struct evconnlistener *lev)
+{
+ struct event_base *base;
+ LOCK(lev);
+ base = lev->ops->getbase(lev);
+ UNLOCK(lev);
+ return base;
+}
+
+static struct event_base *
+event_listener_getbase(struct evconnlistener *lev)
+{
+ struct evconnlistener_event *lev_e =
+ EVUTIL_UPCAST(lev, struct evconnlistener_event, base);
+ return event_get_base(&lev_e->listener);
+}
+
+void
+evconnlistener_set_cb(struct evconnlistener *lev,
+ evconnlistener_cb cb, void *arg)
+{
+ int enable = 0;
+ LOCK(lev);
+ if (lev->enabled && !lev->cb)
+ enable = 1;
+ lev->cb = cb;
+ lev->user_data = arg;
+ if (enable)
+ evconnlistener_enable(lev);
+ UNLOCK(lev);
+}
+
+void
+evconnlistener_set_error_cb(struct evconnlistener *lev,
+ evconnlistener_errorcb errorcb)
+{
+ LOCK(lev);
+ lev->errorcb = errorcb;
+ UNLOCK(lev);
+}
+
+static void
+listener_read_cb(evutil_socket_t fd, short what, void *p)
+{
+ struct evconnlistener *lev = p;
+ int err;
+ evconnlistener_cb cb;
+ evconnlistener_errorcb errorcb;
+ void *user_data;
+ LOCK(lev);
+ while (1) {
+ struct sockaddr_storage ss;
+ ev_socklen_t socklen = sizeof(ss);
+ evutil_socket_t new_fd = evutil_accept4_(fd, (struct sockaddr*)&ss, &socklen, lev->accept4_flags);
+ if (new_fd < 0)
+ break;
+ if (socklen == 0) {
+ /* This can happen with some older linux kernels in
+ * response to nmap. */
+ evutil_closesocket(new_fd);
+ continue;
+ }
+
+ if (lev->cb == NULL) {
+ evutil_closesocket(new_fd);
+ UNLOCK(lev);
+ return;
+ }
+ ++lev->refcnt;
+ cb = lev->cb;
+ user_data = lev->user_data;
+ UNLOCK(lev);
+ cb(lev, new_fd, (struct sockaddr*)&ss, (int)socklen,
+ user_data);
+ LOCK(lev);
+ if (lev->refcnt == 1) {
+ int freed = listener_decref_and_unlock(lev);
+ EVUTIL_ASSERT(freed);
+
+ evutil_closesocket(new_fd);
+ return;
+ }
+ --lev->refcnt;
+ }
+ err = evutil_socket_geterror(fd);
+ if (EVUTIL_ERR_ACCEPT_RETRIABLE(err)) {
+ UNLOCK(lev);
+ return;
+ }
+ if (lev->errorcb != NULL) {
+ ++lev->refcnt;
+ errorcb = lev->errorcb;
+ user_data = lev->user_data;
+ UNLOCK(lev);
+ errorcb(lev, user_data);
+ LOCK(lev);
+ listener_decref_and_unlock(lev);
+ } else {
+ event_sock_warn(fd, "Error from accept() call");
+ }
+}
+
+#ifdef _WIN32
+struct accepting_socket {
+ CRITICAL_SECTION lock;
+ struct event_overlapped overlapped;
+ SOCKET s;
+ int error;
+ struct event_callback deferred;
+ struct evconnlistener_iocp *lev;
+ ev_uint8_t buflen;
+ ev_uint8_t family;
+ unsigned free_on_cb:1;
+ char addrbuf[1];
+};
+
+static void accepted_socket_cb(struct event_overlapped *o, ev_uintptr_t key,
+ ev_ssize_t n, int ok);
+static void accepted_socket_invoke_user_cb(struct event_callback *cb, void *arg);
+
+static void
+iocp_listener_event_add(struct evconnlistener_iocp *lev)
+{
+ if (lev->event_added)
+ return;
+
+ lev->event_added = 1;
+ event_base_add_virtual_(lev->event_base);
+}
+
+static void
+iocp_listener_event_del(struct evconnlistener_iocp *lev)
+{
+ if (!lev->event_added)
+ return;
+
+ lev->event_added = 0;
+ event_base_del_virtual_(lev->event_base);
+}
+
+static struct accepting_socket *
+new_accepting_socket(struct evconnlistener_iocp *lev, int family)
+{
+ struct accepting_socket *res;
+ int addrlen;
+ int buflen;
+
+ if (family == AF_INET)
+ addrlen = sizeof(struct sockaddr_in);
+ else if (family == AF_INET6)
+ addrlen = sizeof(struct sockaddr_in6);
+ else
+ return NULL;
+ buflen = (addrlen+16)*2;
+
+ res = mm_calloc(1,sizeof(struct accepting_socket)-1+buflen);
+ if (!res)
+ return NULL;
+
+ event_overlapped_init_(&res->overlapped, accepted_socket_cb);
+ res->s = INVALID_SOCKET;
+ res->lev = lev;
+ res->buflen = buflen;
+ res->family = family;
+
+ event_deferred_cb_init_(&res->deferred,
+ event_base_get_npriorities(lev->event_base) / 2,
+ accepted_socket_invoke_user_cb, res);
+
+ InitializeCriticalSectionAndSpinCount(&res->lock, 1000);
+
+ return res;
+}
+
+static void
+free_and_unlock_accepting_socket(struct accepting_socket *as)
+{
+ /* requires lock. */
+ if (as->s != INVALID_SOCKET)
+ closesocket(as->s);
+
+ LeaveCriticalSection(&as->lock);
+ DeleteCriticalSection(&as->lock);
+ mm_free(as);
+}
+
+static int
+start_accepting(struct accepting_socket *as)
+{
+ /* requires lock */
+ const struct win32_extension_fns *ext = event_get_win32_extension_fns_();
+ DWORD pending = 0;
+ SOCKET s = socket(as->family, SOCK_STREAM, 0);
+ int error = 0;
+
+ if (!as->lev->base.enabled)
+ return 0;
+
+ if (s == INVALID_SOCKET) {
+ error = WSAGetLastError();
+ goto report_err;
+ }
+
+ /* XXXX It turns out we need to do this again later. Does this call
+ * have any effect? */
+ setsockopt(s, SOL_SOCKET, SO_UPDATE_ACCEPT_CONTEXT,
+ (char *)&as->lev->fd, sizeof(&as->lev->fd));
+
+ if (!(as->lev->base.flags & LEV_OPT_LEAVE_SOCKETS_BLOCKING))
+ evutil_make_socket_nonblocking(s);
+
+ if (event_iocp_port_associate_(as->lev->port, s, 1) < 0) {
+ closesocket(s);
+ return -1;
+ }
+
+ as->s = s;
+
+ if (ext->AcceptEx(as->lev->fd, s, as->addrbuf, 0,
+ as->buflen/2, as->buflen/2, &pending, &as->overlapped.overlapped))
+ {
+ /* Immediate success! */
+ accepted_socket_cb(&as->overlapped, 1, 0, 1);
+ } else {
+ error = WSAGetLastError();
+ if (error != ERROR_IO_PENDING) {
+ goto report_err;
+ }
+ }
+
+ return 0;
+
+report_err:
+ as->error = error;
+ event_deferred_cb_schedule_(
+ as->lev->event_base,
+ &as->deferred);
+ return 0;
+}
+
+static void
+stop_accepting(struct accepting_socket *as)
+{
+ /* requires lock. */
+ SOCKET s = as->s;
+ as->s = INVALID_SOCKET;
+ closesocket(s);
+}
+
+static void
+accepted_socket_invoke_user_cb(struct event_callback *dcb, void *arg)
+{
+ struct accepting_socket *as = arg;
+
+ struct sockaddr *sa_local=NULL, *sa_remote=NULL;
+ int socklen_local=0, socklen_remote=0;
+ const struct win32_extension_fns *ext = event_get_win32_extension_fns_();
+ struct evconnlistener *lev = &as->lev->base;
+ evutil_socket_t sock=-1;
+ void *data;
+ evconnlistener_cb cb=NULL;
+ evconnlistener_errorcb errorcb=NULL;
+ int error;
+
+ EVUTIL_ASSERT(ext->GetAcceptExSockaddrs);
+
+ LOCK(lev);
+ EnterCriticalSection(&as->lock);
+ if (as->free_on_cb) {
+ free_and_unlock_accepting_socket(as);
+ listener_decref_and_unlock(lev);
+ return;
+ }
+
+ ++lev->refcnt;
+
+ error = as->error;
+ if (error) {
+ as->error = 0;
+ errorcb = lev->errorcb;
+ } else {
+ ext->GetAcceptExSockaddrs(
+ as->addrbuf, 0, as->buflen/2, as->buflen/2,
+ &sa_local, &socklen_local, &sa_remote,
+ &socklen_remote);
+ sock = as->s;
+ cb = lev->cb;
+ as->s = INVALID_SOCKET;
+
+ /* We need to call this so getsockname, getpeername, and
+ * shutdown work correctly on the accepted socket. */
+ /* XXXX handle error? */
+ setsockopt(sock, SOL_SOCKET, SO_UPDATE_ACCEPT_CONTEXT,
+ (char *)&as->lev->fd, sizeof(&as->lev->fd));
+ }
+ data = lev->user_data;
+
+ LeaveCriticalSection(&as->lock);
+ UNLOCK(lev);
+
+ if (errorcb) {
+ WSASetLastError(error);
+ errorcb(lev, data);
+ } else if (cb) {
+ cb(lev, sock, sa_remote, socklen_remote, data);
+ }
+
+ LOCK(lev);
+ if (listener_decref_and_unlock(lev))
+ return;
+
+ EnterCriticalSection(&as->lock);
+ start_accepting(as);
+ LeaveCriticalSection(&as->lock);
+}
+
+static void
+accepted_socket_cb(struct event_overlapped *o, ev_uintptr_t key, ev_ssize_t n, int ok)
+{
+ struct accepting_socket *as =
+ EVUTIL_UPCAST(o, struct accepting_socket, overlapped);
+
+ LOCK(&as->lev->base);
+ EnterCriticalSection(&as->lock);
+ if (ok) {
+ /* XXXX Don't do this if some EV_MT flag is set. */
+ event_deferred_cb_schedule_(
+ as->lev->event_base,
+ &as->deferred);
+ LeaveCriticalSection(&as->lock);
+ } else if (as->free_on_cb) {
+ struct evconnlistener *lev = &as->lev->base;
+ free_and_unlock_accepting_socket(as);
+ listener_decref_and_unlock(lev);
+ return;
+ } else if (as->s == INVALID_SOCKET) {
+ /* This is okay; we were disabled by iocp_listener_disable. */
+ LeaveCriticalSection(&as->lock);
+ } else {
+ /* Some error on accept that we couldn't actually handle. */
+ BOOL ok;
+ DWORD transfer = 0, flags=0;
+ event_sock_warn(as->s, "Unexpected error on AcceptEx");
+ ok = WSAGetOverlappedResult(as->s, &o->overlapped,
+ &transfer, FALSE, &flags);
+ if (ok) {
+ /* well, that was confusing! */
+ as->error = 1;
+ } else {
+ as->error = WSAGetLastError();
+ }
+ event_deferred_cb_schedule_(
+ as->lev->event_base,
+ &as->deferred);
+ LeaveCriticalSection(&as->lock);
+ }
+ UNLOCK(&as->lev->base);
+}
+
+static int
+iocp_listener_enable(struct evconnlistener *lev)
+{
+ int i;
+ struct evconnlistener_iocp *lev_iocp =
+ EVUTIL_UPCAST(lev, struct evconnlistener_iocp, base);
+
+ LOCK(lev);
+ iocp_listener_event_add(lev_iocp);
+ for (i = 0; i < lev_iocp->n_accepting; ++i) {
+ struct accepting_socket *as = lev_iocp->accepting[i];
+ if (!as)
+ continue;
+ EnterCriticalSection(&as->lock);
+ if (!as->free_on_cb && as->s == INVALID_SOCKET)
+ start_accepting(as);
+ LeaveCriticalSection(&as->lock);
+ }
+ UNLOCK(lev);
+ return 0;
+}
+
+static int
+iocp_listener_disable_impl(struct evconnlistener *lev, int shutdown)
+{
+ int i;
+ struct evconnlistener_iocp *lev_iocp =
+ EVUTIL_UPCAST(lev, struct evconnlistener_iocp, base);
+
+ LOCK(lev);
+ iocp_listener_event_del(lev_iocp);
+ for (i = 0; i < lev_iocp->n_accepting; ++i) {
+ struct accepting_socket *as = lev_iocp->accepting[i];
+ if (!as)
+ continue;
+ EnterCriticalSection(&as->lock);
+ if (!as->free_on_cb && as->s != INVALID_SOCKET) {
+ if (shutdown)
+ as->free_on_cb = 1;
+ stop_accepting(as);
+ }
+ LeaveCriticalSection(&as->lock);
+ }
+
+ if (shutdown && lev->flags & LEV_OPT_CLOSE_ON_FREE)
+ evutil_closesocket(lev_iocp->fd);
+
+ UNLOCK(lev);
+ return 0;
+}
+
+static int
+iocp_listener_disable(struct evconnlistener *lev)
+{
+ return iocp_listener_disable_impl(lev,0);
+}
+
+static void
+iocp_listener_destroy(struct evconnlistener *lev)
+{
+ struct evconnlistener_iocp *lev_iocp =
+ EVUTIL_UPCAST(lev, struct evconnlistener_iocp, base);
+
+ if (! lev_iocp->shutting_down) {
+ lev_iocp->shutting_down = 1;
+ iocp_listener_disable_impl(lev,1);
+ }
+
+}
+
+static evutil_socket_t
+iocp_listener_getfd(struct evconnlistener *lev)
+{
+ struct evconnlistener_iocp *lev_iocp =
+ EVUTIL_UPCAST(lev, struct evconnlistener_iocp, base);
+ return lev_iocp->fd;
+}
+static struct event_base *
+iocp_listener_getbase(struct evconnlistener *lev)
+{
+ struct evconnlistener_iocp *lev_iocp =
+ EVUTIL_UPCAST(lev, struct evconnlistener_iocp, base);
+ return lev_iocp->event_base;
+}
+
+static const struct evconnlistener_ops evconnlistener_iocp_ops = {
+ iocp_listener_enable,
+ iocp_listener_disable,
+ iocp_listener_destroy,
+ iocp_listener_destroy, /* shutdown */
+ iocp_listener_getfd,
+ iocp_listener_getbase
+};
+
+/* XXX define some way to override this. */
+#define N_SOCKETS_PER_LISTENER 4
+
+struct evconnlistener *
+evconnlistener_new_async(struct event_base *base,
+ evconnlistener_cb cb, void *ptr, unsigned flags, int backlog,
+ evutil_socket_t fd)
+{
+ struct sockaddr_storage ss;
+ int socklen = sizeof(ss);
+ struct evconnlistener_iocp *lev;
+ int i;
+
+ flags |= LEV_OPT_THREADSAFE;
+
+ if (!base || !event_base_get_iocp_(base))
+ goto err;
+
+ /* XXXX duplicate code */
+ if (backlog > 0) {
+ if (listen(fd, backlog) < 0)
+ goto err;
+ } else if (backlog < 0) {
+ if (listen(fd, 128) < 0)
+ goto err;
+ }
+ if (getsockname(fd, (struct sockaddr*)&ss, &socklen)) {
+ event_sock_warn(fd, "getsockname");
+ goto err;
+ }
+ lev = mm_calloc(1, sizeof(struct evconnlistener_iocp));
+ if (!lev) {
+ event_warn("calloc");
+ goto err;
+ }
+ lev->base.ops = &evconnlistener_iocp_ops;
+ lev->base.cb = cb;
+ lev->base.user_data = ptr;
+ lev->base.flags = flags;
+ lev->base.refcnt = 1;
+ lev->base.enabled = 1;
+
+ lev->port = event_base_get_iocp_(base);
+ lev->fd = fd;
+ lev->event_base = base;
+
+
+ if (event_iocp_port_associate_(lev->port, fd, 1) < 0)
+ goto err_free_lev;
+
+ EVTHREAD_ALLOC_LOCK(lev->base.lock, EVTHREAD_LOCKTYPE_RECURSIVE);
+
+ lev->n_accepting = N_SOCKETS_PER_LISTENER;
+ lev->accepting = mm_calloc(lev->n_accepting,
+ sizeof(struct accepting_socket *));
+ if (!lev->accepting) {
+ event_warn("calloc");
+ goto err_delete_lock;
+ }
+ for (i = 0; i < lev->n_accepting; ++i) {
+ lev->accepting[i] = new_accepting_socket(lev, ss.ss_family);
+ if (!lev->accepting[i]) {
+ event_warnx("Couldn't create accepting socket");
+ goto err_free_accepting;
+ }
+ if (cb && start_accepting(lev->accepting[i]) < 0) {
+ event_warnx("Couldn't start accepting on socket");
+ EnterCriticalSection(&lev->accepting[i]->lock);
+ free_and_unlock_accepting_socket(lev->accepting[i]);
+ goto err_free_accepting;
+ }
+ ++lev->base.refcnt;
+ }
+
+ iocp_listener_event_add(lev);
+
+ return &lev->base;
+
+err_free_accepting:
+ mm_free(lev->accepting);
+ /* XXXX free the other elements. */
+err_delete_lock:
+ EVTHREAD_FREE_LOCK(lev->base.lock, EVTHREAD_LOCKTYPE_RECURSIVE);
+err_free_lev:
+ mm_free(lev);
+err:
+ /* Don't close the fd; that is the caller's responsibility. */
+ return NULL;
+}
+
+#endif
diff --git a/libs/libevent/src/log-internal.h b/libs/libevent/src/log-internal.h
new file mode 100644
index 0000000000..330478a9ed
--- /dev/null
+++ b/libs/libevent/src/log-internal.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
+ * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef LOG_INTERNAL_H_INCLUDED_
+#define LOG_INTERNAL_H_INCLUDED_
+
+#include "event2/util.h"
+
+#ifdef __GNUC__
+#define EV_CHECK_FMT(a,b) __attribute__((format(printf, a, b)))
+#define EV_NORETURN __attribute__((noreturn))
+#else
+#define EV_CHECK_FMT(a,b)
+#define EV_NORETURN
+#endif
+
+#define EVENT_ERR_ABORT_ ((int)0xdeaddead)
+
+#define USE_GLOBAL_FOR_DEBUG_LOGGING
+
+#if !defined(EVENT__DISABLE_DEBUG_MODE) || defined(USE_DEBUG)
+#define EVENT_DEBUG_LOGGING_ENABLED
+#endif
+
+#ifdef EVENT_DEBUG_LOGGING_ENABLED
+#ifdef USE_GLOBAL_FOR_DEBUG_LOGGING
+extern ev_uint32_t event_debug_logging_mask_;
+#define event_debug_get_logging_mask_() (event_debug_logging_mask_)
+#else
+ev_uint32_t event_debug_get_logging_mask_(void);
+#endif
+#else
+#define event_debug_get_logging_mask_() (0)
+#endif
+
+void event_err(int eval, const char *fmt, ...) EV_CHECK_FMT(2,3) EV_NORETURN;
+void event_warn(const char *fmt, ...) EV_CHECK_FMT(1,2);
+void event_sock_err(int eval, evutil_socket_t sock, const char *fmt, ...) EV_CHECK_FMT(3,4) EV_NORETURN;
+void event_sock_warn(evutil_socket_t sock, const char *fmt, ...) EV_CHECK_FMT(2,3);
+void event_errx(int eval, const char *fmt, ...) EV_CHECK_FMT(2,3) EV_NORETURN;
+void event_warnx(const char *fmt, ...) EV_CHECK_FMT(1,2);
+void event_msgx(const char *fmt, ...) EV_CHECK_FMT(1,2);
+void event_debugx_(const char *fmt, ...) EV_CHECK_FMT(1,2);
+
+void event_logv_(int severity, const char *errstr, const char *fmt, va_list ap)
+ EV_CHECK_FMT(3,0);
+
+#ifdef EVENT_DEBUG_LOGGING_ENABLED
+#define event_debug(x) do { \
+ if (event_debug_get_logging_mask_()) { \
+ event_debugx_ x; \
+ } \
+ } while (0)
+#else
+#define event_debug(x) ((void)0)
+#endif
+
+#undef EV_CHECK_FMT
+
+#endif
diff --git a/libs/libevent/src/log.c b/libs/libevent/src/log.c
new file mode 100644
index 0000000000..e8ae9fdc31
--- /dev/null
+++ b/libs/libevent/src/log.c
@@ -0,0 +1,253 @@
+/* $OpenBSD: err.c,v 1.2 2002/06/25 15:50:15 mickey Exp $ */
+
+/*
+ * log.c
+ *
+ * Based on err.c, which was adapted from OpenBSD libc *err* *warn* code.
+ *
+ * Copyright (c) 2005-2012 Niels Provos and Nick Mathewson
+ *
+ * Copyright (c) 2000 Dug Song <dugsong@monkey.org>
+ *
+ * Copyright (c) 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "event2/event-config.h"
+#include "evconfig-private.h"
+
+#ifdef _WIN32
+#include <winsock2.h>
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+#undef WIN32_LEAN_AND_MEAN
+#endif
+#include <sys/types.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <string.h>
+#include <errno.h>
+#include "event2/event.h"
+#include "event2/util.h"
+
+#include "log-internal.h"
+
+static void event_log(int severity, const char *msg);
+static void event_exit(int errcode) EV_NORETURN;
+
+static event_fatal_cb fatal_fn = NULL;
+
+#ifdef EVENT_DEBUG_LOGGING_ENABLED
+#ifdef USE_DEBUG
+#define DEFAULT_MASK EVENT_DBG_ALL
+#else
+#define DEFAULT_MASK 0
+#endif
+
+#ifdef USE_GLOBAL_FOR_DEBUG_LOGGING
+ev_uint32_t event_debug_logging_mask_ = DEFAULT_MASK;
+#else
+static ev_uint32_t event_debug_logging_mask_ = DEFAULT_MASK;
+ev_uint32_t
+event_debug_get_logging_mask_(void)
+{
+ return event_debug_logging_mask_;
+}
+#endif
+#endif /* EVENT_DEBUG_LOGGING_ENABLED */
+
+void
+event_enable_debug_logging(ev_uint32_t which)
+{
+#ifdef EVENT_DEBUG_LOGGING_ENABLED
+ event_debug_logging_mask_ = which;
+#endif
+}
+
+void
+event_set_fatal_callback(event_fatal_cb cb)
+{
+ fatal_fn = cb;
+}
+
+static void
+event_exit(int errcode)
+{
+ if (fatal_fn) {
+ fatal_fn(errcode);
+ exit(errcode); /* should never be reached */
+ } else if (errcode == EVENT_ERR_ABORT_)
+ abort();
+ else
+ exit(errcode);
+}
+
+void
+event_err(int eval, const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ event_logv_(EVENT_LOG_ERR, strerror(errno), fmt, ap);
+ va_end(ap);
+ event_exit(eval);
+}
+
+void
+event_warn(const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ event_logv_(EVENT_LOG_WARN, strerror(errno), fmt, ap);
+ va_end(ap);
+}
+
+void
+event_sock_err(int eval, evutil_socket_t sock, const char *fmt, ...)
+{
+ va_list ap;
+ int err = evutil_socket_geterror(sock);
+
+ va_start(ap, fmt);
+ event_logv_(EVENT_LOG_ERR, evutil_socket_error_to_string(err), fmt, ap);
+ va_end(ap);
+ event_exit(eval);
+}
+
+void
+event_sock_warn(evutil_socket_t sock, const char *fmt, ...)
+{
+ va_list ap;
+ int err = evutil_socket_geterror(sock);
+
+ va_start(ap, fmt);
+ event_logv_(EVENT_LOG_WARN, evutil_socket_error_to_string(err), fmt, ap);
+ va_end(ap);
+}
+
+void
+event_errx(int eval, const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ event_logv_(EVENT_LOG_ERR, NULL, fmt, ap);
+ va_end(ap);
+ event_exit(eval);
+}
+
+void
+event_warnx(const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ event_logv_(EVENT_LOG_WARN, NULL, fmt, ap);
+ va_end(ap);
+}
+
+void
+event_msgx(const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ event_logv_(EVENT_LOG_MSG, NULL, fmt, ap);
+ va_end(ap);
+}
+
+void
+event_debugx_(const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ event_logv_(EVENT_LOG_DEBUG, NULL, fmt, ap);
+ va_end(ap);
+}
+
+void
+event_logv_(int severity, const char *errstr, const char *fmt, va_list ap)
+{
+ char buf[1024];
+ size_t len;
+
+ if (severity == EVENT_LOG_DEBUG && !event_debug_get_logging_mask_())
+ return;
+
+ if (fmt != NULL)
+ evutil_vsnprintf(buf, sizeof(buf), fmt, ap);
+ else
+ buf[0] = '\0';
+
+ if (errstr) {
+ len = strlen(buf);
+ if (len < sizeof(buf) - 3) {
+ evutil_snprintf(buf + len, sizeof(buf) - len, ": %s", errstr);
+ }
+ }
+
+ event_log(severity, buf);
+}
+
+static event_log_cb log_fn = NULL;
+
+void
+event_set_log_callback(event_log_cb cb)
+{
+ log_fn = cb;
+}
+
+static void
+event_log(int severity, const char *msg)
+{
+ if (log_fn)
+ log_fn(severity, msg);
+ else {
+ const char *severity_str;
+ switch (severity) {
+ case EVENT_LOG_DEBUG:
+ severity_str = "debug";
+ break;
+ case EVENT_LOG_MSG:
+ severity_str = "msg";
+ break;
+ case EVENT_LOG_WARN:
+ severity_str = "warn";
+ break;
+ case EVENT_LOG_ERR:
+ severity_str = "err";
+ break;
+ default:
+ severity_str = "???";
+ break;
+ }
+ (void)fprintf(stderr, "[%s] %s\n", severity_str, msg);
+ }
+}
diff --git a/libs/libevent/src/minheap-internal.h b/libs/libevent/src/minheap-internal.h
new file mode 100644
index 0000000000..b3b6f1fd49
--- /dev/null
+++ b/libs/libevent/src/minheap-internal.h
@@ -0,0 +1,188 @@
+/*
+ * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * Copyright (c) 2006 Maxim Yegorushkin <maxim.yegorushkin@gmail.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef MINHEAP_INTERNAL_H_INCLUDED_
+#define MINHEAP_INTERNAL_H_INCLUDED_
+
+#include "event2/event-config.h"
+#include "evconfig-private.h"
+#include "event2/event.h"
+#include "event2/event_struct.h"
+#include "event2/util.h"
+#include "util-internal.h"
+#include "mm-internal.h"
+
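+/* A binary min-heap of events, ordered by timeout: 'p' is the heap array,
+ * 'n' is the number of elements currently stored, and 'a' is the allocated
+ * capacity of the array. */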
+typedef struct min_heap
+{
+ struct event** p;
+ unsigned n, a;
+} min_heap_t;
+
+static inline void min_heap_ctor_(min_heap_t* s);
+static inline void min_heap_dtor_(min_heap_t* s);
+static inline void min_heap_elem_init_(struct event* e);
+static inline int min_heap_elt_is_top_(const struct event *e);
+static inline int min_heap_empty_(min_heap_t* s);
+static inline unsigned min_heap_size_(min_heap_t* s);
+static inline struct event* min_heap_top_(min_heap_t* s);
+static inline int min_heap_reserve_(min_heap_t* s, unsigned n);
+static inline int min_heap_push_(min_heap_t* s, struct event* e);
+static inline struct event* min_heap_pop_(min_heap_t* s);
+static inline int min_heap_adjust_(min_heap_t *s, struct event* e);
+static inline int min_heap_erase_(min_heap_t* s, struct event* e);
+static inline void min_heap_shift_up_(min_heap_t* s, unsigned hole_index, struct event* e);
+static inline void min_heap_shift_up_unconditional_(min_heap_t* s, unsigned hole_index, struct event* e);
+static inline void min_heap_shift_down_(min_heap_t* s, unsigned hole_index, struct event* e);
+
+#define min_heap_elem_greater(a, b) \
+ (evutil_timercmp(&(a)->ev_timeout, &(b)->ev_timeout, >))
+
+void min_heap_ctor_(min_heap_t* s) { s->p = 0; s->n = 0; s->a = 0; }
+void min_heap_dtor_(min_heap_t* s) { if (s->p) mm_free(s->p); }
+void min_heap_elem_init_(struct event* e) { e->ev_timeout_pos.min_heap_idx = -1; }
+int min_heap_empty_(min_heap_t* s) { return 0u == s->n; }
+unsigned min_heap_size_(min_heap_t* s) { return s->n; }
+struct event* min_heap_top_(min_heap_t* s) { return s->n ? *s->p : 0; }
+
+int min_heap_push_(min_heap_t* s, struct event* e)
+{
+ if (min_heap_reserve_(s, s->n + 1))
+ return -1;
+ min_heap_shift_up_(s, s->n++, e);
+ return 0;
+}
+
+struct event* min_heap_pop_(min_heap_t* s)
+{
+ if (s->n)
+ {
+ struct event* e = *s->p;
+ min_heap_shift_down_(s, 0u, s->p[--s->n]);
+ e->ev_timeout_pos.min_heap_idx = -1;
+ return e;
+ }
+ return 0;
+}
+
+int min_heap_elt_is_top_(const struct event *e)
+{
+ return e->ev_timeout_pos.min_heap_idx == 0;
+}
+
+int min_heap_erase_(min_heap_t* s, struct event* e)
+{
+ if (-1 != e->ev_timeout_pos.min_heap_idx)
+ {
+ struct event *last = s->p[--s->n];
+ unsigned parent = (e->ev_timeout_pos.min_heap_idx - 1) / 2;
+ /* we replace e with the last element in the heap. We might need to
+ shift it upward if it is less than its parent, or downward if it is
+ greater than one or both its children. Since the children are known
+ to be less than the parent, it can't need to shift both up and
+ down. */
+ if (e->ev_timeout_pos.min_heap_idx > 0 && min_heap_elem_greater(s->p[parent], last))
+ min_heap_shift_up_unconditional_(s, e->ev_timeout_pos.min_heap_idx, last);
+ else
+ min_heap_shift_down_(s, e->ev_timeout_pos.min_heap_idx, last);
+ e->ev_timeout_pos.min_heap_idx = -1;
+ return 0;
+ }
+ return -1;
+}
+
+int min_heap_adjust_(min_heap_t *s, struct event *e)
+{
+ if (-1 == e->ev_timeout_pos.min_heap_idx) {
+ return min_heap_push_(s, e);
+ } else {
+ unsigned parent = (e->ev_timeout_pos.min_heap_idx - 1) / 2;
+ /* The position of e has changed; we shift it up or down
+ * as needed. We can't need to do both. */
+ if (e->ev_timeout_pos.min_heap_idx > 0 && min_heap_elem_greater(s->p[parent], e))
+ min_heap_shift_up_unconditional_(s, e->ev_timeout_pos.min_heap_idx, e);
+ else
+ min_heap_shift_down_(s, e->ev_timeout_pos.min_heap_idx, e);
+ return 0;
+ }
+}
+
+int min_heap_reserve_(min_heap_t* s, unsigned n)
+{
+ if (s->a < n)
+ {
+ struct event** p;
+ unsigned a = s->a ? s->a * 2 : 8;
+ if (a < n)
+ a = n;
+ if (!(p = (struct event**)mm_realloc(s->p, a * sizeof *p)))
+ return -1;
+ s->p = p;
+ s->a = a;
+ }
+ return 0;
+}
+
+void min_heap_shift_up_unconditional_(min_heap_t* s, unsigned hole_index, struct event* e)
+{
+ unsigned parent = (hole_index - 1) / 2;
+ do
+ {
+ (s->p[hole_index] = s->p[parent])->ev_timeout_pos.min_heap_idx = hole_index;
+ hole_index = parent;
+ parent = (hole_index - 1) / 2;
+ } while (hole_index && min_heap_elem_greater(s->p[parent], e));
+ (s->p[hole_index] = e)->ev_timeout_pos.min_heap_idx = hole_index;
+}
+
+void min_heap_shift_up_(min_heap_t* s, unsigned hole_index, struct event* e)
+{
+ unsigned parent = (hole_index - 1) / 2;
+ while (hole_index && min_heap_elem_greater(s->p[parent], e))
+ {
+ (s->p[hole_index] = s->p[parent])->ev_timeout_pos.min_heap_idx = hole_index;
+ hole_index = parent;
+ parent = (hole_index - 1) / 2;
+ }
+ (s->p[hole_index] = e)->ev_timeout_pos.min_heap_idx = hole_index;
+}
+
+void min_heap_shift_down_(min_heap_t* s, unsigned hole_index, struct event* e)
+{
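+	/* 0-based heap: the children of hole_index sit at 2*hole_index+1 and
+	 * 2*hole_index+2.  Start at the right child; step back to the left one
+	 * if the right child does not exist or has the later timeout. */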
+ unsigned min_child = 2 * (hole_index + 1);
+ while (min_child <= s->n)
+ {
+ min_child -= min_child == s->n || min_heap_elem_greater(s->p[min_child], s->p[min_child - 1]);
+ if (!(min_heap_elem_greater(e, s->p[min_child])))
+ break;
+ (s->p[hole_index] = s->p[min_child])->ev_timeout_pos.min_heap_idx = hole_index;
+ hole_index = min_child;
+ min_child = 2 * (hole_index + 1);
+ }
+ (s->p[hole_index] = e)->ev_timeout_pos.min_heap_idx = hole_index;
+}
+
+#endif /* MINHEAP_INTERNAL_H_INCLUDED_ */
diff --git a/libs/libevent/src/mm-internal.h b/libs/libevent/src/mm-internal.h
new file mode 100644
index 0000000000..4ba6fce4ad
--- /dev/null
+++ b/libs/libevent/src/mm-internal.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef MM_INTERNAL_H_INCLUDED_
+#define MM_INTERNAL_H_INCLUDED_
+
+#include <sys/types.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef EVENT__DISABLE_MM_REPLACEMENT
+/* Internal use only: Memory allocation functions. We give them nice short
+ * mm_names for our own use, but make sure that the symbols have longer names
+ * so they don't conflict with other libraries (like, say, libmm). */
+
+/** Allocate uninitialized memory.
+ *
+ * @return On success, return a pointer to sz newly allocated bytes.
+ * On failure, set errno to ENOMEM and return NULL.
+ * If the argument sz is 0, simply return NULL.
+ */
+void *event_mm_malloc_(size_t sz);
+
+/** Allocate memory initialized to zero.
+ *
+ * @return On success, return a pointer to (count * size) newly allocated
+ * bytes, initialized to zero.
+ * On failure, or if the product would result in an integer overflow,
+ * set errno to ENOMEM and return NULL.
+ *     If either argument is 0, simply return NULL.
+ */
+void *event_mm_calloc_(size_t count, size_t size);
+
+/** Duplicate a string.
+ *
+ * @return On success, return a pointer to a newly allocated duplicate
+ * of a string.
+ * Set errno to ENOMEM and return NULL if a memory allocation error
+ * occurs (or would occur) in the process.
+ * If the argument str is NULL, set errno to EINVAL and return NULL.
+ */
+char *event_mm_strdup_(const char *str);
+
+/** As realloc(3): resize the memory at 'p' to 'sz' bytes, using the
+ * replacement allocator if one has been configured.  Returns NULL on
+ * failure. */
+void *event_mm_realloc_(void *p, size_t sz);
+/** Release memory returned by any of the allocation functions above. */
+void event_mm_free_(void *p);
+#define mm_malloc(sz) event_mm_malloc_(sz)
+#define mm_calloc(count, size) event_mm_calloc_((count), (size))
+#define mm_strdup(s) event_mm_strdup_(s)
+#define mm_realloc(p, sz) event_mm_realloc_((p), (sz))
+#define mm_free(p) event_mm_free_(p)
+#else
+#define mm_malloc(sz) malloc(sz)
+#define mm_calloc(n, sz) calloc((n), (sz))
+#define mm_strdup(s) strdup(s)
+#define mm_realloc(p, sz) realloc((p), (sz))
+#define mm_free(p) free(p)
+#endif
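+
+/* Typical internal usage (a sketch):
+ *
+ *	struct foo *p = mm_calloc(1, sizeof(*p));
+ *	if (!p)
+ *		return -1;
+ *	...
+ *	mm_free(p);
+ */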
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/libs/libevent/src/ratelim-internal.h b/libs/libevent/src/ratelim-internal.h
new file mode 100644
index 0000000000..6cc1cdde2c
--- /dev/null
+++ b/libs/libevent/src/ratelim-internal.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2009-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef RATELIM_INTERNAL_H_INCLUDED_
+#define RATELIM_INTERNAL_H_INCLUDED_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "event2/util.h"
+
+/** A token bucket is an internal structure that tracks how many bytes we are
+ * currently willing to read or write on a given bufferevent or group of
+ * bufferevents */
+struct ev_token_bucket {
+ /** How many bytes are we willing to read or write right now? These
+	 * values are signed so that we can do "deficit spending" */
+ ev_ssize_t read_limit, write_limit;
+ /** When was this bucket last updated? Measured in abstract 'ticks'
+ * relative to the token bucket configuration. */
+ ev_uint32_t last_updated;
+};
+
+/** Configuration info for a token bucket or set of token buckets. */
+struct ev_token_bucket_cfg {
+ /** How many bytes are we willing to read on average per tick? */
+ size_t read_rate;
+ /** How many bytes are we willing to read at most in any one tick? */
+ size_t read_maximum;
+ /** How many bytes are we willing to write on average per tick? */
+ size_t write_rate;
+ /** How many bytes are we willing to write at most in any one tick? */
+ size_t write_maximum;
+
+ /* How long is a tick? Note that fractions of a millisecond are
+ * ignored. */
+ struct timeval tick_timeout;
+
+ /* How long is a tick, in milliseconds? Derived from tick_timeout. */
+ unsigned msec_per_tick;
+};
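+
+/* For example, with a tick_timeout of 500 msec and a read_rate of 4096,
+ * a bufferevent may read about 4096 bytes per half-second tick (roughly
+ * 8192 bytes per second on average), but never more than read_maximum
+ * bytes within any single tick. */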
+
+/** The current tick is 'current_tick': add bytes to 'bucket' as specified in
+ * 'cfg'. */
+int ev_token_bucket_update_(struct ev_token_bucket *bucket,
+ const struct ev_token_bucket_cfg *cfg,
+ ev_uint32_t current_tick);
+
+/** In which tick does 'tv' fall according to 'cfg'? Note that ticks can
+ * overflow easily; your code needs to handle this. */
+ev_uint32_t ev_token_bucket_get_tick_(const struct timeval *tv,
+ const struct ev_token_bucket_cfg *cfg);
+
+/** Adjust 'bucket' to respect 'cfg', and note that it was last updated in
+ * 'current_tick'. If 'reinitialize' is true, we are changing the
+ * configuration of 'bucket'; otherwise, we are setting it up for the first
+ * time.
+ */
+int ev_token_bucket_init_(struct ev_token_bucket *bucket,
+ const struct ev_token_bucket_cfg *cfg,
+ ev_uint32_t current_tick,
+ int reinitialize);
+
+int bufferevent_remove_from_rate_limit_group_internal_(struct bufferevent *bev,
+ int unsuspend);
+
+/** Decrease the read limit of 'b' by 'n' bytes */
+#define ev_token_bucket_decrement_read(b,n) \
+ do { \
+ (b)->read_limit -= (n); \
+ } while (0)
+/** Decrease the write limit of 'b' by 'n' bytes */
+#define ev_token_bucket_decrement_write(b,n) \
+ do { \
+ (b)->write_limit -= (n); \
+ } while (0)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/libs/libevent/src/signal.c b/libs/libevent/src/signal.c
new file mode 100644
index 0000000000..3f46295024
--- /dev/null
+++ b/libs/libevent/src/signal.c
@@ -0,0 +1,479 @@
+/* $OpenBSD: select.c,v 1.2 2002/06/25 15:50:15 mickey Exp $ */
+
+/*
+ * Copyright 2000-2007 Niels Provos <provos@citi.umich.edu>
+ * Copyright 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "event2/event-config.h"
+#include "evconfig-private.h"
+
+#ifdef _WIN32
+#define WIN32_LEAN_AND_MEAN
+#include <winsock2.h>
+#include <windows.h>
+#undef WIN32_LEAN_AND_MEAN
+#endif
+#include <sys/types.h>
+#ifdef EVENT__HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+#include <sys/queue.h>
+#ifdef EVENT__HAVE_SYS_SOCKET_H
+#include <sys/socket.h>
+#endif
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#ifdef EVENT__HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+#include <errno.h>
+#ifdef EVENT__HAVE_FCNTL_H
+#include <fcntl.h>
+#endif
+
+#include "event2/event.h"
+#include "event2/event_struct.h"
+#include "event-internal.h"
+#include "event2/util.h"
+#include "evsignal-internal.h"
+#include "log-internal.h"
+#include "evmap-internal.h"
+#include "evthread-internal.h"
+
+/*
+ signal.c
+
+ This is the signal-handling implementation we use for backends that don't
+ have a better way to do signal handling. It uses sigaction() or signal()
+  to set a signal handler, and a socket pair to tell the event base when a
+  signal has been delivered.
+
+  Note that I said "the event base": only one event base can be set up to use
+ this at a time. For historical reasons and backward compatibility, if you
+ add an event for a signal to event_base A, then add an event for a signal
+ (any signal!) to event_base B, event_base B will get informed about the
+ signal, but event_base A won't.
+
+ It would be neat to change this behavior in some future version of Libevent.
+ kqueue already does something far more sensible. We can make all backends
+ on Linux do a reasonable thing using signalfd.
+*/
+
+#ifndef _WIN32
+/* Windows wants us to call our signal handlers as __cdecl. Nobody else
+ * expects you to do anything crazy like this. */
+#define __cdecl
+#endif
+
+static int evsig_add(struct event_base *, evutil_socket_t, short, short, void *);
+static int evsig_del(struct event_base *, evutil_socket_t, short, short, void *);
+
+static const struct eventop evsigops = {
+ "signal",
+ NULL,
+ evsig_add,
+ evsig_del,
+ NULL,
+ NULL,
+ 0, 0, 0
+};
+
+#ifndef EVENT__DISABLE_THREAD_SUPPORT
+/* Lock for evsig_base and evsig_base_n_signals_added fields. */
+static void *evsig_base_lock = NULL;
+#endif
+/* The event base that's currently getting informed about signals. */
+static struct event_base *evsig_base = NULL;
+/* A copy of evsig_base->sigev_n_signals_added. */
+static int evsig_base_n_signals_added = 0;
+static evutil_socket_t evsig_base_fd = -1;
+
+static void __cdecl evsig_handler(int sig);
+
+#define EVSIGBASE_LOCK() EVLOCK_LOCK(evsig_base_lock, 0)
+#define EVSIGBASE_UNLOCK() EVLOCK_UNLOCK(evsig_base_lock, 0)
+
+void
+evsig_set_base_(struct event_base *base)
+{
+ EVSIGBASE_LOCK();
+ evsig_base = base;
+ evsig_base_n_signals_added = base->sig.ev_n_signals_added;
+ evsig_base_fd = base->sig.ev_signal_pair[1];
+ EVSIGBASE_UNLOCK();
+}
+
+/* Callback for when the signal handler writes a byte to our signaling socket */
+static void
+evsig_cb(evutil_socket_t fd, short what, void *arg)
+{
+ static char signals[1024];
+ ev_ssize_t n;
+ int i;
+ int ncaught[NSIG];
+ struct event_base *base;
+
+ base = arg;
+
+ memset(&ncaught, 0, sizeof(ncaught));
+
+ while (1) {
+#ifdef _WIN32
+ n = recv(fd, signals, sizeof(signals), 0);
+#else
+ n = read(fd, signals, sizeof(signals));
+#endif
+ if (n == -1) {
+ int err = evutil_socket_geterror(fd);
+ if (! EVUTIL_ERR_RW_RETRIABLE(err))
+ event_sock_err(1, fd, "%s: recv", __func__);
+ break;
+ } else if (n == 0) {
+ /* XXX warn? */
+ break;
+ }
+ for (i = 0; i < n; ++i) {
+ ev_uint8_t sig = signals[i];
+ if (sig < NSIG)
+ ncaught[sig]++;
+ }
+ }
+
+ EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+ for (i = 0; i < NSIG; ++i) {
+ if (ncaught[i])
+ evmap_signal_active_(base, i, ncaught[i]);
+ }
+ EVBASE_RELEASE_LOCK(base, th_base_lock);
+}
+
+int
+evsig_init_(struct event_base *base)
+{
+ /*
+ * Our signal handler is going to write to one end of the socket
+ * pair to wake up our event loop. The event loop then scans for
+ * signals that got delivered.
+ */
+ if (evutil_make_internal_pipe_(base->sig.ev_signal_pair) == -1) {
+#ifdef _WIN32
+ /* Make this nonfatal on win32, where sometimes people
+ have localhost firewalled. */
+ event_sock_warn(-1, "%s: socketpair", __func__);
+#else
+ event_sock_err(1, -1, "%s: socketpair", __func__);
+#endif
+ return -1;
+ }
+
+ if (base->sig.sh_old) {
+ mm_free(base->sig.sh_old);
+ }
+ base->sig.sh_old = NULL;
+ base->sig.sh_old_max = 0;
+
+ event_assign(&base->sig.ev_signal, base, base->sig.ev_signal_pair[0],
+ EV_READ | EV_PERSIST, evsig_cb, base);
+
+ base->sig.ev_signal.ev_flags |= EVLIST_INTERNAL;
+ event_priority_set(&base->sig.ev_signal, 0);
+
+ base->evsigsel = &evsigops;
+
+ return 0;
+}
+
+/* Helper: set the signal handler for evsignal to handler in base, so that
+ * we can restore the original handler when we clear the current one. */
+int
+evsig_set_handler_(struct event_base *base,
+ int evsignal, void (__cdecl *handler)(int))
+{
+#ifdef EVENT__HAVE_SIGACTION
+ struct sigaction sa;
+#else
+ ev_sighandler_t sh;
+#endif
+ struct evsig_info *sig = &base->sig;
+ void *p;
+
+	/*
+	 * Resize the saved signal handler array up to the highest signal
+	 * number; a dynamic array is used to keep the memory footprint small.
+	 */
+ if (evsignal >= sig->sh_old_max) {
+ int new_max = evsignal + 1;
+ event_debug(("%s: evsignal (%d) >= sh_old_max (%d), resizing",
+ __func__, evsignal, sig->sh_old_max));
+ p = mm_realloc(sig->sh_old, new_max * sizeof(*sig->sh_old));
+ if (p == NULL) {
+ event_warn("realloc");
+ return (-1);
+ }
+
+ memset((char *)p + sig->sh_old_max * sizeof(*sig->sh_old),
+ 0, (new_max - sig->sh_old_max) * sizeof(*sig->sh_old));
+
+ sig->sh_old_max = new_max;
+ sig->sh_old = p;
+ }
+
+ /* allocate space for previous handler out of dynamic array */
+ sig->sh_old[evsignal] = mm_malloc(sizeof *sig->sh_old[evsignal]);
+ if (sig->sh_old[evsignal] == NULL) {
+ event_warn("malloc");
+ return (-1);
+ }
+
+ /* save previous handler and setup new handler */
+#ifdef EVENT__HAVE_SIGACTION
+ memset(&sa, 0, sizeof(sa));
+ sa.sa_handler = handler;
+ sa.sa_flags |= SA_RESTART;
+ sigfillset(&sa.sa_mask);
+
+ if (sigaction(evsignal, &sa, sig->sh_old[evsignal]) == -1) {
+ event_warn("sigaction");
+ mm_free(sig->sh_old[evsignal]);
+ sig->sh_old[evsignal] = NULL;
+ return (-1);
+ }
+#else
+ if ((sh = signal(evsignal, handler)) == SIG_ERR) {
+ event_warn("signal");
+ mm_free(sig->sh_old[evsignal]);
+ sig->sh_old[evsignal] = NULL;
+ return (-1);
+ }
+ *sig->sh_old[evsignal] = sh;
+#endif
+
+ return (0);
+}
+
+static int
+evsig_add(struct event_base *base, evutil_socket_t evsignal, short old, short events, void *p)
+{
+ struct evsig_info *sig = &base->sig;
+ (void)p;
+
+ EVUTIL_ASSERT(evsignal >= 0 && evsignal < NSIG);
+
+ /* catch signals if they happen quickly */
+ EVSIGBASE_LOCK();
+ if (evsig_base != base && evsig_base_n_signals_added) {
+ event_warnx("Added a signal to event base %p with signals "
+ "already added to event_base %p. Only one can have "
+ "signals at a time with the %s backend. The base with "
+ "the most recently added signal or the most recent "
+ "event_base_loop() call gets preference; do "
+ "not rely on this behavior in future Libevent versions.",
+ base, evsig_base, base->evsel->name);
+ }
+ evsig_base = base;
+ evsig_base_n_signals_added = ++sig->ev_n_signals_added;
+ evsig_base_fd = base->sig.ev_signal_pair[1];
+ EVSIGBASE_UNLOCK();
+
+ event_debug(("%s: %d: changing signal handler", __func__, (int)evsignal));
+ if (evsig_set_handler_(base, (int)evsignal, evsig_handler) == -1) {
+ goto err;
+ }
+
+
+ if (!sig->ev_signal_added) {
+ if (event_add_nolock_(&sig->ev_signal, NULL, 0))
+ goto err;
+ sig->ev_signal_added = 1;
+ }
+
+ return (0);
+
+err:
+ EVSIGBASE_LOCK();
+ --evsig_base_n_signals_added;
+ --sig->ev_n_signals_added;
+ EVSIGBASE_UNLOCK();
+ return (-1);
+}
+
+int
+evsig_restore_handler_(struct event_base *base, int evsignal)
+{
+ int ret = 0;
+ struct evsig_info *sig = &base->sig;
+#ifdef EVENT__HAVE_SIGACTION
+ struct sigaction *sh;
+#else
+ ev_sighandler_t *sh;
+#endif
+
+ if (evsignal >= sig->sh_old_max) {
+ /* Can't actually restore. */
+ /* XXXX.*/
+ return 0;
+ }
+
+ /* restore previous handler */
+ sh = sig->sh_old[evsignal];
+ sig->sh_old[evsignal] = NULL;
+#ifdef EVENT__HAVE_SIGACTION
+ if (sigaction(evsignal, sh, NULL) == -1) {
+ event_warn("sigaction");
+ ret = -1;
+ }
+#else
+ if (signal(evsignal, *sh) == SIG_ERR) {
+ event_warn("signal");
+ ret = -1;
+ }
+#endif
+
+ mm_free(sh);
+
+ return ret;
+}
+
+static int
+evsig_del(struct event_base *base, evutil_socket_t evsignal, short old, short events, void *p)
+{
+ EVUTIL_ASSERT(evsignal >= 0 && evsignal < NSIG);
+
+ event_debug(("%s: "EV_SOCK_FMT": restoring signal handler",
+ __func__, EV_SOCK_ARG(evsignal)));
+
+ EVSIGBASE_LOCK();
+ --evsig_base_n_signals_added;
+ --base->sig.ev_n_signals_added;
+ EVSIGBASE_UNLOCK();
+
+ return (evsig_restore_handler_(base, (int)evsignal));
+}
+
+static void __cdecl
+evsig_handler(int sig)
+{
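+	/* This runs in signal-handler context, so do as little as possible:
+	 * write one byte naming the signal to the notification socket, and
+	 * preserve errno (and the winsock error) across the call. */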
+ int save_errno = errno;
+#ifdef _WIN32
+ int socket_errno = EVUTIL_SOCKET_ERROR();
+#endif
+ ev_uint8_t msg;
+
+ if (evsig_base == NULL) {
+ event_warnx(
+ "%s: received signal %d, but have no base configured",
+ __func__, sig);
+ return;
+ }
+
+#ifndef EVENT__HAVE_SIGACTION
+ signal(sig, evsig_handler);
+#endif
+
+ /* Wake up our notification mechanism */
+ msg = sig;
+#ifdef _WIN32
+ send(evsig_base_fd, (char*)&msg, 1, 0);
+#else
+ {
+ int r = write(evsig_base_fd, (char*)&msg, 1);
+ (void)r; /* Suppress 'unused return value' and 'unused var' */
+ }
+#endif
+ errno = save_errno;
+#ifdef _WIN32
+ EVUTIL_SET_SOCKET_ERROR(socket_errno);
+#endif
+}
+
+void
+evsig_dealloc_(struct event_base *base)
+{
+ int i = 0;
+ if (base->sig.ev_signal_added) {
+ event_del(&base->sig.ev_signal);
+ base->sig.ev_signal_added = 0;
+ }
+ /* debug event is created in evsig_init_/event_assign even when
+ * ev_signal_added == 0, so unassign is required */
+ event_debug_unassign(&base->sig.ev_signal);
+
+ for (i = 0; i < NSIG; ++i) {
+ if (i < base->sig.sh_old_max && base->sig.sh_old[i] != NULL)
+ evsig_restore_handler_(base, i);
+ }
+ EVSIGBASE_LOCK();
+ if (base == evsig_base) {
+ evsig_base = NULL;
+ evsig_base_n_signals_added = 0;
+ evsig_base_fd = -1;
+ }
+ EVSIGBASE_UNLOCK();
+
+ if (base->sig.ev_signal_pair[0] != -1) {
+ evutil_closesocket(base->sig.ev_signal_pair[0]);
+ base->sig.ev_signal_pair[0] = -1;
+ }
+ if (base->sig.ev_signal_pair[1] != -1) {
+ evutil_closesocket(base->sig.ev_signal_pair[1]);
+ base->sig.ev_signal_pair[1] = -1;
+ }
+ base->sig.sh_old_max = 0;
+
+ /* per index frees are handled in evsig_del() */
+ if (base->sig.sh_old) {
+ mm_free(base->sig.sh_old);
+ base->sig.sh_old = NULL;
+ }
+}
+
+static void
+evsig_free_globals_locks(void)
+{
+#ifndef EVENT__DISABLE_THREAD_SUPPORT
+ if (evsig_base_lock != NULL) {
+ EVTHREAD_FREE_LOCK(evsig_base_lock, 0);
+ evsig_base_lock = NULL;
+ }
+#endif
+ return;
+}
+
+void
+evsig_free_globals_(void)
+{
+ evsig_free_globals_locks();
+}
+
+#ifndef EVENT__DISABLE_THREAD_SUPPORT
+int
+evsig_global_setup_locks_(const int enable_locks)
+{
+ EVTHREAD_SETUP_GLOBAL_LOCK(evsig_base_lock, 0);
+ return 0;
+}
+
+#endif
diff --git a/libs/libevent/src/strlcpy-internal.h b/libs/libevent/src/strlcpy-internal.h
new file mode 100644
index 0000000000..cfc27ec662
--- /dev/null
+++ b/libs/libevent/src/strlcpy-internal.h
@@ -0,0 +1,22 @@
+#ifndef STRLCPY_INTERNAL_H_INCLUDED_
+#define STRLCPY_INTERNAL_H_INCLUDED_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "event2/event-config.h"
+#include "evconfig-private.h"
+
+#ifndef EVENT__HAVE_STRLCPY
+#include <string.h>
+size_t event_strlcpy_(char *dst, const char *src, size_t siz);
+#define strlcpy event_strlcpy_
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+
diff --git a/libs/libevent/src/strlcpy.c b/libs/libevent/src/strlcpy.c
new file mode 100644
index 0000000000..3876475f5a
--- /dev/null
+++ b/libs/libevent/src/strlcpy.c
@@ -0,0 +1,75 @@
+/* $OpenBSD: strlcpy.c,v 1.5 2001/05/13 15:40:16 deraadt Exp $ */
+
+/*
+ * Copyright (c) 1998 Todd C. Miller <Todd.Miller@courtesan.com>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
+ * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+ * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if defined(LIBC_SCCS) && !defined(lint)
+static char *rcsid = "$OpenBSD: strlcpy.c,v 1.5 2001/05/13 15:40:16 deraadt Exp $";
+#endif /* LIBC_SCCS and not lint */
+
+#include "event2/event-config.h"
+#include "evconfig-private.h"
+
+#include <sys/types.h>
+
+#ifndef EVENT__HAVE_STRLCPY
+#include "strlcpy-internal.h"
+
+/*
+ * Copy src to string dst of size siz. At most siz-1 characters
+ * will be copied. Always NUL terminates (unless siz == 0).
+ * Returns strlen(src); if retval >= siz, truncation occurred.
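+ *
+ * For example, event_strlcpy_(buf, name, sizeof(buf)) copies at most
+ * sizeof(buf)-1 bytes of 'name' and always NUL-terminates 'buf'; a return
+ * value >= sizeof(buf) means 'name' was truncated.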
+ */
+size_t
+event_strlcpy_(dst, src, siz)
+ char *dst;
+ const char *src;
+ size_t siz;
+{
+ register char *d = dst;
+ register const char *s = src;
+ register size_t n = siz;
+
+ /* Copy as many bytes as will fit */
+ if (n != 0 && --n != 0) {
+ do {
+ if ((*d++ = *s++) == 0)
+ break;
+ } while (--n != 0);
+ }
+
+ /* Not enough room in dst, add NUL and traverse rest of src */
+ if (n == 0) {
+ if (siz != 0)
+ *d = '\0'; /* NUL-terminate dst */
+ while (*s++)
+ ;
+ }
+
+ return (s - src - 1); /* count does not include NUL */
+}
+#endif
diff --git a/libs/libevent/src/time-internal.h b/libs/libevent/src/time-internal.h
new file mode 100644
index 0000000000..2c584fa752
--- /dev/null
+++ b/libs/libevent/src/time-internal.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
+ * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef TIME_INTERNAL_H_INCLUDED_
+#define TIME_INTERNAL_H_INCLUDED_
+
+#include "event2/event-config.h"
+#include "evconfig-private.h"
+
+#ifdef EVENT__HAVE_MACH_MACH_TIME_H
+/* For mach_timebase_info */
+#include <mach/mach_time.h>
+#endif
+
+#include <time.h>
+
+#include "event2/util.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
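+/* Pick the best monotonic-clock implementation available: POSIX
+ * clock_gettime(CLOCK_MONOTONIC), Mach mach_absolute_time(), the Win32 tick
+ * counters, or a gettimeofday()-based fallback as a last resort. */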
+#if defined(EVENT__HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
+#define HAVE_POSIX_MONOTONIC
+#elif defined(EVENT__HAVE_MACH_ABSOLUTE_TIME)
+#define HAVE_MACH_MONOTONIC
+#elif defined(_WIN32)
+#define HAVE_WIN32_MONOTONIC
+#else
+#define HAVE_FALLBACK_MONOTONIC
+#endif
+
+long evutil_tv_to_msec_(const struct timeval *tv);
+void evutil_usleep_(const struct timeval *tv);
+
+#ifdef _WIN32
+typedef ULONGLONG (WINAPI *ev_GetTickCount_func)(void);
+#endif
+
+struct evutil_monotonic_timer {
+
+#ifdef HAVE_MACH_MONOTONIC
+ struct mach_timebase_info mach_timebase_units;
+#endif
+
+#ifdef HAVE_POSIX_MONOTONIC
+ int monotonic_clock;
+#endif
+
+#ifdef HAVE_WIN32_MONOTONIC
+ ev_GetTickCount_func GetTickCount64_fn;
+ ev_GetTickCount_func GetTickCount_fn;
+ ev_uint64_t last_tick_count;
+ ev_uint64_t adjust_tick_count;
+
+ ev_uint64_t first_tick;
+ ev_uint64_t first_counter;
+ double usec_per_count;
+ int use_performance_counter;
+#endif
+
+ struct timeval adjust_monotonic_clock;
+ struct timeval last_time;
+};
+
+int evutil_configure_monotonic_time_(struct evutil_monotonic_timer *mt,
+ int flags);
+int evutil_gettime_monotonic_(struct evutil_monotonic_timer *mt, struct timeval *tv);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* TIME_INTERNAL_H_INCLUDED_ */
diff --git a/libs/libevent/src/util-internal.h b/libs/libevent/src/util-internal.h
new file mode 100644
index 0000000000..a6318f2890
--- /dev/null
+++ b/libs/libevent/src/util-internal.h
@@ -0,0 +1,485 @@
+/*
+ * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef UTIL_INTERNAL_H_INCLUDED_
+#define UTIL_INTERNAL_H_INCLUDED_
+
+#include "event2/event-config.h"
+#include "evconfig-private.h"
+
+#include <errno.h>
+
+/* For EVUTIL_ASSERT */
+#include "log-internal.h"
+#include <stdio.h>
+#include <stdlib.h>
+#ifdef EVENT__HAVE_SYS_SOCKET_H
+#include <sys/socket.h>
+#endif
+#ifdef EVENT__HAVE_SYS_EVENTFD_H
+#include <sys/eventfd.h>
+#endif
+#include "event2/util.h"
+
+#include "time-internal.h"
+#include "ipv6-internal.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* If we need magic to say "inline", get it for free internally. */
+#ifdef EVENT__inline
+#if (_MSC_VER < 1900)
+#define inline __inline
+#else
+#define inline EVENT__inline
+#endif
+#endif
+#ifdef EVENT____func__
+#define __func__ EVENT____func__
+#endif
+
+/* A good no-op to use in macro definitions. */
+#define EVUTIL_NIL_STMT_ ((void)0)
+/* A no-op that tricks the compiler into thinking a condition is used while
+ * definitely not making any code for it. Used to compile out asserts while
+ * avoiding "unused variable" warnings. The "!" forces the compiler to
+ * do the sizeof() on an int, in case "condition" is a bitfield value.
+ */
+#define EVUTIL_NIL_CONDITION_(condition) do { \
+ (void)sizeof(!(condition)); \
+} while(0)
+
+/* Internal use only: macros to match patterns of error codes in a
+ cross-platform way. We need these macros because of two historical
+ reasons: first, nonblocking IO functions are generally written to give an
+ error on the "blocked now, try later" case, so sometimes an error from a
+ read, write, connect, or accept means "no error; just wait for more
+ data," and we need to look at the error code. Second, Windows defines
+ a different set of error codes for sockets. */
+
+#ifndef _WIN32
+
+#if EAGAIN == EWOULDBLOCK
+#define EVUTIL_ERR_IS_EAGAIN(e) \
+ ((e) == EAGAIN)
+#else
+#define EVUTIL_ERR_IS_EAGAIN(e) \
+ ((e) == EAGAIN || (e) == EWOULDBLOCK)
+#endif
+
+/* True iff e is an error that means a read/write operation can be retried. */
+#define EVUTIL_ERR_RW_RETRIABLE(e) \
+ ((e) == EINTR || EVUTIL_ERR_IS_EAGAIN(e))
+/* True iff e is an error that means a connect can be retried. */
+#define EVUTIL_ERR_CONNECT_RETRIABLE(e) \
+ ((e) == EINTR || (e) == EINPROGRESS)
+/* True iff e is an error that means an accept can be retried. */
+#define EVUTIL_ERR_ACCEPT_RETRIABLE(e) \
+ ((e) == EINTR || EVUTIL_ERR_IS_EAGAIN(e) || (e) == ECONNABORTED)
+
+/* True iff e is an error that means the connection was refused */
+#define EVUTIL_ERR_CONNECT_REFUSED(e) \
+ ((e) == ECONNREFUSED)
+
+#else
+/* Win32 */
+
+#define EVUTIL_ERR_IS_EAGAIN(e) \
+ ((e) == WSAEWOULDBLOCK || (e) == EAGAIN)
+
+#define EVUTIL_ERR_RW_RETRIABLE(e) \
+ ((e) == WSAEWOULDBLOCK || \
+ (e) == WSAEINTR)
+
+#define EVUTIL_ERR_CONNECT_RETRIABLE(e) \
+ ((e) == WSAEWOULDBLOCK || \
+ (e) == WSAEINTR || \
+ (e) == WSAEINPROGRESS || \
+ (e) == WSAEINVAL)
+
+#define EVUTIL_ERR_ACCEPT_RETRIABLE(e) \
+ EVUTIL_ERR_RW_RETRIABLE(e)
+
+#define EVUTIL_ERR_CONNECT_REFUSED(e) \
+ ((e) == WSAECONNREFUSED)
+
+#endif
+
+/* Arguments for shutdown() */
+#ifdef SHUT_RD
+#define EVUTIL_SHUT_RD SHUT_RD
+#else
+#define EVUTIL_SHUT_RD 0
+#endif
+#ifdef SHUT_WR
+#define EVUTIL_SHUT_WR SHUT_WR
+#else
+#define EVUTIL_SHUT_WR 1
+#endif
+#ifdef SHUT_BOTH
+#define EVUTIL_SHUT_BOTH SHUT_BOTH
+#else
+#define EVUTIL_SHUT_BOTH 2
+#endif
+
+/* Helper: Verify that all the elements in 'dlist' are internally consistent.
+ * Checks for circular lists and bad prev/next pointers.
+ *
+ * Example usage:
+ * EVUTIL_ASSERT_LIST_OK(eventlist, event, ev_next);
+ */
+#define EVUTIL_ASSERT_LIST_OK(dlist, type, field) do { \
+ struct type *elm1, *elm2, **nextp; \
+ if (LIST_EMPTY((dlist))) \
+ break; \
+ \
+ /* Check list for circularity using Floyd's */ \
+ /* 'Tortoise and Hare' algorithm */ \
+ elm1 = LIST_FIRST((dlist)); \
+ elm2 = LIST_NEXT(elm1, field); \
+ while (elm1 && elm2) { \
+ EVUTIL_ASSERT(elm1 != elm2); \
+ elm1 = LIST_NEXT(elm1, field); \
+ elm2 = LIST_NEXT(elm2, field); \
+ if (!elm2) \
+ break; \
+ EVUTIL_ASSERT(elm1 != elm2); \
+ elm2 = LIST_NEXT(elm2, field); \
+ } \
+ \
+ /* Now check next and prev pointers for consistency. */ \
+ nextp = &LIST_FIRST((dlist)); \
+ elm1 = LIST_FIRST((dlist)); \
+ while (elm1) { \
+ EVUTIL_ASSERT(*nextp == elm1); \
+ EVUTIL_ASSERT(nextp == elm1->field.le_prev); \
+ nextp = &LIST_NEXT(elm1, field); \
+ elm1 = *nextp; \
+ } \
+ } while (0)
+
+/* Helper: Verify that all the elements in a TAILQ are internally consistent.
+ * Checks for circular lists and bad prev/next pointers.
+ *
+ * Example usage:
+ * EVUTIL_ASSERT_TAILQ_OK(activelist, event, ev_active_next);
+ */
+#define EVUTIL_ASSERT_TAILQ_OK(tailq, type, field) do { \
+ struct type *elm1, *elm2, **nextp; \
+ if (TAILQ_EMPTY((tailq))) \
+ break; \
+ \
+ /* Check list for circularity using Floyd's */ \
+ /* 'Tortoise and Hare' algorithm */ \
+ elm1 = TAILQ_FIRST((tailq)); \
+ elm2 = TAILQ_NEXT(elm1, field); \
+ while (elm1 && elm2) { \
+ EVUTIL_ASSERT(elm1 != elm2); \
+ elm1 = TAILQ_NEXT(elm1, field); \
+ elm2 = TAILQ_NEXT(elm2, field); \
+ if (!elm2) \
+ break; \
+ EVUTIL_ASSERT(elm1 != elm2); \
+ elm2 = TAILQ_NEXT(elm2, field); \
+ } \
+ \
+ /* Now check next and prev pointers for consistency. */ \
+ nextp = &TAILQ_FIRST((tailq)); \
+ elm1 = TAILQ_FIRST((tailq)); \
+ while (elm1) { \
+ EVUTIL_ASSERT(*nextp == elm1); \
+ EVUTIL_ASSERT(nextp == elm1->field.tqe_prev); \
+ nextp = &TAILQ_NEXT(elm1, field); \
+ elm1 = *nextp; \
+ } \
+ EVUTIL_ASSERT(nextp == (tailq)->tqh_last); \
+ } while (0)
+
+/* Locale-independent replacements for some ctypes functions. Use these
+ * when you care about ASCII's notion of character types, because you are about
+ * to send those types onto the wire.
+ */
+int EVUTIL_ISALPHA_(char c);
+int EVUTIL_ISALNUM_(char c);
+int EVUTIL_ISSPACE_(char c);
+int EVUTIL_ISDIGIT_(char c);
+int EVUTIL_ISXDIGIT_(char c);
+int EVUTIL_ISPRINT_(char c);
+int EVUTIL_ISLOWER_(char c);
+int EVUTIL_ISUPPER_(char c);
+char EVUTIL_TOUPPER_(char c);
+char EVUTIL_TOLOWER_(char c);
+
+/** Remove all trailing horizontal whitespace (space or tab) from the end of a
+ * string */
+void evutil_rtrim_lws_(char *);
+
+
+/** Helper macro. If we know that a given pointer points to a field in a
+ structure, return a pointer to the structure itself. Used to implement
+ our half-baked C OO. Example:
+
+ struct subtype {
+ int x;
+ struct supertype common;
+ int y;
+ };
+ ...
+ void fn(struct supertype *super) {
+ struct subtype *sub = EVUTIL_UPCAST(super, struct subtype, common);
+ ...
+ }
+ */
+#define EVUTIL_UPCAST(ptr, type, field) \
+ ((type *)(((char*)(ptr)) - evutil_offsetof(type, field)))
+
+/* As open(pathname, flags, mode), except that the file is always opened with
+ * the close-on-exec flag set. (And the mode argument is mandatory.)
+ */
+int evutil_open_closeonexec_(const char *pathname, int flags, unsigned mode);
+
+int evutil_read_file_(const char *filename, char **content_out, size_t *len_out,
+ int is_binary);
+
+int evutil_socket_connect_(evutil_socket_t *fd_ptr, const struct sockaddr *sa, int socklen);
+
+int evutil_socket_finished_connecting_(evutil_socket_t fd);
+
+int evutil_ersatz_socketpair_(int, int , int, evutil_socket_t[]);
+
+int evutil_resolve_(int family, const char *hostname, struct sockaddr *sa,
+ ev_socklen_t *socklen, int port);
+
+const char *evutil_getenv_(const char *name);
+
+/* Structure to hold the state of our weak random number generator.
+ */
+struct evutil_weakrand_state {
+ ev_uint32_t seed;
+};
+
+#define EVUTIL_WEAKRAND_MAX EV_INT32_MAX
+
+/* Initialize the state of a weak random number generator based on 'seed'. If
+ * the seed is 0, construct a new seed based on not-very-strong platform
+ * entropy, like the PID and the time of day.
+ *
+ * This function, and the other evutil_weakrand* functions, are meant for
+ * speed, not security or statistical strength. If you need a RNG which an
+ * attacker can't predict, or which passes strong statistical tests, use the
+ * evutil_secure_rng* functions instead.
+ */
+ev_uint32_t evutil_weakrand_seed_(struct evutil_weakrand_state *state, ev_uint32_t seed);
+/* Return a pseudorandom value between 0 and EVUTIL_WEAKRAND_MAX inclusive.
+ * Updates the state in 'seed' as needed -- this value must be protected by a
+ * lock.
+ */
+ev_int32_t evutil_weakrand_(struct evutil_weakrand_state *seed);
+/* Return a pseudorandom value x such that 0 <= x < top. top must be no more
+ * than EVUTIL_WEAKRAND_MAX. Updates the state in 'seed' as needed -- this
+ * value must be protected by a lock */
+ev_int32_t evutil_weakrand_range_(struct evutil_weakrand_state *seed, ev_int32_t top);
+
+/* Evaluates to the same boolean value as 'p', and hints to the compiler that
+ * we expect this value to be false. */
+#if defined(__GNUC__) && __GNUC__ >= 3 /* gcc 3.0 or later */
+#define EVUTIL_UNLIKELY(p) __builtin_expect(!!(p),0)
+#else
+#define EVUTIL_UNLIKELY(p) (p)
+#endif
+
+/* Replacement for assert() that calls event_errx on failure. */
+#ifdef NDEBUG
+#define EVUTIL_ASSERT(cond) EVUTIL_NIL_CONDITION_(cond)
+#define EVUTIL_FAILURE_CHECK(cond) 0
+#else
+#define EVUTIL_ASSERT(cond) \
+ do { \
+ if (EVUTIL_UNLIKELY(!(cond))) { \
+ event_errx(EVENT_ERR_ABORT_, \
+ "%s:%d: Assertion %s failed in %s", \
+ __FILE__,__LINE__,#cond,__func__); \
+ /* In case a user-supplied handler tries to */ \
+ /* return control to us, log and abort here. */ \
+ (void)fprintf(stderr, \
+ "%s:%d: Assertion %s failed in %s", \
+ __FILE__,__LINE__,#cond,__func__); \
+ abort(); \
+ } \
+ } while (0)
+#define EVUTIL_FAILURE_CHECK(cond) EVUTIL_UNLIKELY(cond)
+#endif
+
+#ifndef EVENT__HAVE_STRUCT_SOCKADDR_STORAGE
+/* Replacement for sockaddr storage that we can use internally on platforms
+ * that lack it. It is not space-efficient, but neither is sockaddr_storage.
+ */
+struct sockaddr_storage {
+ union {
+ struct sockaddr ss_sa;
+ struct sockaddr_in ss_sin;
+ struct sockaddr_in6 ss_sin6;
+ char ss_padding[128];
+ } ss_union;
+};
+#define ss_family ss_union.ss_sa.sa_family
+#endif
+
+/* Internal addrinfo error code. This one is returned only from
+ * evutil_getaddrinfo_common_, when we are sure that we'll have to hit a DNS
+ * server. */
+#define EVUTIL_EAI_NEED_RESOLVE -90002
+
+struct evdns_base;
+struct evdns_getaddrinfo_request;
+typedef struct evdns_getaddrinfo_request* (*evdns_getaddrinfo_fn)(
+ struct evdns_base *base,
+ const char *nodename, const char *servname,
+ const struct evutil_addrinfo *hints_in,
+ void (*cb)(int, struct evutil_addrinfo *, void *), void *arg);
+
+void evutil_set_evdns_getaddrinfo_fn_(evdns_getaddrinfo_fn fn);
+
+struct evutil_addrinfo *evutil_new_addrinfo_(struct sockaddr *sa,
+ ev_socklen_t socklen, const struct evutil_addrinfo *hints);
+struct evutil_addrinfo *evutil_addrinfo_append_(struct evutil_addrinfo *first,
+ struct evutil_addrinfo *append);
+void evutil_adjust_hints_for_addrconfig_(struct evutil_addrinfo *hints);
+int evutil_getaddrinfo_common_(const char *nodename, const char *servname,
+ struct evutil_addrinfo *hints, struct evutil_addrinfo **res, int *portnum);
+
+int evutil_getaddrinfo_async_(struct evdns_base *dns_base,
+ const char *nodename, const char *servname,
+ const struct evutil_addrinfo *hints_in,
+ void (*cb)(int, struct evutil_addrinfo *, void *), void *arg);
+
+/** Return true iff sa is a loopback address. (That is, it is in 127.0.0.0/8,
+ * or it is ::1). */
+int evutil_sockaddr_is_loopback_(const struct sockaddr *sa);
+
+
+/**
+ Formats a sockaddr sa into a string buffer of size outlen stored in out.
+ Returns a pointer to out. Always writes something into out, so it's safe
+ to use the output of this function without checking it for NULL.
+ */
+const char *evutil_format_sockaddr_port_(const struct sockaddr *sa, char *out, size_t outlen);
+
+int evutil_hex_char_to_int_(char c);
+
+
+void evutil_free_secure_rng_globals_(void);
+void evutil_free_globals_(void);
+
+#ifdef _WIN32
+HMODULE evutil_load_windows_system_library_(const TCHAR *library_name);
+#endif
+
+#ifndef EV_SIZE_FMT
+#if defined(_MSC_VER) || defined(__MINGW32__) || defined(__MINGW64__)
+#define EV_U64_FMT "%I64u"
+#define EV_I64_FMT "%I64d"
+#define EV_I64_ARG(x) ((__int64)(x))
+#define EV_U64_ARG(x) ((unsigned __int64)(x))
+#else
+#define EV_U64_FMT "%llu"
+#define EV_I64_FMT "%lld"
+#define EV_I64_ARG(x) ((long long)(x))
+#define EV_U64_ARG(x) ((unsigned long long)(x))
+#endif
+#endif
+
+#ifdef _WIN32
+#define EV_SOCK_FMT EV_I64_FMT
+#define EV_SOCK_ARG(x) EV_I64_ARG((x))
+#else
+#define EV_SOCK_FMT "%d"
+#define EV_SOCK_ARG(x) (x)
+#endif
+
+#if defined(__STDC__) && defined(__STDC_VERSION__) && !defined(__MINGW64_VERSION_MAJOR)
+#if (__STDC_VERSION__ >= 199901L)
+#define EV_SIZE_FMT "%zu"
+#define EV_SSIZE_FMT "%zd"
+#define EV_SIZE_ARG(x) (x)
+#define EV_SSIZE_ARG(x) (x)
+#endif
+#endif
+
+#ifndef EV_SIZE_FMT
+#if (EVENT__SIZEOF_SIZE_T <= EVENT__SIZEOF_LONG)
+#define EV_SIZE_FMT "%lu"
+#define EV_SSIZE_FMT "%ld"
+#define EV_SIZE_ARG(x) ((unsigned long)(x))
+#define EV_SSIZE_ARG(x) ((long)(x))
+#else
+#define EV_SIZE_FMT EV_U64_FMT
+#define EV_SSIZE_FMT EV_I64_FMT
+#define EV_SIZE_ARG(x) EV_U64_ARG(x)
+#define EV_SSIZE_ARG(x) EV_I64_ARG(x)
+#endif
+#endif
+
+evutil_socket_t evutil_socket_(int domain, int type, int protocol);
+evutil_socket_t evutil_accept4_(evutil_socket_t sockfd, struct sockaddr *addr,
+ ev_socklen_t *addrlen, int flags);
+
+/* Used by one of the test programs. */
+EVENT2_EXPORT_SYMBOL
+int evutil_make_internal_pipe_(evutil_socket_t fd[2]);
+evutil_socket_t evutil_eventfd_(unsigned initval, int flags);
+
+#ifdef SOCK_NONBLOCK
+#define EVUTIL_SOCK_NONBLOCK SOCK_NONBLOCK
+#else
+#define EVUTIL_SOCK_NONBLOCK 0x4000000
+#endif
+#ifdef SOCK_CLOEXEC
+#define EVUTIL_SOCK_CLOEXEC SOCK_CLOEXEC
+#else
+#define EVUTIL_SOCK_CLOEXEC 0x80000000
+#endif
+#ifdef EFD_NONBLOCK
+#define EVUTIL_EFD_NONBLOCK EFD_NONBLOCK
+#else
+#define EVUTIL_EFD_NONBLOCK 0x4000
+#endif
+#ifdef EFD_CLOEXEC
+#define EVUTIL_EFD_CLOEXEC EFD_CLOEXEC
+#else
+#define EVUTIL_EFD_CLOEXEC 0x8000
+#endif
+
+void evutil_memclear_(void *mem, size_t len);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/libs/libevent/src/win32select.c b/libs/libevent/src/win32select.c
new file mode 100644
index 0000000000..1766858c2c
--- /dev/null
+++ b/libs/libevent/src/win32select.c
@@ -0,0 +1,388 @@
+/*
+ * Copyright 2007-2012 Niels Provos and Nick Mathewson
+ * Copyright 2000-2007 Niels Provos <provos@citi.umich.edu>
+ * Copyright 2003 Michael A. Davis <mike@datanerds.net>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "event2/event-config.h"
+#include "evconfig-private.h"
+
+#ifdef _WIN32
+
+#include <winsock2.h>
+#include <windows.h>
+#include <sys/types.h>
+#include <sys/queue.h>
+#include <limits.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+
+#include "event2/util.h"
+#include "util-internal.h"
+#include "log-internal.h"
+#include "event2/event.h"
+#include "event-internal.h"
+#include "evmap-internal.h"
+#include "event2/thread.h"
+#include "evthread-internal.h"
+#include "time-internal.h"
+
+#define XFREE(ptr) do { if (ptr) mm_free(ptr); } while (0)
+
+extern struct event_list timequeue;
+extern struct event_list addqueue;
+
+struct win_fd_set {
+ unsigned int fd_count;
+ SOCKET fd_array[1];
+};
+
+/* MSDN says this is required to handle SIGFPE */
+volatile double SIGFPE_REQ = 0.0f;
+
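+/* Per-socket bookkeeping: the 1-based position of the socket in the read
+ * and write fd sets, or 0 if the socket is not in that set. */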
+struct idx_info {
+ int read_pos_plus1;
+ int write_pos_plus1;
+};
+
+struct win32op {
+ unsigned num_fds_in_fd_sets;
+ int resize_out_sets;
+ struct win_fd_set *readset_in;
+ struct win_fd_set *writeset_in;
+ struct win_fd_set *readset_out;
+ struct win_fd_set *writeset_out;
+ struct win_fd_set *exset_out;
+ unsigned signals_are_broken : 1;
+};
+
+static void *win32_init(struct event_base *);
+static int win32_add(struct event_base *, evutil_socket_t, short old, short events, void *idx_);
+static int win32_del(struct event_base *, evutil_socket_t, short old, short events, void *idx_);
+static int win32_dispatch(struct event_base *base, struct timeval *);
+static void win32_dealloc(struct event_base *);
+
+struct eventop win32ops = {
+ "win32",
+ win32_init,
+ win32_add,
+ win32_del,
+ win32_dispatch,
+ win32_dealloc,
+ 0, /* doesn't need reinit */
+ 0, /* No features supported. */
+ sizeof(struct idx_info),
+};
+
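+/* A struct win_fd_set already reserves room for one SOCKET in fd_array, so
+ * holding n sockets needs only (n-1) additional array slots. */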
+#define FD_SET_ALLOC_SIZE(n) ((sizeof(struct win_fd_set) + ((n)-1)*sizeof(SOCKET)))
+
+static int
+grow_fd_sets(struct win32op *op, unsigned new_num_fds)
+{
+ size_t size;
+
+ EVUTIL_ASSERT(new_num_fds >= op->readset_in->fd_count &&
+ new_num_fds >= op->writeset_in->fd_count);
+ EVUTIL_ASSERT(new_num_fds >= 1);
+
+ size = FD_SET_ALLOC_SIZE(new_num_fds);
+ if (!(op->readset_in = mm_realloc(op->readset_in, size)))
+ return (-1);
+ if (!(op->writeset_in = mm_realloc(op->writeset_in, size)))
+ return (-1);
+ op->resize_out_sets = 1;
+ op->num_fds_in_fd_sets = new_num_fds;
+ return (0);
+}
+
+static int
+do_fd_set(struct win32op *op, struct idx_info *ent, evutil_socket_t s, int read)
+{
+ struct win_fd_set *set = read ? op->readset_in : op->writeset_in;
+ if (read) {
+ if (ent->read_pos_plus1 > 0)
+ return (0);
+ } else {
+ if (ent->write_pos_plus1 > 0)
+ return (0);
+ }
+ if (set->fd_count == op->num_fds_in_fd_sets) {
+ if (grow_fd_sets(op, op->num_fds_in_fd_sets*2))
+ return (-1);
+ /* set pointer will have changed and needs reiniting! */
+ set = read ? op->readset_in : op->writeset_in;
+ }
+ set->fd_array[set->fd_count] = s;
+ if (read)
+ ent->read_pos_plus1 = set->fd_count+1;
+ else
+ ent->write_pos_plus1 = set->fd_count+1;
+ return (set->fd_count++);
+}
+
+static int
+do_fd_clear(struct event_base *base,
+ struct win32op *op, struct idx_info *ent, int read)
+{
+ int i;
+ struct win_fd_set *set = read ? op->readset_in : op->writeset_in;
+ if (read) {
+ i = ent->read_pos_plus1 - 1;
+ ent->read_pos_plus1 = 0;
+ } else {
+ i = ent->write_pos_plus1 - 1;
+ ent->write_pos_plus1 = 0;
+ }
+ if (i < 0)
+ return (0);
+ if (--set->fd_count != (unsigned)i) {
+ struct idx_info *ent2;
+ SOCKET s2;
+ s2 = set->fd_array[i] = set->fd_array[set->fd_count];
+
+ ent2 = evmap_io_get_fdinfo_(&base->io, s2);
+
+ if (!ent2) /* This indicates a bug. */
+ return (0);
+ if (read)
+ ent2->read_pos_plus1 = i+1;
+ else
+ ent2->write_pos_plus1 = i+1;
+ }
+ return (0);
+}
+
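+/* Initial capacity of each fd set; grow_fd_sets() doubles it as needed. */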
+#define NEVENT 32
+void *
+win32_init(struct event_base *base)
+{
+ struct win32op *winop;
+ size_t size;
+ if (!(winop = mm_calloc(1, sizeof(struct win32op))))
+ return NULL;
+ winop->num_fds_in_fd_sets = NEVENT;
+ size = FD_SET_ALLOC_SIZE(NEVENT);
+ if (!(winop->readset_in = mm_malloc(size)))
+ goto err;
+ if (!(winop->writeset_in = mm_malloc(size)))
+ goto err;
+ if (!(winop->readset_out = mm_malloc(size)))
+ goto err;
+ if (!(winop->writeset_out = mm_malloc(size)))
+ goto err;
+ if (!(winop->exset_out = mm_malloc(size)))
+ goto err;
+ winop->readset_in->fd_count = winop->writeset_in->fd_count = 0;
+ winop->readset_out->fd_count = winop->writeset_out->fd_count
+ = winop->exset_out->fd_count = 0;
+
+ if (evsig_init_(base) < 0)
+ winop->signals_are_broken = 1;
+
+ evutil_weakrand_seed_(&base->weakrand_seed, 0);
+
+ return (winop);
+ err:
+ XFREE(winop->readset_in);
+ XFREE(winop->writeset_in);
+ XFREE(winop->readset_out);
+ XFREE(winop->writeset_out);
+ XFREE(winop->exset_out);
+ XFREE(winop);
+ return (NULL);
+}
+
+int
+win32_add(struct event_base *base, evutil_socket_t fd,
+ short old, short events, void *idx_)
+{
+ struct win32op *win32op = base->evbase;
+ struct idx_info *idx = idx_;
+
+ if ((events & EV_SIGNAL) && win32op->signals_are_broken)
+ return (-1);
+
+ if (!(events & (EV_READ|EV_WRITE)))
+ return (0);
+
+	event_debug(("%s: adding event for "EV_SOCK_FMT, __func__, EV_SOCK_ARG(fd)));
+ if (events & EV_READ) {
+ if (do_fd_set(win32op, idx, fd, 1)<0)
+ return (-1);
+ }
+ if (events & EV_WRITE) {
+ if (do_fd_set(win32op, idx, fd, 0)<0)
+ return (-1);
+ }
+ return (0);
+}
+
+int
+win32_del(struct event_base *base, evutil_socket_t fd, short old, short events,
+ void *idx_)
+{
+ struct win32op *win32op = base->evbase;
+ struct idx_info *idx = idx_;
+
+ event_debug(("%s: Removing event for "EV_SOCK_FMT,
+ __func__, EV_SOCK_ARG(fd)));
+ if (events & EV_READ)
+ do_fd_clear(base, win32op, idx, 1);
+ if (events & EV_WRITE)
+ do_fd_clear(base, win32op, idx, 0);
+
+ return 0;
+}
+
+static void
+fd_set_copy(struct win_fd_set *out, const struct win_fd_set *in)
+{
+ out->fd_count = in->fd_count;
+ memcpy(out->fd_array, in->fd_array, in->fd_count * (sizeof(SOCKET)));
+}
+
+/*
+ static void dump_fd_set(struct win_fd_set *s)
+ {
+ unsigned int i;
+ printf("[ ");
+ for(i=0;i<s->fd_count;++i)
+ printf("%d ",(int)s->fd_array[i]);
+ printf("]\n");
+ }
+*/
+
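+/* One poll iteration: refresh the scratch *_out sets, run select() with the
+ * base lock released, then activate the ready sockets.  Each result set is
+ * walked starting at a pseudorandom offset so that sockets near the front of
+ * the array are not systematically serviced first. */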
+int
+win32_dispatch(struct event_base *base, struct timeval *tv)
+{
+ struct win32op *win32op = base->evbase;
+ int res = 0;
+ unsigned j, i;
+ int fd_count;
+ SOCKET s;
+
+ if (win32op->resize_out_sets) {
+ size_t size = FD_SET_ALLOC_SIZE(win32op->num_fds_in_fd_sets);
+ if (!(win32op->readset_out = mm_realloc(win32op->readset_out, size)))
+ return (-1);
+ if (!(win32op->exset_out = mm_realloc(win32op->exset_out, size)))
+ return (-1);
+ if (!(win32op->writeset_out = mm_realloc(win32op->writeset_out, size)))
+ return (-1);
+ win32op->resize_out_sets = 0;
+ }
+
+ fd_set_copy(win32op->readset_out, win32op->readset_in);
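+	/* exceptfds is filled from the *write* set on purpose: Winsock reports a
+	 * failed non-blocking connect() through exceptfds, and the loop below
+	 * treats those sockets as writable (EV_WRITE). */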
+ fd_set_copy(win32op->exset_out, win32op->writeset_in);
+ fd_set_copy(win32op->writeset_out, win32op->writeset_in);
+
+ fd_count =
+ (win32op->readset_out->fd_count > win32op->writeset_out->fd_count) ?
+ win32op->readset_out->fd_count : win32op->writeset_out->fd_count;
+
+ if (!fd_count) {
+ long msec = tv ? evutil_tv_to_msec_(tv) : LONG_MAX;
+ /* Sleep's DWORD argument is unsigned long */
+ if (msec < 0)
+ msec = LONG_MAX;
+ /* Windows doesn't like you to call select() with no sockets */
+ Sleep(msec);
+ return (0);
+ }
+
+ EVBASE_RELEASE_LOCK(base, th_base_lock);
+
+ res = select(fd_count,
+ (struct fd_set*)win32op->readset_out,
+ (struct fd_set*)win32op->writeset_out,
+ (struct fd_set*)win32op->exset_out, tv);
+
+ EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+
+ event_debug(("%s: select returned %d", __func__, res));
+
+ if (res <= 0) {
+ return res;
+ }
+
+ if (win32op->readset_out->fd_count) {
+ i = evutil_weakrand_range_(&base->weakrand_seed,
+ win32op->readset_out->fd_count);
+ for (j=0; j<win32op->readset_out->fd_count; ++j) {
+ if (++i >= win32op->readset_out->fd_count)
+ i = 0;
+ s = win32op->readset_out->fd_array[i];
+ evmap_io_active_(base, s, EV_READ);
+ }
+ }
+ if (win32op->exset_out->fd_count) {
+ i = evutil_weakrand_range_(&base->weakrand_seed,
+ win32op->exset_out->fd_count);
+ for (j=0; j<win32op->exset_out->fd_count; ++j) {
+ if (++i >= win32op->exset_out->fd_count)
+ i = 0;
+ s = win32op->exset_out->fd_array[i];
+ evmap_io_active_(base, s, EV_WRITE);
+ }
+ }
+ if (win32op->writeset_out->fd_count) {
+ i = evutil_weakrand_range_(&base->weakrand_seed,
+ win32op->writeset_out->fd_count);
+ for (j=0; j<win32op->writeset_out->fd_count; ++j) {
+ if (++i >= win32op->writeset_out->fd_count)
+ i = 0;
+ s = win32op->writeset_out->fd_array[i];
+ evmap_io_active_(base, s, EV_WRITE);
+ }
+ }
+ return (0);
+}
+
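+/* Tear down the backend.  The per-socket idx_info records are owned by evmap
+ * and released with the event_base, not here; the "free the tree" note below
+ * appears to be left over from an older tree-based version of this backend. */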
+void
+win32_dealloc(struct event_base *base)
+{
+ struct win32op *win32op = base->evbase;
+
+ evsig_dealloc_(base);
+ if (win32op->readset_in)
+ mm_free(win32op->readset_in);
+ if (win32op->writeset_in)
+ mm_free(win32op->writeset_in);
+ if (win32op->readset_out)
+ mm_free(win32op->readset_out);
+ if (win32op->writeset_out)
+ mm_free(win32op->writeset_out);
+ if (win32op->exset_out)
+ mm_free(win32op->exset_out);
+ /* XXXXX free the tree. */
+
+ memset(win32op, 0, sizeof(*win32op));
+ mm_free(win32op);
+}
+
+#endif